/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"
/*****************************************************************************
 *
 * Type conversions.
 *
 */
static struct osc_req *cl2osc_req(const struct cl_req_slice *slice)
{
        LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type);
        return container_of0(slice, struct osc_req, or_cl);
}
static struct osc_io *cl2osc_io(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        struct osc_io *oio = container_of0(slice, struct osc_io, oi_cl);

        LINVRNT(oio == osc_env_io(env));
        return oio;
}
static struct osc_page *osc_cl_page_osc(struct cl_page *page,
                                        struct osc_object *osc)
{
        const struct cl_page_slice *slice;

        if (osc != NULL)
                slice = cl_object_page_slice(&osc->oo_cl, page);
        else
                slice = cl_page_at(page, &osc_device_type);
        LASSERT(slice != NULL);

        return cl2osc_page(slice);
}
/*****************************************************************************
 *
 * io operations.
 *
 */

static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
{
}
static void osc_read_ahead_release(const struct lu_env *env,
                                   void *cbdata)
{
        struct ldlm_lock *dlmlock = cbdata;
        struct lustre_handle lockh;

        ldlm_lock2handle(dlmlock, &lockh);
        ldlm_lock_decref(&lockh, LCK_PR);
        LDLM_LOCK_PUT(dlmlock);
}
static int osc_io_read_ahead(const struct lu_env *env,
                             const struct cl_io_slice *ios,
                             pgoff_t start, struct cl_read_ahead *ra)
{
        struct osc_object *osc = cl2osc(ios->cis_obj);
        struct ldlm_lock *dlmlock;
        int result = -ENODATA;

        dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
        if (dlmlock != NULL) {
                if (dlmlock->l_req_mode != LCK_PR) {
                        struct lustre_handle lockh;

                        ldlm_lock2handle(dlmlock, &lockh);
                        ldlm_lock_addref(&lockh, LCK_PR);
                        ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
                }

                ra->cra_end = cl_index(osc2cl(osc),
                                       dlmlock->l_policy_data.l_extent.end);
                ra->cra_release = osc_read_ahead_release;
                ra->cra_cbdata = dlmlock;
                result = 0;
        }

        return result;
}
/**
 * An implementation of cl_io_operations::cio_submit() method for osc
 * layer. Iterates over pages in the in-queue, prepares each for io by
 * calling cl_page_prep() and then submits them through osc_page_submit(),
 * queueing the resulting pages for sync RPCs via osc_queue_sync_pages().
 */
static int osc_io_submit(const struct lu_env *env,
                         const struct cl_io_slice *ios,
                         enum cl_req_type crt, struct cl_2queue *queue)
{
        struct cl_page *page;
        struct cl_page *tmp;
        struct client_obd *cli = NULL;
        struct osc_object *osc = NULL; /* to keep gcc happy */
        struct osc_page *opg;
        struct cl_io *io;
        struct list_head list = LIST_HEAD_INIT(list);

        struct cl_page_list *qin = &queue->c2_qin;
        struct cl_page_list *qout = &queue->c2_qout;
        unsigned int queued = 0;
        int result = 0;
        int cmd;
        int brw_flags;
        unsigned int max_pages;

        LASSERT(qin->pl_nr > 0);

        CDEBUG(D_CACHE, "%d %d\n", qin->pl_nr, crt);

        osc = cl2osc(ios->cis_obj);
        cli = osc_cli(osc);
        max_pages = cli->cl_max_pages_per_rpc;

        cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;

        /*
         * NOTE: here @page is a top-level page. This is done to avoid
         *       creation of sub-page-list.
         */
        cl_page_list_for_each_safe(page, tmp, qin) {
                struct osc_async_page *oap;

                /* Top level IO. */
                io = page->cp_owner;
                LASSERT(io != NULL);

                opg = osc_cl_page_osc(page, osc);
                oap = &opg->ops_oap;
                LASSERT(osc == oap->oap_obj);

                if (!list_empty(&oap->oap_pending_item) ||
                    !list_empty(&oap->oap_rpc_item)) {
                        CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
                               oap, opg);
                        result = -EBUSY;
                        break;
                }

                result = cl_page_prep(env, io, page, crt);
                if (result != 0) {
                        if (result != -EALREADY)
                                break;
                        /*
                         * Handle -EALREADY error: for read case, the page is
                         * already in UPTODATE state; for write, the page
                         * is not dirty.
                         */
                        result = 0;
                        continue;
                }

                spin_lock(&oap->oap_lock);
                oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
                oap->oap_async_flags |= ASYNC_COUNT_STABLE;
                spin_unlock(&oap->oap_lock);

                osc_page_submit(env, opg, crt, brw_flags);
                list_add_tail(&oap->oap_pending_item, &list);

                if (page->cp_sync_io != NULL)
                        cl_page_list_move(qout, qin, page);
                else /* async IO */
                        cl_page_list_del(env, qin, page);

                if (++queued == max_pages) {
                        queued = 0;
                        result = osc_queue_sync_pages(env, osc, &list, cmd,
                                                      brw_flags);
                        if (result < 0)
                                break;
                }
        }

        if (queued > 0)
                result = osc_queue_sync_pages(env, osc, &list, cmd, brw_flags);

        CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
        return qout->pl_nr > 0 ? 0 : result;
}
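
/*
 * Illustrative sketch (not part of the original file): the batching pattern
 * osc_io_submit() uses, reduced to its core. Items are staged on a local
 * list and flushed whenever "max" of them accumulate, with one final flush
 * for the remainder. The names struct item, i_link and flush_list() are
 * hypothetical; only the list primitives are real kernel APIs.
 */
#if 0
static int submit_in_batches(struct list_head *in, unsigned int max)
{
        struct list_head batch = LIST_HEAD_INIT(batch);
        unsigned int queued = 0;
        struct item *it, *tmp;          /* hypothetical payload type */
        int rc = 0;

        list_for_each_entry_safe(it, tmp, in, i_link) {
                list_move_tail(&it->i_link, &batch);
                if (++queued == max) {
                        queued = 0;
                        rc = flush_list(&batch);        /* hypothetical */
                        if (rc < 0)
                                break;
                }
        }
        if (rc == 0 && queued > 0)
                rc = flush_list(&batch);                /* flush remainder */
        return rc;
}
#endif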
/**
 * This is called when a page is accessed within a file in a way that creates
 * a new page, if one were missing (i.e., if there was a hole at that place in
 * the file, or the accessed page is beyond the current file size).
 *
 * Expand stripe KMS if necessary.
 */
static void osc_page_touch_at(const struct lu_env *env,
                              struct cl_object *obj, pgoff_t idx, size_t to)
{
        struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        int valid;
        __u64 kms;

        /* offset within stripe */
        kms = cl_offset(obj, idx) + to;

        cl_object_attr_lock(obj);
        /*
         * XXX old code used
         *
         *         ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
         *
         * here
         */
        CDEBUG(D_INODE, "stripe KMS %sincreasing "LPU64"->"LPU64" "LPU64"\n",
               kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
               loi->loi_lvb.lvb_size);

        attr->cat_mtime = attr->cat_ctime = LTIME_S(CFS_CURRENT_TIME);
        valid = CAT_MTIME | CAT_CTIME;
        if (kms > loi->loi_kms) {
                attr->cat_kms = kms;
                valid |= CAT_KMS;
        }
        if (kms > loi->loi_lvb.lvb_size) {
                attr->cat_size = kms;
                valid |= CAT_SIZE;
        }
        cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);
}
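
/*
 * Illustrative sketch (not part of the original file): the known-minimum-size
 * (KMS) arithmetic behind osc_page_touch_at(). Touching byte `to` of page
 * `idx` implies the stripe holds at least idx * PAGE_SIZE + to bytes, so KMS
 * only ever grows. kms_after_touch() is a hypothetical helper name.
 */
#if 0
static __u64 kms_after_touch(__u64 kms, pgoff_t idx, size_t to)
{
        /* cl_offset(obj, idx) reduces to a page-shift at this layer */
        __u64 touched = ((__u64)idx << PAGE_SHIFT) + to;

        return touched > kms ? touched : kms;
}
#endif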
static int osc_io_commit_async(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               struct cl_page_list *qin, int from, int to,
                               cl_commit_cbt cb)
{
        struct cl_io *io = ios->cis_io;
        struct osc_io *oio = cl2osc_io(env, ios);
        struct osc_object *osc = cl2osc(ios->cis_obj);
        struct cl_page *page;
        struct cl_page *last_page;
        struct osc_page *opg;
        int result = 0;

        LASSERT(qin->pl_nr > 0);

        /* Handle partial page cases */
        last_page = cl_page_list_last(qin);
        if (oio->oi_lockless) {
                page = cl_page_list_first(qin);
                if (page == last_page) {
                        cl_page_clip(env, page, from, to);
                } else {
                        if (from != 0)
                                cl_page_clip(env, page, from, PAGE_SIZE);
                        if (to != PAGE_SIZE)
                                cl_page_clip(env, last_page, 0, to);
                }
        }

        while (qin->pl_nr > 0) {
                struct osc_async_page *oap;

                page = cl_page_list_first(qin);
                opg = osc_cl_page_osc(page, osc);
                oap = &opg->ops_oap;

                if (!list_empty(&oap->oap_rpc_item)) {
                        CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
                               oap, opg);
                        result = -EBUSY;
                        break;
                }

                /* The page may be already in dirty cache. */
                if (list_empty(&oap->oap_pending_item)) {
                        result = osc_page_cache_add(env, &opg->ops_cl, io);
                        if (result != 0)
                                break;
                }

                osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
                                  page == last_page ? to : PAGE_SIZE);

                cl_page_list_del(env, qin, page);

                (*cb)(env, io, page);
                /* Can't access page any more. Page can be in transfer and
                 * complete at any time. */
        }

        /* for sync write, kernel will wait for this page to be flushed before
         * osc_io_end() is called, so release it earlier.
         * for mkwrite(), it's known there is no further pages. */
        if (cl_io_is_sync_write(io) && oio->oi_active != NULL) {
                osc_extent_release(env, oio->oi_active);
                oio->oi_active = NULL;
        }

        CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
        return result;
}
static int osc_io_rw_iter_init(const struct lu_env *env,
                               const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct osc_io *oio = osc_env_io(env);
        struct osc_object *osc = cl2osc(ios->cis_obj);
        struct client_obd *cli = osc_cli(osc);
        unsigned long c;
        unsigned long npages;
        unsigned long max_pages;

        if (cl_io_is_append(io))
                return 0;

        npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT;
        if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
                ++npages;

        max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
        if (npages > max_pages)
                npages = max_pages;

        c = atomic_long_read(cli->cl_lru_left);
        if (c < npages && osc_lru_reclaim(cli) > 0)
                c = atomic_long_read(cli->cl_lru_left);
        while (c >= npages) {
                if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
                        oio->oi_lru_reserved = npages;
                        break;
                }
                c = atomic_long_read(cli->cl_lru_left);
        }

        return 0;
}
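
/*
 * Illustrative sketch (not part of the original file): the lock-free
 * reservation loop used above. atomic_long_cmpxchg() only succeeds when the
 * counter still holds the value we read, so concurrent reservers retry
 * instead of serializing on a lock, and a short budget simply yields no
 * reservation. try_reserve() is a hypothetical helper name.
 */
#if 0
static long try_reserve(atomic_long_t *left, long want)
{
        long c = atomic_long_read(left);

        while (c >= want) {
                /* succeeds iff nobody raced us between read and cmpxchg */
                if (atomic_long_cmpxchg(left, c, c - want) == c)
                        return want;    /* reservation succeeded */
                c = atomic_long_read(left);
        }
        return 0;                       /* budget short: proceed unreserved */
}
#endif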
static void osc_io_rw_iter_fini(const struct lu_env *env,
                                const struct cl_io_slice *ios)
{
        struct osc_io *oio = osc_env_io(env);
        struct osc_object *osc = cl2osc(ios->cis_obj);
        struct client_obd *cli = osc_cli(osc);

        if (oio->oi_lru_reserved > 0) {
                atomic_long_add(oio->oi_lru_reserved, cli->cl_lru_left);
                oio->oi_lru_reserved = 0;
        }
        oio->oi_write_osclock = NULL;
}
static int osc_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct cl_io *io;
        struct cl_fault_io *fio;

        io = ios->cis_io;
        fio = &io->u.ci_fault;
        CDEBUG(D_INFO, "%lu %d %zu\n",
               fio->ft_index, fio->ft_writable, fio->ft_nob);
        /*
         * If mapping is writeable, adjust kms to cover this page,
         * but do not extend kms beyond actual file size.
         */
        if (fio->ft_writable)
                osc_page_touch_at(env, ios->cis_obj,
                                  fio->ft_index, fio->ft_nob);
        return 0;
}
static int osc_async_upcall(void *a, int rc)
{
        struct osc_async_cbargs *args = a;

        args->opc_rc = rc;
        complete(&args->opc_sync);
        return 0;
}
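
/*
 * Illustrative sketch (not part of the original file): how this upcall pairs
 * with its waiter. The start path records whether the RPC was actually sent
 * and the end path only waits in that case, exactly as the setattr start/end
 * handlers below do. example_wait() is a hypothetical helper name.
 */
#if 0
static int example_wait(struct osc_async_cbargs *args, bool rpc_sent)
{
        if (!rpc_sent)
                return 0;               /* nothing in flight, nothing to wait */

        wait_for_completion(&args->opc_sync);   /* upcall fires complete() */
        return args->opc_rc;            /* rc stashed by osc_async_upcall() */
}
#endif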
/**
 * Checks that there are no pages being written in the extent being truncated.
 */
static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
                          struct osc_page *ops, void *cbdata)
{
        struct cl_page *page = ops->ops_cl.cpl_page;
        struct osc_async_page *oap;
        __u64 start = *(__u64 *)cbdata;

        oap = &ops->ops_oap;
        if (oap->oap_cmd & OBD_BRW_WRITE &&
            !list_empty(&oap->oap_pending_item))
                CL_PAGE_DEBUG(D_ERROR, env, page, "exists " LPU64 "/%s.\n",
                              start, current->comm);

        if (PageLocked(page->cp_vmpage))
                CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
                       ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK);

        return CLP_GANG_OKAY;
}
static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
                            struct osc_io *oio, __u64 size)
{
        struct cl_object *clob;
        int partial;
        pgoff_t start;

        clob = oio->oi_cl.cis_obj;
        start = cl_index(clob, size);
        partial = cl_offset(clob, start) < size;

        /*
         * Complain if there are pages in the truncated region.
         */
        osc_page_gang_lookup(env, io, cl2osc(clob),
                             start + partial, CL_PAGE_EOF,
                             trunc_check_cb, (void *)&size);
}
static int osc_io_setattr_start(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct cl_object *obj = slice->cis_obj;
        struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        struct obdo *oa = &oio->oi_oa;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
        unsigned int ia_valid = io->u.ci_setattr.sa_valid;
        int result = 0;

        /* truncate cache dirty pages first */
        if (cl_io_is_trunc(io))
                result = osc_cache_truncate_start(env, oio, cl2osc(obj), size);

        if (result == 0 && oio->oi_lockless == 0) {
                cl_object_attr_lock(obj);
                result = cl_object_attr_get(env, obj, attr);
                if (result == 0) {
                        struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
                        unsigned int cl_valid = 0;

                        if (ia_valid & ATTR_SIZE) {
                                attr->cat_size = attr->cat_kms = size;
                                cl_valid = (CAT_SIZE | CAT_KMS);
                        }
                        if (ia_valid & ATTR_MTIME_SET) {
                                attr->cat_mtime = lvb->lvb_mtime;
                                cl_valid |= CAT_MTIME;
                        }
                        if (ia_valid & ATTR_ATIME_SET) {
                                attr->cat_atime = lvb->lvb_atime;
                                cl_valid |= CAT_ATIME;
                        }
                        if (ia_valid & ATTR_CTIME_SET) {
                                attr->cat_ctime = lvb->lvb_ctime;
                                cl_valid |= CAT_CTIME;
                        }
                        result = cl_object_attr_update(env, obj, attr,
                                                       cl_valid);
                }
                cl_object_attr_unlock(obj);
        }
        memset(oa, 0, sizeof(*oa));
        if (result == 0) {
                oa->o_oi = loi->loi_oi;
                obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid);
                oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index;
                oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP;
                if (ia_valid & ATTR_CTIME) {
                        oa->o_valid |= OBD_MD_FLCTIME;
                        oa->o_ctime = attr->cat_ctime;
                }
                if (ia_valid & ATTR_ATIME) {
                        oa->o_valid |= OBD_MD_FLATIME;
                        oa->o_atime = attr->cat_atime;
                }
                if (ia_valid & ATTR_MTIME) {
                        oa->o_valid |= OBD_MD_FLMTIME;
                        oa->o_mtime = attr->cat_mtime;
                }
                if (ia_valid & ATTR_SIZE) {
                        oa->o_size = size;
                        oa->o_blocks = OBD_OBJECT_EOF;
                        oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

                        if (oio->oi_lockless) {
                                oa->o_flags = OBD_FL_SRVLOCK;
                                oa->o_valid |= OBD_MD_FLFLAGS;
                        }
                } else {
                        LASSERT(oio->oi_lockless == 0);
                }

                if (ia_valid & ATTR_ATTR_FLAG) {
                        oa->o_flags = io->u.ci_setattr.sa_attr_flags;
                        oa->o_valid |= OBD_MD_FLFLAGS;
                }

                init_completion(&cbargs->opc_sync);

                if (ia_valid & ATTR_SIZE)
                        result = osc_punch_base(osc_export(cl2osc(obj)),
                                                oa, osc_async_upcall,
                                                cbargs, PTLRPCD_SET);
                else
                        result = osc_setattr_async(osc_export(cl2osc(obj)),
                                                   oa, osc_async_upcall,
                                                   cbargs, PTLRPCD_SET);
                cbargs->opc_rpc_sent = result == 0;
        }
        return result;
}
static void osc_io_setattr_end(const struct lu_env *env,
                               const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct cl_object *obj = slice->cis_obj;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        int result = 0;

        if (cbargs->opc_rpc_sent) {
                wait_for_completion(&cbargs->opc_sync);
                result = io->ci_result = cbargs->opc_rc;
        }
        if (result == 0) {
                if (oio->oi_lockless) {
                        /* lockless truncate */
                        struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);

                        LASSERT(cl_io_is_trunc(io));
                        /* XXX: Need a lock. */
                        osd->od_stats.os_lockless_truncates++;
                }
        }

        if (cl_io_is_trunc(io)) {
                __u64 size = io->u.ci_setattr.sa_attr.lvb_size;

                osc_trunc_check(env, io, oio, size);
                if (oio->oi_trunc != NULL) {
                        osc_cache_truncate_end(env, oio, cl2osc(obj));
                        oio->oi_trunc = NULL;
                }
        }
}
struct osc_data_version_args {
        struct osc_io *dva_oio;
};

static int
osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                           void *arg, int rc)
{
        struct osc_data_version_args *dva = arg;
        struct osc_io *oio = dva->dva_oio;
        const struct ost_body *body;

        if (rc < 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa,
                             &body->oa);
out:
        oio->oi_cbarg.opc_rc = rc;
        complete(&oio->oi_cbarg.opc_sync);

        return 0;
}
static int osc_io_data_version_start(const struct lu_env *env,
                                     const struct cl_io_slice *slice)
{
        struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct obdo *oa = &oio->oi_oa;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        struct osc_object *obj = cl2osc(slice->cis_obj);
        struct lov_oinfo *loi = obj->oo_oinfo;
        struct obd_export *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct ost_body *body;
        struct osc_data_version_args *dva;
        int rc;

        memset(oa, 0, sizeof(*oa));
        oa->o_oi = loi->loi_oi;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

        if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags |= OBD_FL_SRVLOCK;
                if (dv->dv_flags & LL_DV_WR_FLUSH)
                        oa->o_flags |= OBD_FL_FLUSH;
        }

        init_completion(&cbargs->opc_sync);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                return -ENOMEM;

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc < 0) {
                ptlrpc_request_free(req);
                return rc;
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_data_version_interpret;
        CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
        dva = ptlrpc_req_async_args(req);
        dva->dva_oio = oio;

        ptlrpcd_add_req(req);

        return 0;
}
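
/*
 * Illustrative sketch (not part of the original file): the rq_async_args
 * convention used above. A small per-request argument struct is overlaid on
 * storage embedded in the request itself (CLASSERT guards the size), so the
 * interpret callback can recover its context without a separate allocation.
 * struct my_args, ma_oio and stash_args() are hypothetical names.
 */
#if 0
struct my_args {
        struct osc_io *ma_oio;
};

static void stash_args(struct ptlrpc_request *req, struct osc_io *oio)
{
        struct my_args *ma;

        /* compile-time check: args must fit the embedded storage */
        CLASSERT(sizeof(*ma) <= sizeof(req->rq_async_args));
        ma = ptlrpc_req_async_args(req);
        ma->ma_oio = oio;       /* read back later by rq_interpret_reply */
}
#endif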
static void osc_io_data_version_end(const struct lu_env *env,
                                    const struct cl_io_slice *slice)
{
        struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
        struct osc_io *oio = cl2osc_io(env, slice);
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;

        wait_for_completion(&cbargs->opc_sync);

        if (cbargs->opc_rc != 0) {
                slice->cis_io->ci_result = cbargs->opc_rc;
        } else if (!(oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)) {
                slice->cis_io->ci_result = -EOPNOTSUPP;
        } else {
                dv->dv_data_version = oio->oi_oa.o_data_version;
                slice->cis_io->ci_result = 0;
        }
}
static int osc_io_read_start(const struct lu_env *env,
                             const struct cl_io_slice *slice)
{
        struct cl_object *obj = slice->cis_obj;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        int rc = 0;

        if (!slice->cis_io->ci_noatime) {
                cl_object_attr_lock(obj);
                attr->cat_atime = LTIME_S(CFS_CURRENT_TIME);
                rc = cl_object_attr_update(env, obj, attr, CAT_ATIME);
                cl_object_attr_unlock(obj);
        }

        return rc;
}
static int osc_io_write_start(const struct lu_env *env,
                              const struct cl_io_slice *slice)
{
        struct cl_object *obj = slice->cis_obj;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        int rc = 0;

        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
        cl_object_attr_lock(obj);
        attr->cat_mtime = attr->cat_ctime = LTIME_S(CFS_CURRENT_TIME);
        rc = cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
        cl_object_attr_unlock(obj);

        return rc;
}
static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
                         struct cl_fsync_io *fio)
{
        struct osc_io *oio = osc_env_io(env);
        struct obdo *oa = &oio->oi_oa;
        struct lov_oinfo *loi = obj->oo_oinfo;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        int rc = 0;

        memset(oa, 0, sizeof(*oa));
        oa->o_oi = loi->loi_oi;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

        /* reload size and blocks for start and end of sync range */
        oa->o_size = fio->fi_start;
        oa->o_blocks = fio->fi_end;
        oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

        obdo_set_parent_fid(oa, fio->fi_fid);

        init_completion(&cbargs->opc_sync);

        rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET);
        return rc;
}
static int osc_io_fsync_start(const struct lu_env *env,
                              const struct cl_io_slice *slice)
{
        struct cl_io *io = slice->cis_io;
        struct cl_fsync_io *fio = &io->u.ci_fsync;
        struct cl_object *obj = slice->cis_obj;
        struct osc_object *osc = cl2osc(obj);
        pgoff_t start = cl_index(obj, fio->fi_start);
        pgoff_t end = cl_index(obj, fio->fi_end);
        int result = 0;

        if (fio->fi_end == OBD_OBJECT_EOF)
                end = CL_PAGE_EOF;

        result = osc_cache_writeback_range(env, osc, start, end, 0,
                                           fio->fi_mode == CL_FSYNC_DISCARD);
        if (result > 0) {
                fio->fi_nr_written += result;
                result = 0;
        }
        if (fio->fi_mode == CL_FSYNC_ALL) {
                int rc;

                /* we have to wait for writeback to finish before we can
                 * send OST_SYNC RPC. This is bad because it causes extents
                 * to be written osc by osc. However, we usually start
                 * writeback before CL_FSYNC_ALL so this won't have any real
                 * problem. */
                rc = osc_cache_wait_range(env, osc, start, end);
                if (result == 0)
                        result = rc;
                rc = osc_fsync_ost(env, osc, fio);
                if (result == 0)
                        result = rc;
        }

        return result;
}
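
/*
 * Illustrative sketch (not part of the original file): the "first error wins"
 * accumulation used above, where later steps still run for their side effects
 * but cannot overwrite an earlier failure. run_both() is a hypothetical name.
 */
#if 0
static int run_both(int (*first)(void), int (*second)(void))
{
        int result = first();
        int rc = second();      /* still attempted after a failure */

        return result != 0 ? result : rc;
}
#endif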
static void osc_io_fsync_end(const struct lu_env *env,
                             const struct cl_io_slice *slice)
{
        struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
        struct cl_object *obj = slice->cis_obj;
        pgoff_t start = cl_index(obj, fio->fi_start);
        pgoff_t end = cl_index(obj, fio->fi_end);
        int result = 0;

        if (fio->fi_mode == CL_FSYNC_LOCAL) {
                result = osc_cache_wait_range(env, cl2osc(obj), start, end);
        } else if (fio->fi_mode == CL_FSYNC_ALL) {
                struct osc_io *oio = cl2osc_io(env, slice);
                struct osc_async_cbargs *cbargs = &oio->oi_cbarg;

                wait_for_completion(&cbargs->opc_sync);
                if (result == 0)
                        result = cbargs->opc_rc;
        }
        slice->cis_io->ci_result = result;
}
static void osc_io_end(const struct lu_env *env,
                       const struct cl_io_slice *slice)
{
        struct osc_io *oio = cl2osc_io(env, slice);

        if (oio->oi_active) {
                osc_extent_release(env, oio->oi_active);
                oio->oi_active = NULL;
        }
}
static const struct cl_io_operations osc_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_start     = osc_io_read_start,
                        .cio_fini      = osc_io_fini
                },
                [CIT_WRITE] = {
                        .cio_iter_init = osc_io_rw_iter_init,
                        .cio_iter_fini = osc_io_rw_iter_fini,
                        .cio_start     = osc_io_write_start,
                        .cio_end       = osc_io_end,
                        .cio_fini      = osc_io_fini
                },
                [CIT_SETATTR] = {
                        .cio_start     = osc_io_setattr_start,
                        .cio_end       = osc_io_setattr_end
                },
                [CIT_DATA_VERSION] = {
                        .cio_start     = osc_io_data_version_start,
                        .cio_end       = osc_io_data_version_end,
                },
                [CIT_FAULT] = {
                        .cio_start     = osc_io_fault_start,
                        .cio_end       = osc_io_end,
                        .cio_fini      = osc_io_fini
                },
                [CIT_FSYNC] = {
                        .cio_start     = osc_io_fsync_start,
                        .cio_end       = osc_io_fsync_end,
                        .cio_fini      = osc_io_fini
                },
                [CIT_MISC] = {
                        .cio_fini      = osc_io_fini
                }
        },
        .cio_read_ahead   = osc_io_read_ahead,
        .cio_submit       = osc_io_submit,
        .cio_commit_async = osc_io_commit_async
};
/*****************************************************************************
 *
 * Transfer operations.
 *
 */
static int osc_req_prep(const struct lu_env *env,
                        const struct cl_req_slice *slice)
{
        return 0;
}

static void osc_req_completion(const struct lu_env *env,
                               const struct cl_req_slice *slice, int ioret)
{
        struct osc_req *or;

        or = cl2osc_req(slice);
        OBD_SLAB_FREE_PTR(or, osc_req_kmem);
}
/**
 * Implementation of struct cl_req_operations::cro_attr_set() for osc
 * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
 * fields.
 */
static void osc_req_attr_set(const struct lu_env *env,
                             const struct cl_req_slice *slice,
                             const struct cl_object *obj,
                             struct cl_req_attr *attr, u64 flags)
{
        struct lov_oinfo *oinfo;
        struct cl_req *clerq;
        struct cl_page *apage; /* _some_ page in @clerq */
        struct ldlm_lock *lock; /* _some_ lock protecting @apage */
        struct osc_page *opg;
        struct obdo *oa;
        struct ost_lvb *lvb;

        oinfo = cl2osc(obj)->oo_oinfo;
        lvb = &oinfo->loi_lvb;
        oa = attr->cra_oa;

        if ((flags & OBD_MD_FLMTIME) != 0) {
                oa->o_mtime = lvb->lvb_mtime;
                oa->o_valid |= OBD_MD_FLMTIME;
        }
        if ((flags & OBD_MD_FLATIME) != 0) {
                oa->o_atime = lvb->lvb_atime;
                oa->o_valid |= OBD_MD_FLATIME;
        }
        if ((flags & OBD_MD_FLCTIME) != 0) {
                oa->o_ctime = lvb->lvb_ctime;
                oa->o_valid |= OBD_MD_FLCTIME;
        }
        if (flags & OBD_MD_FLGROUP) {
                ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
                oa->o_valid |= OBD_MD_FLGROUP;
        }
        if (flags & OBD_MD_FLID) {
                ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
                oa->o_valid |= OBD_MD_FLID;
        }
        if (flags & OBD_MD_FLHANDLE) {
                clerq = slice->crs_req;
                LASSERT(!list_empty(&clerq->crq_pages));
                apage = container_of(clerq->crq_pages.next,
                                     struct cl_page, cp_flight);
                opg = osc_cl_page_osc(apage, NULL);
                lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
                                            OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_CANCELING);
                if (lock == NULL && !opg->ops_srvlock) {
                        struct ldlm_resource *res;
                        struct ldlm_res_id *resname;

                        CL_PAGE_DEBUG(D_ERROR, env, apage, "uncovered page!\n");

                        resname = &osc_env_info(env)->oti_resname;
                        ostid_build_res_name(&oinfo->loi_oi, resname);
                        res = ldlm_resource_get(
                                osc_export(cl2osc(obj))->exp_obd->obd_namespace,
                                NULL, resname, LDLM_EXTENT, 0);
                        ldlm_resource_dump(D_ERROR, res);

                        libcfs_debug_dumpstack(NULL);
                        LBUG();
                }

                /* check for lockless io. */
                if (lock != NULL) {
                        oa->o_handle = lock->l_remote_handle;
                        oa->o_valid |= OBD_MD_FLHANDLE;
                        LDLM_LOCK_PUT(lock);
                }
        }
}
static const struct cl_req_operations osc_req_ops = {
        .cro_prep       = osc_req_prep,
        .cro_attr_set   = osc_req_attr_set,
        .cro_completion = osc_req_completion
};
int osc_io_init(const struct lu_env *env,
                struct cl_object *obj, struct cl_io *io)
{
        struct osc_io *oio = osc_env_io(env);

        CL_IO_SLICE_CLEAN(oio, oi_cl);
        cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops);
        return 0;
}
int osc_req_init(const struct lu_env *env, struct cl_device *dev,
                 struct cl_req *req)
{
        struct osc_req *or;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, GFP_NOFS);
        if (or != NULL) {
                cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
                result = 0;
        } else {
                result = -ENOMEM;
        }
        return result;
}