/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"
/*****************************************************************************
 *
 * Type conversions.
 *
 */
static struct osc_req *cl2osc_req(const struct cl_req_slice *slice)
{
        LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type);
        return container_of0(slice, struct osc_req, or_cl);
}
static struct osc_io *cl2osc_io(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        struct osc_io *oio = container_of0(slice, struct osc_io, oi_cl);

        LINVRNT(oio == osc_env_io(env));
        return oio;
}
static struct osc_page *osc_cl_page_osc(struct cl_page *page)
{
        const struct cl_page_slice *slice;

        slice = cl_page_at(page, &osc_device_type);
        LASSERT(slice != NULL);

        return cl2osc_page(slice);
}
/*****************************************************************************
 *
 * io operations.
 *
 */
static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
{
}
struct cl_page *osc_oap2cl_page(struct osc_async_page *oap)
{
        return container_of(oap, struct osc_page, ops_oap)->ops_cl.cpl_page;
}
/**
 * An implementation of cl_io_operations::cio_io_submit() method for osc
 * layer. Iterates over pages in the in-queue, prepares each for io by calling
 * cl_page_prep() and then either submits them through osc_io_submit_page()
 * or, if the page is already submitted, changes osc flags through
 * osc_set_async_flags().
 */
static int osc_io_submit(const struct lu_env *env,
                         const struct cl_io_slice *ios,
                         enum cl_req_type crt, struct cl_2queue *queue,
                         enum cl_req_priority priority)
{
        struct cl_page    *page;
        struct cl_page    *tmp;
        struct osc_object *osc0 = NULL;
        struct client_obd *cli  = NULL;
        struct osc_object *osc  = NULL; /* to keep gcc happy */
        struct osc_page   *opg;
        struct cl_io      *io;

        struct cl_page_list *qin  = &queue->c2_qin;
        struct cl_page_list *qout = &queue->c2_qout;
        int queued = 0;
        int result = 0;

        LASSERT(qin->pl_nr > 0);

        CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, crt);
        /*
         * NOTE: here @page is a top-level page. This is done to avoid
         *       creation of sub-page-list.
         */
        cl_page_list_for_each_safe(page, tmp, qin) {
                struct osc_async_page *oap;
                struct obd_export     *exp;

                /* Top level IO. */
                io = page->cp_owner;
                LASSERT(io != NULL);

                opg = osc_cl_page_osc(page);
                oap = &opg->ops_oap;
                osc = cl2osc(opg->ops_cl.cpl_obj);
                exp = osc_export(osc);

                if (priority > CRP_NORMAL) {
                        cfs_spin_lock(&oap->oap_lock);
                        oap->oap_async_flags |= ASYNC_HP;
                        cfs_spin_unlock(&oap->oap_lock);
                }

                if (osc0 == NULL) { /* first iteration */
                        cli = &exp->exp_obd->u.cli;
                        osc0 = osc;
                        client_obd_list_lock(&cli->cl_loi_list_lock);
                } else /* check that all pages are against the same object
                        * (for now) */
                        LASSERT(osc == osc0);

                if (!cfs_list_empty(&oap->oap_urgent_item) ||
                    !cfs_list_empty(&oap->oap_rpc_item)) {
                        result = -EBUSY;
                        break;
                }

                result = cl_page_prep(env, io, page, crt);
                if (result == 0) {
                        ++queued;
                        cl_page_list_move(qout, qin, page);
                        if (cfs_list_empty(&oap->oap_pending_item)) {
                                osc_io_submit_page(env, cl2osc_io(env, ios),
                                                   opg, crt);
                        } else {
                                result = osc_set_async_flags(osc, opg,
                                                             OSC_FLAGS);
                                /*
                                 * bug 18881: we can't just break out here when
                                 * an error occurs after cl_page_prep has been
                                 * called against the page. The correct
                                 * way is to call page's completion routine,
                                 * as in osc_oap_interrupted. For simplicity,
                                 * we just force osc_set_async_flags() to
                                 * not return an error.
                                 */
                                LASSERT(result == 0);
                        }
                        opg->ops_submit_time = cfs_time_current();
                } else {
                        LASSERT(result < 0);
                        if (result != -EALREADY)
                                break;
                        /*
                         * Handle -EALREADY error: for read case, the page is
                         * already in UPTODATE state; for write, the page
                         * is not dirty.
                         */
                        result = 0;
                }

                /*
                 * We might hold client_obd_list_lock() for too long and cause
                 * soft-lockups (see bug 16651). But on the other hand, pages
                 * are queued here with ASYNC_URGENT flag, thus will be sent
                 * out immediately once osc_io_unplug() is called, possibly
                 * resulting in sub-optimal RPCs.
                 *
                 * We think creating optimal-sized RPCs is more important than
                 * avoiding the transient soft-lockups, plus I believe the
                 * soft-locks only happen in full debug testing.
                 */
        }

        LASSERT(ergo(result == 0, cli != NULL));
        LASSERT(ergo(result == 0, osc == osc0));

        if (queued > 0)
                osc_io_unplug(env, cli, osc, PDL_POLICY_ROUND);
        if (osc0 != NULL)
                client_obd_list_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
        return qout->pl_nr > 0 ? 0 : result;
}
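/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a layer above OSC is expected to drive cio_submit() through the generic
 * cl_io code. The helper name submit_pages() is hypothetical, and the exact
 * cl_io_submit_rw() signature is an assumption for this Lustre vintage; the
 * cl_2queue semantics match osc_io_submit() above: pages successfully
 * prepared are moved from c2_qin to c2_qout, while anything left on c2_qin
 * was not submitted.
 *
 *      static int submit_pages(const struct lu_env *env, struct cl_io *io,
 *                              struct cl_2queue *queue)
 *      {
 *              int rc;
 *
 *              // cl_io_submit_rw() fans out to cio_submit on every layer,
 *              // ending in osc_io_submit() for the OSC slice.
 *              rc = cl_io_submit_rw(env, io, CRT_WRITE, queue, CRP_NORMAL);
 *
 *              // Pages now on queue->c2_qout are in flight; queue->c2_qin
 *              // still holds whatever could not be sent.
 *              return rc;
 *      }
 */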
static void osc_page_touch_at(const struct lu_env *env,
                              struct cl_object *obj, pgoff_t idx, unsigned to)
{
        struct lov_oinfo *loi  = cl2osc(obj)->oo_oinfo;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int valid;
        __u64 kms;

        /* offset within stripe */
        kms = cl_offset(obj, idx) + to;

        cl_object_attr_lock(obj);
        /*
         * XXX old code used
         *
         *         ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
         *
         * here.
         */
        CDEBUG(D_INODE, "stripe KMS %sincreasing "LPU64"->"LPU64" "LPU64"\n",
               kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
               loi->loi_lvb.lvb_size);

        valid = 0;
        if (kms > loi->loi_kms) {
                attr->cat_kms = kms;
                valid |= CAT_KMS;
        }
        if (kms > loi->loi_lvb.lvb_size) {
                attr->cat_size = kms;
                valid |= CAT_SIZE;
        }
        cl_object_attr_set(env, obj, attr, valid);
        cl_object_attr_unlock(obj);
}
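/*
 * Illustrative note (editorial addition, not from the original source):
 * cl_offset(obj, idx) converts a page index into a byte offset within the
 * stripe object, so the candidate KMS above is simply idx * PAGE_SIZE plus
 * the number of bytes written into that page. For example, with 4096-byte
 * pages, touching bytes [0, 100) of page index 3 yields
 * kms = 3 * 4096 + 100 = 12388, and the stripe KMS (and size) is raised only
 * if it is currently smaller than that value.
 */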
/**
 * This is called when a page is accessed within a file in a way that creates
 * a new page, if one were missing (i.e., if there were a hole at that place
 * in the file, or the accessed page is beyond the current file size).
 * Examples: ->commit_write() and ->nopage() methods.
 *
 * Expand stripe KMS if necessary.
 */
static void osc_page_touch(const struct lu_env *env,
                           struct osc_page *opage, unsigned to)
{
        struct cl_page   *page = opage->ops_cl.cpl_page;
        struct cl_object *obj  = opage->ops_cl.cpl_obj;

        osc_page_touch_at(env, obj, page->cp_index, to);
}
/**
 * Implements cl_io_operations::cio_prepare_write() method for osc layer.
 *
 * \retval -EIO transfer initiated against this osc will most likely fail
 * \retval 0    transfer initiated against this osc will most likely succeed.
 *
 * The reason for this check is to immediately return an error to the caller
 * in the case of a deactivated import. Note, that import can be deactivated
 * later, while pages, dirtied by this IO, are still in the cache, but this is
 * irrelevant, because that would still return an error to the application (if
 * it does fsync), but many applications don't do fsync because of performance
 * issues, and we wanted to return an -EIO at write time to notify the
 * caller.
 */
static int osc_io_prepare_write(const struct lu_env *env,
                                const struct cl_io_slice *ios,
                                const struct cl_page_slice *slice,
                                unsigned from, unsigned to)
{
        struct osc_device *dev = lu2osc_dev(slice->cpl_obj->co_lu.lo_dev);
        struct obd_import *imp = class_exp2cliimp(dev->od_exp);
        struct osc_io     *oio = cl2osc_io(env, ios);
        int result = 0;

        /*
         * This implements OBD_BRW_CHECK logic from old client.
         */
        if (imp == NULL || imp->imp_invalid)
                result = -EIO;
        if (result == 0 && oio->oi_lockless)
                /* this page contains `invalid' data, but who cares?
                 * nobody can access the invalid data.
                 * in osc_io_commit_write(), we're going to write exact
                 * [from, to) bytes of this page to OST. -jay */
                cl_page_export(env, slice->cpl_page, 1);

        return result;
}
static int osc_io_commit_write(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               const struct cl_page_slice *slice,
                               unsigned from, unsigned to)
{
        struct osc_io         *oio = cl2osc_io(env, ios);
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;

        LASSERT(to > 0);
        /*
         * XXX instead of calling osc_page_touch() here and in
         * osc_io_fault_start() it might be more logical to introduce
         * cl_page_touch() method, that generic cl_io_commit_write() and page
         * fault code calls.
         */
        osc_page_touch(env, cl2osc_page(slice), to);
        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE))
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;

        if (oio->oi_lockless)
                /* see osc_io_prepare_write() for lockless io handling. */
                cl_page_clip(env, slice->cpl_page, from, to);

        return 0;
}
static int osc_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct cl_io       *io  = ios->cis_io;
        struct cl_fault_io *fio;

        fio = &io->u.ci_fault;
        CDEBUG(D_INFO, "%lu %d %d\n",
               fio->ft_index, fio->ft_writable, fio->ft_nob);
        /*
         * If mapping is writeable, adjust kms to cover this page,
         * but do not extend kms beyond actual file size.
         */
        if (fio->ft_writable)
                osc_page_touch_at(env, ios->cis_obj,
                                  fio->ft_index, fio->ft_nob);
        return 0;
}
static int osc_async_upcall(void *a, int rc)
{
        struct osc_async_cbargs *args = a;

        args->opc_rc = rc;
        cfs_complete(&args->opc_sync);
        return 0;
}
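/*
 * Illustrative sketch (editorial addition, not from the original source):
 * osc_async_upcall() is the completion callback shared by the setattr/punch
 * and fsync paths below. The *_start method fires an asynchronous request and
 * the matching *_end method blocks on the same completion, roughly:
 *
 *      cfs_init_completion(&cbargs->opc_sync);
 *      rc = osc_punch_base(exp, &oinfo, osc_async_upcall, cbargs,
 *                          PTLRPCD_SET);
 *      ...
 *      // later, in the cio_end handler:
 *      cfs_wait_for_completion(&cbargs->opc_sync);
 *      rc = cbargs->opc_rc;    // result stored by osc_async_upcall()
 */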
/* Disable osc_trunc_check() because there is an inherent race between read
 * and truncate. See bug 20645 for details.
 */
#if 0 && defined(__KERNEL__)
/**
 * Checks that there are no pages being written in the extent being truncated.
 */
static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
                            struct osc_io *oio, size_t size)
{
        struct osc_object   *obj;
        struct cl_object    *clob;
        struct cl_page      *page;
        struct cl_page_list *list;
        struct osc_page     *cp;
        int partial;
        pgoff_t start;

        clob    = oio->oi_cl.cis_obj;
        obj     = cl2osc(clob);
        start   = cl_index(clob, size);
        partial = cl_offset(clob, start) < size;
        list    = &osc_env_info(env)->oti_plist;

        /*
         * Complain if there are pages in the truncated region.
         *
         * XXX this is quite an expensive check.
         */
        cl_page_list_init(list);
        cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF, list);

        cl_page_list_for_each(page, list)
                CL_PAGE_DEBUG(D_ERROR, env, page, "exists %lu\n", start);

        cl_page_list_disown(env, io, list);
        cl_page_list_fini(env, list);

        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_for_each_entry(cp, &obj->oo_inflight[CRT_WRITE],
                                ops_inflight) {
                page = cp->ops_cl.cpl_page;
                if (page->cp_index >= start + partial) {
                        cfs_task_t *submitter;

                        submitter = cp->ops_submitter;
                        /*
                         * XXX Linux specific debugging stuff.
                         */
                        CL_PAGE_DEBUG(D_ERROR, env, page, "%s/%d %lu\n",
                                      submitter->comm, submitter->pid, start);
                        libcfs_debug_dumpstack(submitter);
                }
        }
        cfs_spin_unlock(&obj->oo_seatbelt);
}
#else /* __KERNEL__ */
# define osc_trunc_check(env, io, oio, size) do {;} while (0)
#endif
static int osc_io_setattr_start(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        struct cl_io            *io     = slice->cis_io;
        struct osc_io           *oio    = cl2osc_io(env, slice);
        struct cl_object        *obj    = slice->cis_obj;
        struct lov_oinfo        *loi    = cl2osc(obj)->oo_oinfo;
        struct cl_attr          *attr   = &osc_env_info(env)->oti_attr;
        struct obdo             *oa     = &oio->oi_oa;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        loff_t                   size   = io->u.ci_setattr.sa_attr.lvb_size;
        unsigned int             ia_valid = io->u.ci_setattr.sa_valid;
        int                      result = 0;
        struct obd_info          oinfo = { { { 0 } } };

        if (ia_valid & ATTR_SIZE)
                osc_trunc_check(env, io, oio, size);

        if (oio->oi_lockless == 0) {
                cl_object_attr_lock(obj);
                result = cl_object_attr_get(env, obj, attr);
                if (result == 0) {
                        unsigned int cl_valid = 0;

                        if (ia_valid & ATTR_SIZE) {
                                attr->cat_size = attr->cat_kms = size;
                                cl_valid = (CAT_SIZE | CAT_KMS);
                        }
                        if (ia_valid & ATTR_MTIME_SET) {
                                attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
                                cl_valid |= CAT_MTIME;
                        }
                        if (ia_valid & ATTR_ATIME_SET) {
                                attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
                                cl_valid |= CAT_ATIME;
                        }
                        if (ia_valid & ATTR_CTIME_SET) {
                                attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
                                cl_valid |= CAT_CTIME;
                        }
                        result = cl_object_attr_set(env, obj, attr, cl_valid);
                }
                cl_object_attr_unlock(obj);
        }
        memset(oa, 0, sizeof(*oa));
        if (result == 0) {
                oa->o_id = loi->loi_id;
                oa->o_seq = loi->loi_seq;
                oa->o_mtime = attr->cat_mtime;
                oa->o_atime = attr->cat_atime;
                oa->o_ctime = attr->cat_ctime;
                oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
                              OBD_MD_FLCTIME | OBD_MD_FLMTIME;
                if (ia_valid & ATTR_SIZE) {
                        oa->o_size = size;
                        oa->o_blocks = OBD_OBJECT_EOF;
                        oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

                        if (oio->oi_lockless) {
                                oa->o_flags = OBD_FL_SRVLOCK;
                                oa->o_valid |= OBD_MD_FLFLAGS;
                        }
                } else {
                        LASSERT(oio->oi_lockless == 0);
                }

                oinfo.oi_oa = oa;
                oinfo.oi_capa = io->u.ci_setattr.sa_capa;
                cfs_init_completion(&cbargs->opc_sync);

                if (ia_valid & ATTR_SIZE)
                        result = osc_punch_base(osc_export(cl2osc(obj)),
                                                &oinfo, osc_async_upcall,
                                                cbargs, PTLRPCD_SET);
                else
                        result = osc_setattr_async_base(osc_export(cl2osc(obj)),
                                                        &oinfo, NULL,
                                                        osc_async_upcall,
                                                        cbargs, PTLRPCD_SET);
        }
        return result;
}
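/*
 * Note (editorial addition, not from the original source): for truncate the
 * request is encoded as a punch of the extent [new_size, OBD_OBJECT_EOF];
 * o_size carries the start of the punched range and o_blocks its end, which
 * is why both OBD_MD_FLSIZE and OBD_MD_FLBLOCKS are set above. For example,
 * truncating an object to 1 MiB sends o_size = 1048576 and
 * o_blocks = OBD_OBJECT_EOF. Non-size attribute changes go through the
 * regular asynchronous setattr path instead.
 */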
static void osc_io_setattr_end(const struct lu_env *env,
                               const struct cl_io_slice *slice)
{
        struct cl_io            *io     = slice->cis_io;
        struct osc_io           *oio    = cl2osc_io(env, slice);
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        int result;

        cfs_wait_for_completion(&cbargs->opc_sync);

        result = io->ci_result = cbargs->opc_rc;
        if (result == 0) {
                struct cl_object *obj = slice->cis_obj;

                if (oio->oi_lockless) {
                        /* lockless truncate */
                        struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);

                        LASSERT(cl_io_is_trunc(io));
                        /* XXX: Need a lock. */
                        osd->od_stats.os_lockless_truncates++;
                }
        }
}
static int osc_io_read_start(const struct lu_env *env,
                             const struct cl_io_slice *slice)
{
        struct osc_io    *oio  = cl2osc_io(env, slice);
        struct cl_object *obj  = slice->cis_obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int result = 0;

        if (oio->oi_lockless == 0) {
                cl_object_attr_lock(obj);
                result = cl_object_attr_get(env, obj, attr);
                if (result == 0) {
                        attr->cat_atime = LTIME_S(CFS_CURRENT_TIME);
                        result = cl_object_attr_set(env, obj, attr,
                                                    CAT_ATIME);
                }
                cl_object_attr_unlock(obj);
        }
        return result;
}
static int osc_io_write_start(const struct lu_env *env,
                              const struct cl_io_slice *slice)
{
        struct osc_io    *oio  = cl2osc_io(env, slice);
        struct cl_object *obj  = slice->cis_obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int result = 0;

        if (oio->oi_lockless == 0) {
                cl_object_attr_lock(obj);
                result = cl_object_attr_get(env, obj, attr);
                if (result == 0) {
                        attr->cat_mtime = attr->cat_ctime =
                                LTIME_S(CFS_CURRENT_TIME);
                        result = cl_object_attr_set(env, obj, attr,
                                                    CAT_MTIME | CAT_CTIME);
                }
                cl_object_attr_unlock(obj);
        }
        return result;
}
static int osc_io_fsync_start(const struct lu_env *env,
                              const struct cl_io_slice *slice)
{
        struct cl_io            *io     = slice->cis_io;
        struct osc_io           *oio    = cl2osc_io(env, slice);
        struct obdo             *oa     = &oio->oi_oa;
        struct obd_info         *oinfo  = &oio->oi_info;
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
        struct cl_object        *obj    = slice->cis_obj;
        struct lov_oinfo        *loi    = cl2osc(obj)->oo_oinfo;
        int result;

        memset(oa, 0, sizeof(*oa));
        oa->o_id = loi->loi_id;
        oa->o_seq = loi->loi_seq;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

        /* reload size and blocks for start and end of sync range */
        oa->o_size = io->u.ci_fsync.fi_start;
        oa->o_blocks = io->u.ci_fsync.fi_end;
        oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

        obdo_set_parent_fid(oa, io->u.ci_fsync.fi_fid);

        memset(oinfo, 0, sizeof(*oinfo));
        oinfo->oi_oa = oa;
        oinfo->oi_capa = io->u.ci_fsync.fi_capa;
        cfs_init_completion(&cbargs->opc_sync);

        result = osc_sync_base(osc_export(cl2osc(obj)), oinfo,
                               osc_async_upcall, cbargs, PTLRPCD_SET);
        return result;
}
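/*
 * Note (editorial addition, not from the original source): the byte range to
 * be synced is piggy-backed on the obdo in the same way the punch range is --
 * o_size holds fi_start and o_blocks holds fi_end. For a full-object sync the
 * caller would typically pass fi_start = 0 and fi_end = OBD_OBJECT_EOF, so
 * the OST flushes every dirty byte of the object.
 */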
static void osc_io_fsync_end(const struct lu_env *env,
                             const struct cl_io_slice *slice)
{
        struct cl_io            *io     = slice->cis_io;
        struct osc_io           *oio    = cl2osc_io(env, slice);
        struct osc_async_cbargs *cbargs = &oio->oi_cbarg;

        cfs_wait_for_completion(&cbargs->opc_sync);
        io->ci_result = cbargs->opc_rc;
}
static const struct cl_io_operations osc_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_start = osc_io_read_start,
                        .cio_fini  = osc_io_fini
                },
                [CIT_WRITE] = {
                        .cio_start = osc_io_write_start,
                        .cio_fini  = osc_io_fini
                },
                [CIT_SETATTR] = {
                        .cio_start = osc_io_setattr_start,
                        .cio_end   = osc_io_setattr_end
                },
                [CIT_FAULT] = {
                        .cio_fini  = osc_io_fini,
                        .cio_start = osc_io_fault_start
                },
                [CIT_FSYNC] = {
                        .cio_start = osc_io_fsync_start,
                        .cio_end   = osc_io_fsync_end,
                        .cio_fini  = osc_io_fini
                },
                [CIT_MISC] = {
                        .cio_fini  = osc_io_fini
                }
        },
        .req_op = {
                [CRT_READ] = {
                        .cio_submit = osc_io_submit
                },
                [CRT_WRITE] = {
                        .cio_submit = osc_io_submit
                }
        },
        .cio_prepare_write = osc_io_prepare_write,
        .cio_commit_write  = osc_io_commit_write
};
/*****************************************************************************
 *
 * Transfer operations.
 *
 */
static int osc_req_prep(const struct lu_env *env,
                        const struct cl_req_slice *slice)
{
        return 0;
}
static void osc_req_completion(const struct lu_env *env,
                               const struct cl_req_slice *slice, int ioret)
{
        struct osc_req *or;

        or = cl2osc_req(slice);
        OBD_SLAB_FREE_PTR(or, osc_req_kmem);
}
/**
 * Implementation of struct cl_req_operations::cro_attr_set() for osc
 * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
 * fields.
 */
static void osc_req_attr_set(const struct lu_env *env,
                             const struct cl_req_slice *slice,
                             const struct cl_object *obj,
                             struct cl_req_attr *attr, obd_valid flags)
{
        struct lov_oinfo *oinfo;
        struct cl_req    *clerq;
        struct cl_page   *apage; /* _some_ page in @clerq */
        struct cl_lock   *lock;  /* _some_ lock protecting @apage */
        struct osc_lock  *olck;
        struct osc_page  *opg;
        struct obdo      *oa;

        oa = attr->cra_oa;
        oinfo = cl2osc(obj)->oo_oinfo;
        if (flags & OBD_MD_FLID) {
                oa->o_id = oinfo->loi_id;
                oa->o_valid |= OBD_MD_FLID;
        }
        if (flags & OBD_MD_FLGROUP) {
                oa->o_seq = oinfo->loi_seq;
                oa->o_valid |= OBD_MD_FLGROUP;
        }
        if (flags & OBD_MD_FLHANDLE) {
                clerq = slice->crs_req;
                LASSERT(!cfs_list_empty(&clerq->crq_pages));
                apage = container_of(clerq->crq_pages.next,
                                     struct cl_page, cp_flight);
                opg = osc_cl_page_osc(apage);
                apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
                lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
                if (lock == NULL) {
                        struct cl_object_header *head;
                        struct cl_lock          *scan;

                        head = cl_object_header(apage->cp_obj);
                        cfs_list_for_each_entry(scan, &head->coh_locks,
                                                cll_linkage)
                                CL_LOCK_DEBUG(D_ERROR, env, scan,
                                              "no cover page!\n");
                        CL_PAGE_DEBUG(D_ERROR, env, apage,
                                      "dump uncover page!\n");
                        libcfs_debug_dumpstack(NULL);
                        LBUG();
                }

                olck = osc_lock_at(lock);
                LASSERT(olck != NULL);
                LASSERT(ergo(opg->ops_srvlock, olck->ols_lock == NULL));
                /* check for lockless io. */
                if (olck->ols_lock != NULL) {
                        oa->o_handle = olck->ols_lock->l_remote_handle;
                        oa->o_valid |= OBD_MD_FLHANDLE;
                }
                cl_lock_put(env, lock);
        }
}
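/*
 * Note (editorial addition, not from the original source): the OBD_MD_FLHANDLE
 * branch above picks an arbitrary page of the request, finds the DLM lock
 * covering it, and copies that lock's remote handle into o_handle so the OST
 * can match the IO against the client's lock. For lockless (server-lock) IO
 * there is no client-side DLM lock (ols_lock == NULL), so the handle is
 * simply omitted.
 */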
static const struct cl_req_operations osc_req_ops = {
        .cro_prep       = osc_req_prep,
        .cro_attr_set   = osc_req_attr_set,
        .cro_completion = osc_req_completion
};
int osc_io_init(const struct lu_env *env,
                struct cl_object *obj, struct cl_io *io)
{
        struct osc_io *oio = osc_env_io(env);

        CL_IO_SLICE_CLEAN(oio, oi_cl);
        cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops);
        return 0;
}
int osc_req_init(const struct lu_env *env, struct cl_device *dev,
                 struct cl_req *req)
{
        struct osc_req *or;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, CFS_ALLOC_IO);
        if (or != NULL) {
                cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
                result = 0;
        } else
                result = -ENOMEM;
        return result;
}