/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

/*****************************************************************************
 *
 * Type conversions.
 *
 */

static struct osc_req *cl2osc_req(const struct cl_req_slice *slice)
{
        LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type);
        return container_of0(slice, struct osc_req, or_cl);
}

static struct osc_io *cl2osc_io(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        struct osc_io *oio = container_of0(slice, struct osc_io, oi_cl);

        LINVRNT(oio == osc_env_io(env));
        return oio;
}

static struct osc_page *osc_cl_page_osc(struct cl_page *page)
{
        const struct cl_page_slice *slice;

        slice = cl_page_at(page, &osc_device_type);
        LASSERT(slice != NULL);

        return cl2osc_page(slice);
}

/*****************************************************************************
 *
 * io operations.
 *
 */

static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
{
}

struct cl_page *osc_oap2cl_page(struct osc_async_page *oap)
{
        return container_of(oap, struct osc_page, ops_oap)->ops_cl.cpl_page;
}

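/*
 * Flushes the pages queued up so far. Note the locking contract implied by
 * the body below: the caller is expected to hold cli->cl_loi_list_lock, and
 * this function drops it before returning.
 */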
static void osc_io_unplug(const struct lu_env *env, struct osc_object *osc,
                          struct client_obd *cli)
{
        loi_list_maint(cli, osc->oo_oinfo);
        osc_check_rpcs(env, cli);
        client_obd_list_unlock(&cli->cl_loi_list_lock);
}

/**
 * How many pages osc_io_submit() queues before checking whether an RPC is
 * ready.
 */
#define OSC_QUEUE_GRAIN (32)
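/*
 * A note on the value: with 4kB pages, 32 pages is 128kB of queued data per
 * check, well below a typical 1MB maximum RPC; see the XXX comment in
 * osc_io_submit() for the trade-off this granularity embodies.
 */
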
/**
 * An implementation of cl_io_operations::cio_io_submit() method for osc
 * layer. Iterates over pages in the in-queue, prepares each for io by calling
 * cl_page_prep() and then either submits them through osc_io_submit_page()
 * or, if the page is already submitted, changes osc flags through
 * osc_set_async_flags_base().
 */
static int osc_io_submit(const struct lu_env *env,
                         const struct cl_io_slice *ios,
                         enum cl_req_type crt, struct cl_2queue *queue,
                         enum cl_req_priority priority)
{
        struct cl_page    *page;
        struct cl_page    *tmp;
        struct osc_object *osc0 = NULL;
        struct client_obd *cli  = NULL;
        struct osc_object *osc  = NULL; /* to keep gcc happy */
        struct osc_page   *opg;
        struct cl_io      *io;

        struct cl_page_list *qin  = &queue->c2_qin;
        struct cl_page_list *qout = &queue->c2_qout;
        int queued = 0;
        int result = 0;

        LASSERT(qin->pl_nr > 0);

        CDEBUG(D_INFO, "%i %i\n", qin->pl_nr, crt);
        /*
         * NOTE: here @page is a top-level page. This is done to avoid
         *       creation of sub-page-list.
         */
        cl_page_list_for_each_safe(page, tmp, qin) {
                struct osc_async_page *oap;
                struct obd_export     *exp;

                /* Top level IO. */
                io = page->cp_owner;
                LASSERT(io != NULL);

                opg = osc_cl_page_osc(page);
                oap = &opg->ops_oap;
                osc = cl2osc(opg->ops_cl.cpl_obj);
                exp = osc_export(osc);
                if (priority > CRP_NORMAL) {
                        spin_lock(&oap->oap_lock);
                        oap->oap_async_flags |= ASYNC_HP;
                        spin_unlock(&oap->oap_lock);
                }
                /*
                 * This can be checked without cli->cl_loi_list_lock, because
                 * ->oap_*_item are always manipulated when the page is owned.
                 */
                if (!list_empty(&oap->oap_urgent_item) ||
                    !list_empty(&oap->oap_rpc_item)) {
                        result = -EBUSY;
                        break;
                }
                if (osc0 == NULL) { /* first iteration */
                        cli = &exp->exp_obd->u.cli;
                        osc0 = osc;
                } else /* check that all pages are against the same object
                        * (for now) */
                        LASSERT(osc == osc0);

                if (queued++ == 0)
                        client_obd_list_lock(&cli->cl_loi_list_lock);
                result = cl_page_prep(env, io, page, crt);
                if (result == 0) {
                        cl_page_list_move(qout, qin, page);
                        if (list_empty(&oap->oap_pending_item)) {
                                osc_io_submit_page(env, cl2osc_io(env, ios),
                                                   opg, crt);
                        } else {
                                result = osc_set_async_flags_base(cli,
                                                                  osc->oo_oinfo,
                                                                  oap,
                                                                  OSC_FLAGS);
                                /*
                                 * bug 18881: we can't just break out here when
                                 * an error occurs after cl_page_prep has been
                                 * called against the page. The correct way is
                                 * to call the page's completion routine, as in
                                 * osc_oap_interrupted. For simplicity, we just
                                 * force osc_set_async_flags_base() to not
                                 * return an error.
                                 */
                                LASSERT(result == 0);
                        }
                        opg->ops_submit_time = cfs_time_current();
                } else {
                        LASSERT(result < 0);
                        if (result != -EALREADY)
                                break;
                        /*
                         * Handle -EALREADY error: for the read case, the page
                         * is already in UPTODATE state; for write, the page
                         * is not dirty.
                         */
                        result = 0;
                }
                /*
                 * Don't keep client_obd_list_lock() for too long.
                 *
                 * XXX client_obd_list lock has to be unlocked periodically to
                 * avoid soft-lockups that tend to happen otherwise (see bug
                 * 16651). On the other hand, osc_io_submit_page() queues a
                 * page with ASYNC_URGENT flag and so all pages queued up
                 * until this point are sent out immediately by
                 * osc_io_unplug() resulting in sub-optimal RPCs (sub-optimal
                 * RPCs only happen during the `warm up' phase when fewer than
                 * cl_max_rpcs_in_flight RPCs are in flight). To balance these
                 * conflicting requirements, one might unplug once enough
                 * pages to form a large RPC were queued (i.e., use
                 * cli->cl_max_pages_per_rpc as OSC_QUEUE_GRAIN, see
                 * lop_makes_rpc()), or ignore the soft-lockup issue
                 * altogether.
                 *
                 * XXX lock_need_resched() should be used here, but it is not
                 * available in the oldest supported kernels.
                 */
                if (queued > OSC_QUEUE_GRAIN || cfs_need_resched()) {
                        queued = 0;
                        osc_io_unplug(env, osc, cli);
                        cfs_cond_resched();
                }
        }

        LASSERT(ergo(result == 0, cli != NULL));
        LASSERT(ergo(result == 0, osc == osc0));

        if (queued > 0)
                osc_io_unplug(env, osc, cli);
        CDEBUG(D_INFO, "%i/%i %i\n", qin->pl_nr, qout->pl_nr, result);
        return qout->pl_nr > 0 ? 0 : result;
}

static void osc_page_touch_at(const struct lu_env *env,
                              struct cl_object *obj, pgoff_t idx, unsigned to)
{
        struct lov_oinfo *loi  = cl2osc(obj)->oo_oinfo;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int valid;
        __u64 kms;

        /* offset within stripe */
        kms = cl_offset(obj, idx) + to;

        cl_object_attr_lock(obj);
        /*
         * XXX old code used
         *
         *     ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
         *
         * here.
         */
        CDEBUG(D_INODE, "stripe KMS %sincreasing "LPU64"->"LPU64" "LPU64"\n",
               kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
               loi->loi_lvb.lvb_size);
        valid = 0;
        if (kms > loi->loi_kms) {
                attr->cat_kms = kms;
                valid |= CAT_KMS;
        }
        if (kms > loi->loi_lvb.lvb_size) {
                attr->cat_size = kms;
                valid |= CAT_SIZE;
        }
        cl_object_attr_set(env, obj, attr, valid);
        cl_object_attr_unlock(obj);
}

/**
 * This is called when a page is accessed within a file in a way that creates
 * a new page, if one were missing (i.e., if there were a hole at that place
 * in the file, or if the accessed page is beyond the current file size).
 * Examples: ->commit_write() and ->nopage() methods.
 *
 * Expand stripe KMS if necessary.
 */
static void osc_page_touch(const struct lu_env *env,
                           struct osc_page *opage, unsigned to)
{
        struct cl_page   *page = opage->ops_cl.cpl_page;
        struct cl_object *obj  = opage->ops_cl.cpl_obj;

        osc_page_touch_at(env, obj, page->cp_index, to);
}

/**
 * Implements cl_io_operations::cio_prepare_write() method for osc layer.
 *
 * \retval -EIO transfer initiated against this osc will most likely fail
 * \retval 0    transfer initiated against this osc will most likely succeed.
 *
 * The reason for this check is to immediately return an error to the caller
 * in the case of a deactivated import. Note that the import can be
 * deactivated later, while pages dirtied by this IO are still in the cache,
 * but this is irrelevant, because that would still return an error to the
 * application (if it does fsync), but many applications don't do fsync
 * because of performance issues, and we wanted to return an -EIO at write
 * time to notify the caller.
 */
static int osc_io_prepare_write(const struct lu_env *env,
                                const struct cl_io_slice *ios,
                                const struct cl_page_slice *slice,
                                unsigned from, unsigned to)
{
        struct osc_device *dev = lu2osc_dev(slice->cpl_obj->co_lu.lo_dev);
        struct obd_import *imp = class_exp2cliimp(dev->od_exp);
        ENTRY;

        /*
         * This implements OBD_BRW_CHECK logic from old client.
         */
        RETURN(imp == NULL || imp->imp_invalid ? -EIO : 0);
}

static int osc_io_commit_write(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               const struct cl_page_slice *slice,
                               unsigned from, unsigned to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;
        ENTRY;

        LASSERT(to > 0);
        /*
         * XXX instead of calling osc_page_touch() here and in
         * osc_io_fault_start() it might be more logical to introduce
         * cl_page_touch() method, that generic cl_io_commit_write() and page
         * fault code calls.
         */
        osc_page_touch(env, cl2osc_page(slice), to);
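        /*
         * A local client with CAP_SYS_RESOURCE is trusted to exceed quota
         * (e.g., for root IO); mark the page so the OST skips quota
         * enforcement for it.
         */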
        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE))
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;

        RETURN(0);
}

static int osc_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct cl_io       *io;
        struct cl_fault_io *fio;
        ENTRY;

        io  = ios->cis_io;
        fio = &io->u.ci_fault;
        CDEBUG(D_INFO, "%lu %i %i\n",
               fio->ft_index, fio->ft_writable, fio->ft_nob);
        /*
         * If mapping is writeable, adjust kms to cover this page,
         * but do not extend kms beyond actual file size.
         */
        if (fio->ft_writable)
                osc_page_touch_at(env, ios->cis_obj,
                                  fio->ft_index, fio->ft_nob);
        RETURN(0);
}

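/*
 * Completion callback for the asynchronous OST_PUNCH issued by
 * osc_io_trunc_start(). The surrounding code follows the usual async-RPC
 * handshake:
 *
 *     init_completion(&args->opc_sync);
 *     osc_punch_base(..., osc_punch_upcall, args, PTLRPCD_SET);
 *     ...
 *     wait_for_completion(&args->opc_sync);   (in osc_io_trunc_end())
 */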
static int osc_punch_upcall(void *a, int rc)
{
        struct osc_punch_cbargs *args = a;

        args->opc_rc = rc;
        complete(&args->opc_sync);
        return 0;
}

/* Disable osc_trunc_check() because there is a natural race between read and
 * truncate. See bug 20645 for details.
 */
#if 0 && defined(__KERNEL__)
/**
 * Checks that there are no pages being written in the extent being truncated.
 */
static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
                            struct osc_io *oio, size_t size)
{
        struct osc_page     *cp;
        struct osc_object   *obj;
        struct cl_object    *clob;
        struct cl_page      *page;
        struct cl_page_list *list;
        int                  partial;
        pgoff_t              start;

        clob    = oio->oi_cl.cis_obj;
        obj     = cl2osc(clob);
        start   = cl_index(clob, size);
        partial = cl_offset(clob, start) < size;
        list    = &osc_env_info(env)->oti_plist;
        /*
         * Complain if there are pages in the truncated region.
         *
         * XXX this is quite an expensive check.
         */
        cl_page_list_init(list);
        cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF,
                            list, 0);

        cl_page_list_for_each(page, list)
                CL_PAGE_DEBUG(D_ERROR, env, page, "exists %lu\n", start);

        cl_page_list_disown(env, io, list);
        cl_page_list_fini(env, list);
        spin_lock(&obj->oo_seatbelt);
        list_for_each_entry(cp, &obj->oo_inflight[CRT_WRITE], ops_inflight) {
                page = cp->ops_cl.cpl_page;
                if (page->cp_index >= start + partial) {
                        cfs_task_t *submitter;

                        submitter = cp->ops_submitter;
                        /*
                         * XXX Linux specific debugging stuff.
                         */
                        CL_PAGE_DEBUG(D_ERROR, env, page, "%s/%i %lu\n",
                                      submitter->comm, submitter->pid, start);
                        libcfs_debug_dumpstack(submitter);
                }
        }
        spin_unlock(&obj->oo_seatbelt);
}
#else /* __KERNEL__ */
# define osc_trunc_check(env, io, oio, size) do {;} while (0)
#endif

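/*
 * Starts a truncate: unless the IO is lockless, the local object attributes
 * (size and KMS) are updated first under the attribute lock; an asynchronous
 * OST_PUNCH is then fired through osc_punch_base(), to be waited upon in
 * osc_io_trunc_end().
 */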
static int osc_io_trunc_start(const struct lu_env *env,
                              const struct cl_io_slice *slice)
{
        struct cl_io            *io     = slice->cis_io;
        struct osc_io           *oio    = cl2osc_io(env, slice);
        struct cl_object        *obj    = slice->cis_obj;
        struct lov_oinfo        *loi    = cl2osc(obj)->oo_oinfo;
        struct cl_attr          *attr   = &osc_env_info(env)->oti_attr;
        struct obdo             *oa     = &oio->oi_oa;
        struct osc_punch_cbargs *cbargs = &oio->oi_punch_cbarg;
        struct obd_capa         *capa;
        loff_t                   size   = io->u.ci_truncate.tr_size;
        int                      result = 0;

        osc_trunc_check(env, io, oio, size);

        if (oio->oi_lockless == 0) {
                cl_object_attr_lock(obj);
                result = cl_object_attr_get(env, obj, attr);
                if (result == 0) {
                        attr->cat_size = attr->cat_kms = size;
                        result = cl_object_attr_set(env, obj, attr,
                                                    CAT_SIZE|CAT_KMS);
                }
                cl_object_attr_unlock(obj);
        }

        memset(oa, 0, sizeof(*oa));
        if (result == 0) {
                oa->o_id = loi->loi_id;
                oa->o_gr = loi->loi_gr;
                oa->o_mtime = attr->cat_mtime;
                oa->o_atime = attr->cat_atime;
                oa->o_ctime = attr->cat_ctime;
                oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
                        OBD_MD_FLCTIME | OBD_MD_FLMTIME;
                if (oio->oi_lockless) {
                        oa->o_flags = OBD_FL_TRUNCLOCK;
                        oa->o_valid |= OBD_MD_FLFLAGS;
                }
                oa->o_size = size;
                oa->o_blocks = OBD_OBJECT_EOF;
                oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;

                capa = io->u.ci_truncate.tr_capa;
                init_completion(&cbargs->opc_sync);
                result = osc_punch_base(osc_export(cl2osc(obj)), oa, capa,
                                        osc_punch_upcall, cbargs, PTLRPCD_SET);
        }
        return result;
}

static void osc_io_trunc_end(const struct lu_env *env,
                             const struct cl_io_slice *slice)
{
        struct cl_io            *io     = slice->cis_io;
        struct osc_io           *oio    = cl2osc_io(env, slice);
        struct osc_punch_cbargs *cbargs = &oio->oi_punch_cbarg;
        struct obdo             *oa     = &oio->oi_oa;
        int result;

        wait_for_completion(&cbargs->opc_sync);

        result = io->ci_result = cbargs->opc_rc;
        if (result == 0) {
                struct cl_object *obj = slice->cis_obj;

                if (oio->oi_lockless == 0) {
                        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
                        int valid = 0;

                        /* Update kms & size */
                        if (oa->o_valid & OBD_MD_FLSIZE) {
                                attr->cat_size = oa->o_size;
                                attr->cat_kms  = oa->o_size;
                                valid |= CAT_KMS|CAT_SIZE;
                        }
                        if (oa->o_valid & OBD_MD_FLBLOCKS) {
                                attr->cat_blocks = oa->o_blocks;
                                valid |= CAT_BLOCKS;
                        }
                        if (oa->o_valid & OBD_MD_FLMTIME) {
                                attr->cat_mtime = oa->o_mtime;
                                valid |= CAT_MTIME;
                        }
                        if (oa->o_valid & OBD_MD_FLCTIME) {
                                attr->cat_ctime = oa->o_ctime;
                                valid |= CAT_CTIME;
                        }
                        if (oa->o_valid & OBD_MD_FLATIME) {
                                attr->cat_atime = oa->o_atime;
                                valid |= CAT_ATIME;
                        }
                        cl_object_attr_lock(obj);
                        result = cl_object_attr_set(env, obj, attr, valid);
                        cl_object_attr_unlock(obj);
                } else { /* lockless truncate */
                        struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);

                        /* XXX: Need a lock. */
                        osd->od_stats.os_lockless_truncates++;
                }
        }
}

static const struct cl_io_operations osc_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_fini  = osc_io_fini
                },
                [CIT_WRITE] = {
                        .cio_fini  = osc_io_fini
                },
                [CIT_TRUNC] = {
                        .cio_start = osc_io_trunc_start,
                        .cio_end   = osc_io_trunc_end
                },
                [CIT_FAULT] = {
                        .cio_fini  = osc_io_fini,
                        .cio_start = osc_io_fault_start
                },
                [CIT_MISC] = {
                        .cio_fini  = osc_io_fini
                }
        },
        .req_op = {
                [CRT_READ] = {
                        .cio_submit = osc_io_submit
                },
                [CRT_WRITE] = {
                        .cio_submit = osc_io_submit
                }
        },
        .cio_prepare_write = osc_io_prepare_write,
        .cio_commit_write  = osc_io_commit_write
};

/*****************************************************************************
 *
 * Transfer operations.
 *
 */

static int osc_req_prep(const struct lu_env *env,
                        const struct cl_req_slice *slice)
{
        return 0;
}

static void osc_req_completion(const struct lu_env *env,
                               const struct cl_req_slice *slice, int ioret)
{
        struct osc_req *or;

        or = cl2osc_req(slice);
        OBD_SLAB_FREE_PTR(or, osc_req_kmem);
}

/**
 * Implementation of struct cl_req_operations::cro_attr_set() for osc
 * layer. osc is responsible for struct obdo::o_id and struct obdo::o_gr
 * fields.
 */
static void osc_req_attr_set(const struct lu_env *env,
                             const struct cl_req_slice *slice,
                             const struct cl_object *obj,
                             struct cl_req_attr *attr, obd_valid flags)
{
        struct lov_oinfo *oinfo;
        struct cl_req    *clerq;
        struct cl_page   *apage; /* _some_ page in @clerq */
        struct cl_lock   *lock;  /* _some_ lock protecting @apage */
        struct osc_lock  *olck;
        struct osc_page  *opg;
        struct obdo      *oa;

        oa = attr->cra_oa;
        oinfo = cl2osc(obj)->oo_oinfo;
        if (flags & OBD_MD_FLID) {
                oa->o_id = oinfo->loi_id;
                oa->o_valid |= OBD_MD_FLID;
        }
        if (flags & OBD_MD_FLGROUP) {
                oa->o_gr = oinfo->loi_gr;
                oa->o_valid |= OBD_MD_FLGROUP;
        }
        if (flags & OBD_MD_FLHANDLE) {
                clerq = slice->crs_req;
                LASSERT(!list_empty(&clerq->crq_pages));
                apage = container_of(clerq->crq_pages.next,
                                     struct cl_page, cp_flight);
                opg = osc_cl_page_osc(apage);
                apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
                lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
                if (lock == NULL) {
                        struct cl_object_header *head;
                        struct cl_lock          *scan;

                        head = cl_object_header(apage->cp_obj);
                        list_for_each_entry(scan, &head->coh_locks,
                                            cll_linkage)
                                CL_LOCK_DEBUG(D_ERROR, env, scan,
                                              "no cover page!\n");
                        CL_PAGE_DEBUG(D_ERROR, env, apage,
                                      "dump uncover page!\n");
                        libcfs_debug_dumpstack(NULL);
                        LBUG();
                }
                olck = osc_lock_at(lock);
                LASSERT(olck != NULL);
                /* check for lockless io. */
                if (olck->ols_lock != NULL) {
                        oa->o_handle = olck->ols_lock->l_remote_handle;
                        oa->o_valid |= OBD_MD_FLHANDLE;
                }
                cl_lock_put(env, lock);
        }
}

static const struct cl_req_operations osc_req_ops = {
        .cro_prep       = osc_req_prep,
        .cro_attr_set   = osc_req_attr_set,
        .cro_completion = osc_req_completion
};

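/*
 * Note that the osc_io slice is not allocated here: it lives in the
 * per-thread environment (osc_env_io()), so it has to be scrubbed with
 * CL_IO_SLICE_CLEAN() before being linked into a new cl_io.
 */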
int osc_io_init(const struct lu_env *env,
                struct cl_object *obj, struct cl_io *io)
{
        struct osc_io *oio = osc_env_io(env);

        CL_IO_SLICE_CLEAN(oio, oi_cl);
        cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops);
        return 0;
}

int osc_req_init(const struct lu_env *env, struct cl_device *dev,
                 struct cl_req *req)
{
        struct osc_req *or;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, CFS_ALLOC_IO);
        if (or != NULL) {
                cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
                result = 0;
        } else
                result = -ENOMEM;
        return result;
}