/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for OSC layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */
/** \addtogroup osc osc @{ */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"
/*****************************************************************************
 *
 * Type conversions.
 *
 */

static struct osc_req *cl2osc_req(const struct cl_req_slice *slice)
{
        LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type);
        return container_of0(slice, struct osc_req, or_cl);
}

static struct osc_io *cl2osc_io(const struct lu_env *env,
                                const struct cl_io_slice *slice)
{
        struct osc_io *oio = container_of0(slice, struct osc_io, oi_cl);

        LINVRNT(oio == osc_env_io(env));
        return oio;
}
static struct osc_page *osc_cl_page_osc(struct cl_page *page)
{
        const struct cl_page_slice *slice;

        slice = cl_page_at(page, &osc_device_type);
        LASSERT(slice != NULL);

        return cl2osc_page(slice);
}
/*****************************************************************************
 *
 * io operations.
 *
 */

static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
{
}

struct cl_page *osc_oap2cl_page(struct osc_async_page *oap)
{
        return container_of(oap, struct osc_page, ops_oap)->ops_cl.cpl_page;
}
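
/*
 * Helper used by osc_io_submit(): nudges the per-object lists and the RPC
 * engine so that pages queued so far can be sent, then drops
 * cli->cl_loi_list_lock, which the caller is expected to hold.
 */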
static void osc_io_unplug(const struct lu_env *env, struct osc_object *osc,
                          struct client_obd *cli)
{
        loi_list_maint(cli, osc->oo_oinfo);
        osc_check_rpcs(env, cli);
        client_obd_list_unlock(&cli->cl_loi_list_lock);
}
/**
 * How many pages osc_io_submit() queues before checking whether an RPC is
 * ready.
 */
#define OSC_QUEUE_GRAIN (32)
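
/*
 * Illustration (hypothetical numbers): with 4096-byte pages and the common
 * 1MB maximum RPC size (256 pages per RPC), a grain of 32 means
 * osc_io_submit() re-checks the RPC engine, and may unplug, up to eight
 * times while queueing enough pages for a single full-sized RPC.
 */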
/**
 * An implementation of cl_io_operations::cio_io_submit() method for osc
 * layer. Iterates over pages in the in-queue, prepares each for io by calling
 * cl_page_prep() and then either submits them through osc_io_submit_page()
 * or, if page is already submitted, changes osc flags through
 * osc_set_async_flags_base().
 */
static int osc_io_submit(const struct lu_env *env,
                         const struct cl_io_slice *ios,
                         enum cl_req_type crt, struct cl_2queue *queue)
{
        struct cl_page    *page;
        struct cl_page    *tmp;
        struct osc_object *osc0 = NULL;
        struct client_obd *cli  = NULL;
        struct osc_object *osc  = NULL; /* to keep gcc happy */
        struct osc_page   *opg;
        struct cl_io      *io   = ios->cis_io;

        struct cl_page_list *qin  = &queue->c2_qin;
        struct cl_page_list *qout = &queue->c2_qout;
        int queued = 0;
        int result = 0;

        LASSERT(qin->pl_nr > 0);

        CDEBUG(D_INFO, "%i %i\n", qin->pl_nr, crt);
        /*
         * NOTE: here @page is a top-level page. This is done to avoid
         *       creation of sub-page-list.
         */
        cl_page_list_for_each_safe(page, tmp, qin) {
                struct osc_async_page *oap;
                struct obd_export     *exp;

                opg = osc_cl_page_osc(page);
                oap = &opg->ops_oap;
                osc = cl2osc(opg->ops_cl.cpl_obj);
                exp = osc_export(osc);

                /*
                 * This can be checked without cli->cl_loi_list_lock, because
                 * ->oap_*_item are always manipulated when the page is owned.
                 */
                if (!list_empty(&oap->oap_urgent_item) ||
                    !list_empty(&oap->oap_rpc_item)) {
                        result = -EBUSY;
                        break;
                }

                if (osc0 == NULL) { /* first iteration */
                        cli = &exp->exp_obd->u.cli;
                        osc0 = osc;
                } else
                        /* check that all pages are against the same object */
                        LASSERT(osc == osc0);
                if (queued++ == 0)
                        client_obd_list_lock(&cli->cl_loi_list_lock);
                result = cl_page_prep(env, io, page, crt);
                if (result == 0) {
                        cl_page_list_move(qout, qin, page);
                        if (list_empty(&oap->oap_pending_item)) {
                                osc_io_submit_page(env, cl2osc_io(env, ios),
                                                   opg, crt);
                        } else {
                                result = osc_set_async_flags_base(cli,
                                                                  osc->oo_oinfo,
                                                                  oap,
                                                                  OSC_FLAGS);
                        }
                } else {
                        LASSERT(result < 0);
                        if (result != -EALREADY)
                                break;
                        /*
                         * Handle -EALREADY error: for read case, the page is
                         * already in UPTODATE state; for write, the page is
                         * not dirty.
                         */
                        result = 0;
                }
                /*
                 * Don't keep client_obd_list_lock() for too long.
                 *
                 * XXX client_obd_list lock has to be unlocked periodically to
                 * avoid soft-lockups that tend to happen otherwise (see bug
                 * 16651). On the other hand, osc_io_submit_page() queues a
                 * page with ASYNC_URGENT flag and so all pages queued up
                 * until this point are sent out immediately by
                 * osc_io_unplug() resulting in sub-optimal RPCs (sub-optimal
                 * RPCs only happen during `warm up' phase when less than
                 * cl_max_rpcs_in_flight RPCs are in flight). To balance these
                 * conflicting requirements, one might unplug once enough
                 * pages to form a large RPC were queued (i.e., use
                 * cli->cl_max_pages_per_rpc as OSC_QUEUE_GRAIN, see
                 * lop_makes_rpc()), or ignore the soft-lockup issue
                 * altogether.
                 *
                 * XXX lock_need_resched() should be used here, but it is not
                 * available in older supported kernels.
                 */
                if (queued > OSC_QUEUE_GRAIN || cfs_need_resched()) {
                        queued = 0;
                        osc_io_unplug(env, osc, cli);
                        cfs_cond_resched();
                }
        }

        LASSERT(ergo(result == 0, cli != NULL));
        LASSERT(ergo(result == 0, osc == osc0));

        if (queued > 0)
                osc_io_unplug(env, osc, cli);
        CDEBUG(D_INFO, "%i/%i %i\n", qin->pl_nr, qout->pl_nr, result);
        return qout->pl_nr > 0 ? 0 : result;
}
static void osc_page_touch_at(const struct lu_env *env,
                              struct cl_object *obj, pgoff_t idx, unsigned to)
{
        struct lov_oinfo *loi  = cl2osc(obj)->oo_oinfo;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int valid;
        __u64 kms;

        /* offset within stripe */
        kms = cl_offset(obj, idx) + to;
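        /*
         * Worked example (hypothetical numbers, assuming a 4096-byte page
         * size): touching page index 3 up to byte 100 gives
         * kms = 3 * 4096 + 100 = 12388, the candidate known minimum size
         * for this stripe object.
         */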
        cl_object_attr_lock(obj);
        /*
         * XXX old code used
         *
         *         ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
         *
         * here.
         */
        CDEBUG(D_INODE, "stripe KMS %sincreasing "LPU64"->"LPU64" "LPU64"\n",
               kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
               loi->loi_lvb.lvb_size);

        valid = 0;
        if (kms > loi->loi_kms) {
                attr->cat_kms = kms;
                valid |= CAT_KMS;
        }
        if (kms > loi->loi_lvb.lvb_size) {
                attr->cat_size = kms;
                valid |= CAT_SIZE;
        }
        cl_object_attr_set(env, obj, attr, valid);
        cl_object_attr_unlock(obj);
}
/**
 * This is called when a page is accessed within a file in a way that creates
 * a new page, if one were missing (i.e., if there was a hole at that place in
 * the file, or the accessed page is beyond the current file size). Examples:
 * ->commit_write() and ->nopage() methods.
 *
 * Expand stripe KMS if necessary.
 */
static void osc_page_touch(const struct lu_env *env,
                           struct osc_page *opage, unsigned to)
{
        struct cl_page   *page = opage->ops_cl.cpl_page;
        struct cl_object *obj  = opage->ops_cl.cpl_obj;

        osc_page_touch_at(env, obj, page->cp_index, to);
}
/**
 * Implements cl_io_operations::cio_prepare_write() method for osc layer.
 *
 * \retval -EIO transfer initiated against this osc will most likely fail
 * \retval 0    transfer initiated against this osc will most likely succeed.
 *
 * The reason for this check is to immediately return an error to the caller
 * in the case of a deactivated import. Note that the import can be
 * deactivated later, while pages dirtied by this IO are still in the cache;
 * this is irrelevant, because an error would still be returned to the
 * application (if it does fsync), but many applications don't fsync for
 * performance reasons, and we wanted to return an -EIO at write time to
 * notify the application.
 */
static int osc_io_prepare_write(const struct lu_env *env,
                                const struct cl_io_slice *ios,
                                const struct cl_page_slice *slice,
                                unsigned from, unsigned to)
{
        struct osc_device *dev = lu2osc_dev(slice->cpl_obj->co_lu.lo_dev);
        struct obd_import *imp = class_exp2cliimp(dev->od_exp);

        ENTRY;

        /*
         * This implements OBD_BRW_CHECK logic from old client.
         */
        RETURN(imp == NULL || imp->imp_invalid ? -EIO : 0);
}
static int osc_io_commit_write(const struct lu_env *env,
                               const struct cl_io_slice *ios,
                               const struct cl_page_slice *slice,
                               unsigned from, unsigned to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;
        ENTRY;

        /*
         * XXX instead of calling osc_page_touch() here and in
         * osc_io_fault_start() it might be more logical to introduce a
         * cl_page_touch() method, that generic cl_io_commit_write() and
         * page fault code would call.
         */
        osc_page_touch(env, cl2osc_page(slice), to);
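        /*
         * A local (non-remote) client whose caller holds
         * CFS_CAP_SYS_RESOURCE is allowed to bypass quota enforcement on
         * the OST, hence OBD_BRW_NOQUOTA below.
         */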
        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE))
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;

        RETURN(0);
}
static int osc_io_fault_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
{
        struct cl_io       *io;
        struct cl_fault_io *fio;

        ENTRY;

        io  = ios->cis_io;
        fio = &io->u.ci_fault;
        CDEBUG(D_INFO, "%lu %i %i\n",
               fio->ft_index, fio->ft_writable, fio->ft_nob);
        /*
         * If mapping is writable, adjust kms to cover this page,
         * but do not extend kms beyond actual file size.
         */
        if (fio->ft_writable)
                osc_page_touch_at(env, ios->cis_obj,
                                  fio->ft_index, fio->ft_nob);
        RETURN(0);
}
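
/**
 * Completion callback for the asynchronous OST punch started by
 * osc_io_trunc_start(): records the result and wakes up the waiter in
 * osc_io_trunc_end().
 */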
static int osc_punch_upcall(void *a, int rc)
{
        struct osc_punch_cbargs *args = a;

        args->opc_rc = rc;
        complete(&args->opc_sync);
        return 0;
}
#ifdef __KERNEL__
/**
 * Checks that there are no pages being written in the extent being truncated.
 */
static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
                            struct osc_io *oio, size_t size)
{
        struct osc_page     *cp;
        struct osc_object   *obj;
        struct cl_object    *clob;
        struct cl_page      *page;
        struct cl_page_list *list;
        int                  partial;
        pgoff_t              start;

        clob    = oio->oi_cl.cis_obj;
        obj     = cl2osc(clob);
        start   = cl_index(clob, size);
        partial = cl_offset(clob, start) < size;
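        /*
         * Worked example (hypothetical numbers, 4096-byte pages): truncating
         * to size == 10000 gives start == 2 and partial == (8192 < 10000)
         * == 1, so only pages with index >= start + partial == 3 lie wholly
         * beyond the new size and may be complained about below.
         */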
        list    = &osc_env_info(env)->oti_plist;

        /*
         * Complain if there are pages in the truncated region.
         *
         * XXX this is quite an expensive check.
         */
        cl_page_list_init(list);
        cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF, list);

        cl_page_list_for_each(page, list)
                CL_PAGE_DEBUG(D_ERROR, env, page, "exists %lu\n", start);

        cl_page_list_disown(env, io, list);
        cl_page_list_fini(env, list);

        spin_lock(&obj->oo_seatbelt);
        list_for_each_entry(cp, &obj->oo_inflight[CRT_WRITE], ops_inflight) {
                page = cp->ops_cl.cpl_page;
                if (page->cp_index >= start + partial) {
                        cfs_task_t *submitter;

                        submitter = cp->ops_submitter;
                        /*
                         * XXX Linux specific debugging stuff.
                         */
                        CL_PAGE_DEBUG(D_ERROR, env, page, "%s/%i %lu\n",
                                      submitter->comm, submitter->pid, start);
                        libcfs_debug_dumpstack(submitter);
                }
        }
        spin_unlock(&obj->oo_seatbelt);
}
#else /* __KERNEL__ */
# define osc_trunc_check(env, io, oio, size) do {;} while (0)
#endif /* __KERNEL__ */
static int osc_io_trunc_start(const struct lu_env *env,
                              const struct cl_io_slice *slice)
{
        struct cl_io            *io     = slice->cis_io;
        struct osc_io           *oio    = cl2osc_io(env, slice);
        struct cl_object        *obj    = slice->cis_obj;
        struct lov_oinfo        *loi    = cl2osc(obj)->oo_oinfo;
        struct cl_attr          *attr   = &osc_env_info(env)->oti_attr;
        struct obdo             *oa     = &oio->oi_oa;
        struct osc_punch_cbargs *cbargs = &oio->oi_punch_cbarg;
        struct obd_capa         *capa;
        loff_t                   size   = io->u.ci_truncate.tr_size;
        int                      result = 0;

        memset(oa, 0, sizeof(*oa));

        osc_trunc_check(env, io, oio, size);

        if (oio->oi_lockless == 0) {
                cl_object_attr_lock(obj);
                result = cl_object_attr_get(env, obj, attr);
                if (result == 0) {
                        attr->cat_size = attr->cat_kms = size;
                        result = cl_object_attr_set(env, obj, attr,
                                                    CAT_SIZE | CAT_KMS);
                }
                cl_object_attr_unlock(obj);
                if (result != 0)
                        return result;
        }

        oa->o_id = loi->loi_id;
        oa->o_gr = loi->loi_gr;
        oa->o_mtime = attr->cat_mtime;
        oa->o_atime = attr->cat_atime;
        oa->o_ctime = attr->cat_ctime;
        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
                      OBD_MD_FLCTIME | OBD_MD_FLMTIME;
        if (oio->oi_lockless) {
                oa->o_flags = OBD_FL_TRUNCLOCK;
                oa->o_valid |= OBD_MD_FLFLAGS;
        }
        oa->o_size = size;
        oa->o_blocks = OBD_OBJECT_EOF;
        oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
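        /*
         * For OST_PUNCH the [o_size, o_blocks] pair describes the byte range
         * being punched; size..OBD_OBJECT_EOF therefore means "discard
         * everything from the new size to the end of the object".
         */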

        capa = io->u.ci_truncate.tr_capa;
        init_completion(&cbargs->opc_sync);
        result = osc_punch_base(osc_export(cl2osc(obj)), oa, capa,
                                osc_punch_upcall, cbargs, PTLRPCD_SET);
        return result;
}
static void osc_io_trunc_end(const struct lu_env *env,
                             const struct cl_io_slice *slice)
{
        struct cl_io            *io     = slice->cis_io;
        struct osc_io           *oio    = cl2osc_io(env, slice);
        struct osc_punch_cbargs *cbargs = &oio->oi_punch_cbarg;
        struct obdo             *oa     = &oio->oi_oa;
        int result;

        wait_for_completion(&cbargs->opc_sync);

        result = io->ci_result = cbargs->opc_rc;
        if (result == 0) {
                struct cl_object *obj = slice->cis_obj;

                if (oio->oi_lockless == 0) {
                        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
                        int valid = 0;

                        /* Update kms & size */
                        if (oa->o_valid & OBD_MD_FLSIZE) {
                                attr->cat_size = oa->o_size;
                                attr->cat_kms  = oa->o_size;
                                valid |= CAT_KMS|CAT_SIZE;
                        }
                        if (oa->o_valid & OBD_MD_FLBLOCKS) {
                                attr->cat_blocks = oa->o_blocks;
                                valid |= CAT_BLOCKS;
                        }
                        if (oa->o_valid & OBD_MD_FLMTIME) {
                                attr->cat_mtime = oa->o_mtime;
                                valid |= CAT_MTIME;
                        }
                        if (oa->o_valid & OBD_MD_FLCTIME) {
                                attr->cat_ctime = oa->o_ctime;
                                valid |= CAT_CTIME;
                        }
                        if (oa->o_valid & OBD_MD_FLATIME) {
                                attr->cat_atime = oa->o_atime;
                                valid |= CAT_ATIME;
                        }
                        cl_object_attr_lock(obj);
                        result = cl_object_attr_set(env, obj, attr, valid);
                        cl_object_attr_unlock(obj);
                } else { /* lockless truncate */
                        struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
                        /* XXX: Need a lock. */
                        osd->od_stats.os_lockless_truncates++;
                }
        }
}
static const struct cl_io_operations osc_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_fini   = osc_io_fini
                },
                [CIT_WRITE] = {
                        .cio_fini   = osc_io_fini
                },
                [CIT_TRUNC] = {
                        .cio_start  = osc_io_trunc_start,
                        .cio_end    = osc_io_trunc_end
                },
                [CIT_FAULT] = {
                        .cio_fini   = osc_io_fini,
                        .cio_start  = osc_io_fault_start
                },
                [CIT_MISC] = {
                        .cio_fini   = osc_io_fini
                }
        },
        .req_op = {
                [CRT_READ] = {
                        .cio_submit = osc_io_submit
                },
                [CRT_WRITE] = {
                        .cio_submit = osc_io_submit
                }
        },
        .cio_prepare_write = osc_io_prepare_write,
        .cio_commit_write  = osc_io_commit_write
};
/*****************************************************************************
 *
 * Transfer operations.
 *
 */
static int osc_req_prep(const struct lu_env *env,
                        const struct cl_req_slice *slice)
{
        return 0;
}
static void osc_req_completion(const struct lu_env *env,
                               const struct cl_req_slice *slice, int ioret)
{
        struct osc_req *or;

        or = cl2osc_req(slice);
        OBD_SLAB_FREE_PTR(or, osc_req_kmem);
}
/**
 * Implementation of struct cl_req_operations::cro_attr_set() for osc
 * layer. osc is responsible for struct obdo::o_id and struct obdo::o_gr
 * fields.
 */
static void osc_req_attr_set(const struct lu_env *env,
                             const struct cl_req_slice *slice,
                             const struct cl_object *obj,
                             struct cl_req_attr *attr, obd_valid flags)
{
        struct lov_oinfo *oinfo;
        struct cl_req    *clerq;
        struct cl_page   *apage; /* _some_ page in @clerq */
        struct cl_lock   *lock;  /* _some_ lock protecting @apage */
        struct osc_lock  *olck;
        struct osc_page  *opg;
        struct obdo      *oa;

        oa = attr->cra_oa;
        oinfo = cl2osc(obj)->oo_oinfo;
        if (flags & OBD_MD_FLID) {
                oa->o_id = oinfo->loi_id;
                oa->o_valid |= OBD_MD_FLID;
        }
        if (flags & OBD_MD_FLGROUP) {
                oa->o_gr = oinfo->loi_gr;
                oa->o_valid |= OBD_MD_FLGROUP;
        }
        if (flags & OBD_MD_FLHANDLE) {
                clerq = slice->crs_req;
                LASSERT(!list_empty(&clerq->crq_pages));
                apage = container_of(clerq->crq_pages.next,
                                     struct cl_page, cp_flight);
                opg = osc_cl_page_osc(apage);
                apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
                lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
                LASSERT(lock != NULL);
                olck = osc_lock_at(lock);
                LASSERT(olck != NULL);
                /* check for lockless io. */
                if (olck->ols_lock != NULL) {
                        oa->o_handle = olck->ols_lock->l_remote_handle;
                        oa->o_valid |= OBD_MD_FLHANDLE;
                }
                cl_lock_put(env, lock);
        }
}
static const struct cl_req_operations osc_req_ops = {
        .cro_prep       = osc_req_prep,
        .cro_attr_set   = osc_req_attr_set,
        .cro_completion = osc_req_completion
};
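
/**
 * The osc_io slice is pre-allocated in the per-thread osc environment
 * (osc_env_io()), so initialization only has to clean it and attach it
 * to @io.
 */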
int osc_io_init(const struct lu_env *env,
                struct cl_object *obj, struct cl_io *io)
{
        struct osc_io *oio = osc_env_io(env);

        CL_IO_SLICE_CLEAN(oio, oi_cl);
        cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops);
        return 0;
}
int osc_req_init(const struct lu_env *env, struct cl_device *dev,
                 struct cl_req *req)
{
        struct osc_req *or;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, CFS_ALLOC_IO);
        if (or != NULL) {
                cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
                result = 0;
        } else
                result = -ENOMEM;
        return result;
}

/** @} osc */