/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

/** \addtogroup osc osc @{ */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"
/*
 * Comment out osc_page_protected because it may sleep inside the
 * client_obd_list_lock:
 *
 * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
 *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
 *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
 */
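/*
 * Illustrative sketch of the anti-pattern the chain above describes (not
 * from the original source; "imp" stands for a hypothetical import pointer):
 *
 *      client_obd_list_lock(&cli->cl_loi_list_lock);  <-- atomic context
 *      sptlrpc_import_check_ctx(imp);                 <-- may sleep here
 *      client_obd_list_unlock(&cli->cl_loi_list_lock);
 *
 * Sleeping while a spinlock is held is illegal in the kernel, which is why
 * the invariant-checking path below is compiled out with "#if 0".
 */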
#if 0
static int osc_page_is_dlocked(const struct lu_env *env,
                               const struct osc_page *opg,
                               enum cl_lock_mode mode, int pending, int unref)
{
        struct cl_page         *page;
        struct osc_object      *obj;
        struct osc_thread_info *info;
        struct ldlm_res_id     *resname;
        struct lustre_handle   *lockh;
        ldlm_policy_data_t     *policy;
        ldlm_mode_t             dlmmode;
        int                     flags;

        info = osc_env_info(env);
        resname = &info->oti_resname;
        policy = &info->oti_policy;
        lockh = &info->oti_handle;
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);

        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (pending)
                flags |= LDLM_FL_CBPENDING;

        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
        osc_lock_build_res(env, obj, resname);
        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
                              dlmmode, &flags, NULL, lockh, unref);
}
/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        struct cl_object_header *hdr;
        struct cl_lock          *scan;
        struct cl_page          *page;
        struct cl_lock_descr    *descr;
        int                      result;

        LINVRNT(!opg->ops_temp);

        page = opg->ops_cl.cpl_page;
        if (page->cp_owner != NULL &&
            cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
                /*
                 * If IO is done without locks (liblustre, or lloop), lock is
                 * not required.
                 */
                result = 1;
        else
                /* otherwise check for a DLM lock */
                result = osc_page_is_dlocked(env, opg, mode, 1, unref);
        if (result == 0) {
                /* maybe this page is a part of a lockless io? */
                hdr = cl_object_header(opg->ops_cl.cpl_obj);
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_mode  = mode;
                descr->cld_start = page->cp_index;
                descr->cld_end   = page->cp_index;
                spin_lock(&hdr->coh_lock_guard);
                list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                        /*
                         * Lock-less sub-lock has to be either in HELD state
                         * (when io is actively going on), or in CACHED state,
                         * when top-lock is being unlocked:
                         * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
                         */
                        if ((scan->cll_state == CLS_HELD ||
                             scan->cll_state == CLS_CACHED) &&
                            cl_lock_ext_match(&scan->cll_descr, descr)) {
                                struct osc_lock *olck;

                                olck = osc_lock_at(scan);
                                result = osc_lock_is_lockless(olck);
                                break;
                        }
                }
                spin_unlock(&hdr->coh_lock_guard);
        }
        return result;
}
#else
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        return 1;
}
#endif

/*****************************************************************************
 *
 * Page operations.
 *
 */
static void osc_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        CDEBUG(D_TRACE, "%p\n", opg);
        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        LASSERT(!opg->ops_transfer_pinned);
        cl_page_get(page);
        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}
static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        if (opg->ops_transfer_pinned) {
                lu_ref_del(&page->cp_reference, "transfer", page);
                opg->ops_transfer_pinned = 0;
                cl_page_put(env, page);
        }
}
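/*
 * Typical pin life-cycle (descriptive note, not from the original source):
 * osc_page_transfer_get() takes a page reference and marks the page pinned
 * before it is handed to the transfer machinery; the matching release
 * happens either in osc_page_transfer_put() when queueing fails, or in
 * osc_completion() once the RPC finishes.  The ops_transfer_pinned flag is
 * what keeps the two paths from double-dropping the reference.
 */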
/**
 * This is called once for every page when it is submitted for a transfer,
 * either opportunistic (osc_page_cache_add()) or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj;

        LINVRNT(cl_page_is_vmlocked(env, opg->ops_cl.cpl_page));

        obj = cl2osc(opg->ops_cl.cpl_obj);
        spin_lock(&obj->oo_seatbelt);
        list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = cfs_current();
        spin_unlock(&obj->oo_seatbelt);
}
static int osc_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_io     *oio = osc_env_io(env);
        int result;
        int brw_flags;
        int noquota = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));

        /* Set the OBD_BRW_SRVLOCK before the page is queued. */
        brw_flags = osc_io_srvlock(oio) ? OBD_BRW_SRVLOCK : 0;
        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                brw_flags |= OBD_BRW_NOQUOTA;
                noquota = OBD_BRW_NOQUOTA;
        }

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, osc_export(obj), NULL, obj->oo_oinfo,
                                    &opg->ops_oap, OBD_BRW_WRITE | noquota,
                                    0, 0, brw_flags, 0);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);
        return result;
}
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
                      pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof *policy);
        policy->l_extent.start = cl_offset(obj, start);
        policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}
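/*
 * Worked example (illustrative, assuming 4096-byte pages): start = end = 3
 * yields l_extent.start = cl_offset(obj, 3) = 12288 and
 * l_extent.end = cl_offset(obj, 4) - 1 = 16383, i.e. the inclusive byte
 * range covered by exactly that one page.
 */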
static int osc_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused)
{
        struct cl_lock *lock;
        int             result;

        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
                               NULL, 1, 0);
        if (lock != NULL) {
                cl_lock_put(env, lock);
                result = -EBUSY;
        } else
                result = -ENODATA;
        return result;
}

static int osc_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice,
                         struct cl_io *unused)
{
        /* Cached read? This path must never be entered. */
        LBUG();
        return 0;
}
static const char *osc_list(struct list_head *head)
{
        return list_empty(head) ? "-" : "+";
}
static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
                          "< %#x %d %u %s %s %s >"
                          "< %llu %u %#x %#x %p %p %p %p %p >"
                          "< %s %p %d >\n",
                          opg,
                          /* 1 */
                          oap->oap_magic, oap->oap_cmd,
                          oap->oap_interrupted,
                          osc_list(&oap->oap_pending_item),
                          osc_list(&oap->oap_urgent_item),
                          osc_list(&oap->oap_rpc_item),
                          /* 2 */
                          oap->oap_obj_off, oap->oap_page_off,
                          oap->oap_async_flags, oap->oap_brw_flags,
                          oap->oap_request,
                          oap->oap_cli, oap->oap_loi, oap->oap_caller_ops,
                          oap->oap_caller_data,
                          /* 3 */
                          osc_list(&opg->ops_inflight),
                          opg->ops_submitter, opg->ops_transfer_pinned);
}
static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc;

        LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(osc_export(obj), NULL, obj->oo_oinfo, oap);
        if (rc != 0)
                CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
                              "Trying to teardown failed: %d\n", rc);
        spin_lock(&obj->oo_seatbelt);
        list_del_init(&opg->ops_inflight);
        spin_unlock(&obj->oo_seatbelt);
}
static void osc_page_clip(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          int from, int to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        opg->ops_from = from;
        opg->ops_to   = to;
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
}
static int osc_page_cancel(const struct lu_env *env,
                           const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
        /* Check whether the transfer for this page has completed, or was
         * never queued in the first place. */
        if (opg->ops_transfer_pinned)
                /* FIXME: may not be interrupted.. */
                rc = osc_oap_interrupted(env, oap);
        LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
        client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
        return rc;
}
static const struct cl_page_operations osc_page_ops = {
        .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_cache_add = osc_page_fail
                },
                [CRT_WRITE] = {
                        .cpo_cache_add = osc_page_cache_add
                }
        },
        .cpo_clip          = osc_page_clip,
        .cpo_cancel        = osc_page_cancel
};
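/*
 * Note (descriptive, not from the original source): cpo_cache_add is
 * dispatched per request type through the .io array above.  Reads must
 * never reach the cache-add path -- osc_page_fail() LBUG()s if they do --
 * while writes are queued asynchronously via osc_page_cache_add().
 */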
static int osc_make_ready(const struct lu_env *env, void *data, int cmd)
{
        struct osc_page *opg = data;
        struct cl_page  *page = cl_page_top(opg->ops_cl.cpl_page);
        int result;

        LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 1));

        result = cl_page_make_ready(env, page, CRT_WRITE);
        return result;
}
static int osc_refresh_count(const struct lu_env *env, void *data, int cmd)
{
        struct cl_page   *page;
        struct osc_page  *osc = data;
        struct cl_object *obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int result;
        loff_t kms;

        LINVRNT(osc_page_protected(env, osc, CLM_READ, 1));

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(!(cmd & OBD_BRW_READ));
        LASSERT(osc != NULL);
        page = osc->ops_cl.cpl_page;
        obj = osc->ops_cl.cpl_obj;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result < 0)
                return result;
        kms = attr->cat_kms;
        if (cl_offset(obj, page->cp_index) >= kms)
                /* catch race with truncate */
                return 0;
        else if (cl_offset(obj, page->cp_index + 1) > kms)
                /* catch sub-page write at end of file */
                return kms % CFS_PAGE_SIZE;
        else
                return CFS_PAGE_SIZE;
}
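/*
 * Worked example (illustrative, assuming CFS_PAGE_SIZE = 4096): with
 * kms = 10000, page index 2 covers bytes [8192, 12288).  Since
 * cl_offset(obj, 2) = 8192 < kms but cl_offset(obj, 3) = 12288 > kms,
 * the page straddles the known-minimum-size boundary and only
 * kms % 4096 = 1808 bytes of it are worth transferring.
 */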
static int osc_completion(const struct lu_env *env,
                          void *data, int cmd, struct obdo *oa, int rc)
{
        struct osc_page       *opg = data;
        struct osc_async_page *oap = &opg->ops_oap;
        struct cl_page        *page = cl_page_top(opg->ops_cl.cpl_page);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        enum cl_req_type crt;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 1));
        LINVRNT(cl_page_is_vmlocked(env, page));

        cmd &= ~OBD_BRW_NOQUOTA;
        LASSERT(equi(page->cp_state == CPS_PAGEIN,  cmd == OBD_BRW_READ));
        LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
        LASSERT(opg->ops_transfer_pinned);

        /*
         * page->cp_req can be NULL if io submission failed before
         * cl_req was allocated.
         */
        if (page->cp_req != NULL)
                cl_req_page_done(env, page);
        LASSERT(page->cp_req == NULL);

        /* As the transfer for this page is done, clear the flags */
        oap->oap_async_flags = 0;

        crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
        /* Clear opg->ops_transfer_pinned before VM lock is released. */
        opg->ops_transfer_pinned = 0;

        spin_lock(&obj->oo_seatbelt);
        LASSERT(opg->ops_submitter != NULL);
        LASSERT(!list_empty(&opg->ops_inflight));
        list_del_init(&opg->ops_inflight);
        spin_unlock(&obj->oo_seatbelt);

        cl_page_completion(env, page, crt, rc);

        /* statistics */
        if (rc == 0 && oap->oap_brw_flags & OBD_BRW_SRVLOCK) {
                struct lu_device *ld    = opg->ops_cl.cpl_obj->co_lu.lo_dev;
                struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
                int bytes = opg->ops_to - opg->ops_from;

                if (crt == CRT_READ)
                        stats->os_lockless_reads += bytes;
                else
                        stats->os_lockless_writes += bytes;
        }

        /*
         * This has to be the last operation with the page, as locks are
         * released in cl_page_completion() and nothing except for the
         * reference counter protects page from concurrent reclaim.
         */
        lu_ref_del(&page->cp_reference, "transfer", page);
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, page);
        return 0;
}
static const struct obd_async_page_ops osc_async_page_ops = {
        .ap_make_ready    = osc_make_ready,
        .ap_refresh_count = osc_refresh_count,
        .ap_completion    = osc_completion
};
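/*
 * Note (descriptive, not from the original source): these callbacks are how
 * the async-page machinery calls back into the cl_page world:
 * ap_make_ready transitions a dirty page toward PAGEOUT via
 * cl_page_make_ready(), ap_refresh_count recomputes how many bytes of a
 * write are still relevant against the known minimum size (kms), and
 * ap_completion finishes the transfer and drops the pin taken at
 * submission time.
 */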
struct cl_page *osc_page_init(const struct lu_env *env,
                              struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
        if (opg != NULL) {
                void *oap = &opg->ops_oap;

                opg->ops_from = 0;
                opg->ops_to   = CFS_PAGE_SIZE;

                result = osc_prep_async_page(osc_export(osc),
                                             NULL, osc->oo_oinfo, vmpage,
                                             cl_offset(obj, page->cp_index),
                                             &osc_async_page_ops,
                                             opg, (void **)&oap, 1, NULL);
                if (result == 0)
                        cl_page_slice_add(page, &opg->ops_cl, obj,
                                          &osc_page_ops);
                /*
                 * Cannot assert osc_page_protected() here as read-ahead
                 * creates temporary pages outside of a lock.
                 */
#ifdef INVARIANT_CHECK
                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}
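/*
 * Note (descriptive, not from the original source): on success result is 0,
 * so ERR_PTR(result) is NULL -- by the cl_page layering convention a NULL
 * return from a page-init method means "slice attached, keep going", while
 * a genuine ERR_PTR() value aborts cl_page creation.
 */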
/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_io_submit_page(const struct lu_env *env,
                        struct osc_io *oio, struct osc_page *opg,
                        enum cl_req_type crt)
{
        struct osc_async_page *oap = &opg->ops_oap;
        struct client_obd     *cli = oap->oap_cli;

        LINVRNT(osc_page_protected(env, opg,
                                   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

        oap->oap_page_off = opg->ops_from;
        oap->oap_count    = opg->ops_to - opg->ops_from;
        /* Give a hint to OST that requests are coming from kswapd - bug19529 */
        if (libcfs_memory_pressure_get())
                oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
        oap->oap_brw_flags |= OBD_BRW_SYNC;
        if (osc_io_srvlock(oio))
                oap->oap_brw_flags |= OBD_BRW_SRVLOCK;

        oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        if (!client_is_remote(osc_export(cl2osc(opg->ops_cl.cpl_obj))) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
        }

        oap->oap_async_flags |= OSC_FLAGS;
        if (oap->oap_cmd & OBD_BRW_READ)
                oap->oap_async_flags |= ASYNC_COUNT_STABLE;
        else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
                osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);

        osc_oap_to_pending(oap);
        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}

/** @} osc */