/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

/** \addtogroup osc osc @{ */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

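/*
 * Checks whether the page is covered by a granted DLM extent lock.  A
 * single-page extent policy is built for the page and matched without
 * side effects (LDLM_FL_TEST_LOCK); LCK_PW is OR-ed into the requested
 * mode because a write lock also covers reads.
 */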
static int osc_page_is_dlocked(const struct lu_env *env,
                               const struct osc_page *opg,
                               enum cl_lock_mode mode, int pending, int unref)
{
        struct cl_page         *page;
        struct osc_object      *obj;
        struct osc_thread_info *info;
        struct ldlm_res_id     *resname;
        struct lustre_handle   *lockh;
        ldlm_policy_data_t     *policy;
        ldlm_mode_t             dlmmode;
        int                     flags;

        info = osc_env_info(env);
        resname = &info->oti_resname;
        policy = &info->oti_policy;
        lockh = &info->oti_handle;
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);

        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (pending)
                flags |= LDLM_FL_CBPENDING;

        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
        osc_lock_build_res(env, obj, resname);
        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
                              dlmmode, &flags, NULL, lockh, unref);
}

/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        struct cl_object_header *hdr;
        struct cl_lock          *scan;
        struct cl_page          *page;
        struct cl_lock_descr    *descr;
        int result;

        LINVRNT(!opg->ops_temp);

        page = opg->ops_cl.cpl_page;
        if (page->cp_owner != NULL &&
            cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
                /*
                 * If IO is done without locks (liblustre, or lloop), lock is
                 * not required.
                 */
                result = 1;
        else
                /* otherwise check for a DLM lock */
                result = osc_page_is_dlocked(env, opg, mode, 1, unref);
        if (result == 0) {
                /* maybe this page is a part of a lockless io? */
                hdr = cl_object_header(opg->ops_cl.cpl_obj);
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_mode  = mode;
                descr->cld_start = page->cp_index;
                descr->cld_end   = page->cp_index;
                spin_lock(&hdr->coh_lock_guard);
                list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                        /*
                         * Lock-less sub-lock has to be either in HELD state
                         * (when io is actively going on), or in CACHED state,
                         * when top-lock is being unlocked:
                         * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
                         */
                        if ((scan->cll_state == CLS_HELD ||
                             scan->cll_state == CLS_CACHED) &&
                            cl_lock_ext_match(&scan->cll_descr, descr)) {
                                struct osc_lock *olck;

                                olck = osc_lock_at(scan);
                                result = osc_lock_is_lockless(olck);
                                break;
                        }
                }
                spin_unlock(&hdr->coh_lock_guard);
        }
        return result;
}

/*****************************************************************************
 *
 * Page operations.
 *
 */
static void osc_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        CDEBUG(D_TRACE, "%p\n", opg);
        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}

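/*
 * Pin a page for the duration of a transfer: take a reference on the
 * top-level cl_page and remember the pin in ops_transfer_pinned.  The
 * matching osc_page_transfer_put() drops both once the transfer no
 * longer needs the page.
 */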
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        LASSERT(!opg->ops_transfer_pinned);
        cl_page_get(page);
        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}

static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        if (opg->ops_transfer_pinned) {
                lu_ref_del(&page->cp_reference, "transfer", page);
                opg->ops_transfer_pinned = 0;
                cl_page_put(env, page);
        }
}

/**
 * This is called once for every page when it is submitted for a transfer
 * either opportunistic (osc_page_cache_add()), or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj;

        LINVRNT(cl_page_is_vmlocked(env, opg->ops_cl.cpl_page));

        obj = cl2osc(opg->ops_cl.cpl_obj);
        spin_lock(&obj->oo_seatbelt);
        list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = cfs_current();
        spin_unlock(&obj->oo_seatbelt);
}

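/*
 * cpo_cache_add() handler for writes: queue the page for an opportunistic
 * (write-back) transfer through the async IO queue.  The page is pinned
 * first and the pin is dropped again if queueing fails.
 */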
static int osc_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_io     *oio = osc_env_io(env);
        int result;
        int brw_flags;
        int noquota = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));

        /* Set the OBD_BRW_SRVLOCK before the page is queued. */
        brw_flags = osc_io_srvlock(oio) ? OBD_BRW_SRVLOCK : 0;
        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                brw_flags |= OBD_BRW_NOQUOTA;
                noquota = OBD_BRW_NOQUOTA;
        }

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, osc_export(obj), NULL, obj->oo_oinfo,
                                    &opg->ops_oap, OBD_BRW_WRITE | noquota,
                                    0, 0, brw_flags, 0);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);
        return result;
}

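/*
 * Converts a [start, end] page index range into the byte-granularity
 * LDLM extent policy expected by the lock match code.
 */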
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
                      pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof *policy);
        policy->l_extent.start = cl_offset(obj, start);
        policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}

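/*
 * cpo_is_under_lock() handler: reports whether any cl_lock of the object
 * currently covers this page.
 */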
static int osc_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused)
{
        struct cl_lock *lock;
        int             result;

        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
                               NULL, 1, 0);
        if (lock != NULL) {
                cl_lock_put(env, lock);
                result = -EBUSY;
        } else
                result = -ENODATA;
        return result;
}

static int osc_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice, struct cl_io *_)
{
        /*
         * Cached reads are not supported, so this must never be called.
         */
        LBUG();
        return 0;
}

static const char *osc_list(struct list_head *head)
{
        return list_empty(head) ? "-" : "+";
}

static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
                          "%#x %d %u %s %s %s %llu %u %#x %p %p %p %p %p\n",
                          opg, oap->oap_magic, oap->oap_cmd,
                          oap->oap_interrupted,
                          osc_list(&oap->oap_pending_item),
                          osc_list(&oap->oap_urgent_item),
                          osc_list(&oap->oap_rpc_item),
                          oap->oap_obj_off, oap->oap_page_off,
                          oap->oap_async_flags, oap->oap_request,
                          oap->oap_cli, oap->oap_loi, oap->oap_caller_ops,
                          oap->oap_caller_data);
}

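/*
 * cpo_delete() handler: called when the page is removed from the cache.
 * Drops a possibly pending transfer pin, tears down the async page
 * descriptor and unlinks the page from the object's in-flight list.
 */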
static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc;

        LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(osc_export(obj), NULL, obj->oo_oinfo, oap);
        LASSERTF(rc == 0, "%i\n", rc);
        spin_lock(&obj->oo_seatbelt);
        list_del_init(&opg->ops_inflight);
        spin_unlock(&obj->oo_seatbelt);
}

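/*
 * cpo_clip() handler: restricts the byte range [from, to) that is valid
 * within the page and marks the transfer count as already known, so
 * ap_refresh_count() is not consulted for it.
 */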
void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
                   int from, int to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        opg->ops_from = from;
        opg->ops_to   = to;
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
}

static int osc_page_cancel(const struct lu_env *env,
                           const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
        /* Check if the transferring against this page
         * is completed, or not even queued. */
        if (opg->ops_transfer_pinned)
                /* FIXME: may not be interrupted.. */
                rc = osc_oap_interrupted(env, oap);
        LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
        client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
        return rc;
}

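/*
 * Page operations vector.  cpo_cache_add is split per request type:
 * cached reads are never queued (osc_page_fail), cached writes go
 * through osc_page_cache_add().
 */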
static const struct cl_page_operations osc_page_ops = {
        .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_cache_add = osc_page_fail
                },
                [CRT_WRITE] = {
                        .cpo_cache_add = osc_page_cache_add
                }
        },
        .cpo_clip          = osc_page_clip,
        .cpo_cancel        = osc_page_cancel
};

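/*
 * ap_make_ready() callback of the async page machinery: invoked when
 * write-out of a dirty cached page is about to start; transitions the
 * cl_page into the owned state via cl_page_make_ready().
 */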
static int osc_make_ready(const struct lu_env *env, void *data, int cmd)
{
        struct osc_page *opg = data;
        struct cl_page  *page = cl_page_top(opg->ops_cl.cpl_page);
        int result;

        LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 1));

        result = cl_page_make_ready(env, page, CRT_WRITE);
        return result;
}

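/*
 * ap_refresh_count() callback: recomputes how many bytes of the page
 * should actually be written, clamping the count to the known minimum
 * size (kms) so that a racing truncate or a sub-page tail at EOF does
 * not lead to writing stale bytes.
 */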
static int osc_refresh_count(const struct lu_env *env, void *data, int cmd)
{
        struct cl_page   *page;
        struct osc_page  *osc = data;
        struct cl_object *obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int result;
        loff_t kms;

        LINVRNT(osc_page_protected(env, osc, CLM_READ, 1));

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(!(cmd & OBD_BRW_READ));
        LASSERT(osc != NULL);
        page = osc->ops_cl.cpl_page;
        obj = osc->ops_cl.cpl_obj;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result < 0)
                return result;
        kms = attr->cat_kms;
        if (cl_offset(obj, page->cp_index) >= kms)
                /* catch race with truncate */
                return 0;
        else if (cl_offset(obj, page->cp_index + 1) > kms)
                /* catch sub-page write at end of file */
                return kms % CFS_PAGE_SIZE;
        else
                return CFS_PAGE_SIZE;
}

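/*
 * ap_completion() callback: called when the read or write RPC covering
 * this page finishes.  Drops the transfer pin, removes the page from the
 * object's in-flight list, accounts lockless IO statistics and signals
 * completion to the cl_page state machine.
 */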
static int osc_completion(const struct lu_env *env,
                          void *data, int cmd, struct obdo *oa, int rc)
{
        struct osc_page       *opg  = data;
        struct osc_async_page *oap  = &opg->ops_oap;
        struct cl_page        *page = cl_page_top(opg->ops_cl.cpl_page);
        struct osc_object     *obj  = cl2osc(opg->ops_cl.cpl_obj);
        enum cl_req_type crt;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 1));
        LINVRNT(cl_page_is_vmlocked(env, page));

        cmd &= ~OBD_BRW_NOQUOTA;
        LASSERT(equi(page->cp_state == CPS_PAGEIN,  cmd == OBD_BRW_READ));
        LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
        LASSERT(opg->ops_transfer_pinned);

        /*
         * page->cp_req can be NULL if io submission failed before
         * cl_req was allocated.
         */
        if (page->cp_req != NULL)
                cl_req_page_done(env, page);
        LASSERT(page->cp_req == NULL);

        /* As the transfer for this page is being done, clear the flags */
        oap->oap_async_flags = 0;

        crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
        /* Clear opg->ops_transfer_pinned before VM lock is released. */
        opg->ops_transfer_pinned = 0;

        spin_lock(&obj->oo_seatbelt);
        LASSERT(opg->ops_submitter != NULL);
        LASSERT(!list_empty(&opg->ops_inflight));
        list_del_init(&opg->ops_inflight);
        spin_unlock(&obj->oo_seatbelt);

        cl_page_completion(env, page, crt, rc);

        /* statistics for lockless (server-locked) transfers */
        if (rc == 0 && oap->oap_brw_flags & OBD_BRW_SRVLOCK) {
                struct lu_device *ld    = opg->ops_cl.cpl_obj->co_lu.lo_dev;
                struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
                int bytes = opg->ops_to - opg->ops_from;

                if (crt == CRT_READ)
                        stats->os_lockless_reads += bytes;
                else
                        stats->os_lockless_writes += bytes;
        }

        /*
         * This has to be the last operation with the page, as locks are
         * released in cl_page_completion() and nothing except for the
         * reference counter protects page from concurrent reclaim.
         */
        lu_ref_del(&page->cp_reference, "transfer", page);
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, page);
        return 0;
}

static const struct obd_async_page_ops osc_async_page_ops = {
        .ap_make_ready    = osc_make_ready,
        .ap_refresh_count = osc_refresh_count,
        .ap_completion    = osc_completion
};

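/*
 * Allocates and initializes the OSC slice of a cl_page: the slice is
 * backed by an osc_async_page descriptor prepared for the underlying
 * VM page and is linked into the cl_page with osc_page_ops.
 */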
struct cl_page *osc_page_init(const struct lu_env *env,
                              struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
        if (opg != NULL) {
                void *oap = &opg->ops_oap;

                opg->ops_from = 0;
                opg->ops_to   = CFS_PAGE_SIZE;

                result = osc_prep_async_page(osc_export(osc),
                                             NULL, osc->oo_oinfo, vmpage,
                                             cl_offset(obj, page->cp_index),
                                             &osc_async_page_ops,
                                             opg, (void **)&oap, 1, NULL);
                if (result == 0)
                        cl_page_slice_add(page, &opg->ops_cl, obj,
                                          &osc_page_ops);
                /*
                 * Cannot assert osc_page_protected() here as read-ahead
                 * creates temporary pages outside of a lock.
                 */
#ifdef INVARIANT_CHECK
                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}

/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_io_submit_page(const struct lu_env *env,
                        struct osc_io *oio, struct osc_page *opg,
                        enum cl_req_type crt)
{
        struct osc_async_page *oap = &opg->ops_oap;
        struct client_obd     *cli = oap->oap_cli;

        LINVRNT(osc_page_protected(env, opg,
                                   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

        oap->oap_page_off   = opg->ops_from;
        oap->oap_count      = opg->ops_to - opg->ops_from;
        oap->oap_brw_flags |= OBD_BRW_SYNC;
        if (osc_io_srvlock(oio))
                oap->oap_brw_flags |= OBD_BRW_SRVLOCK;

        oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        if (!client_is_remote(osc_export(cl2osc(opg->ops_cl.cpl_obj))) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
        }

        oap->oap_async_flags |= OSC_FLAGS;
        if (oap->oap_cmd & OBD_BRW_READ)
                oap->oap_async_flags |= ASYNC_COUNT_STABLE;
        else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
                osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);

        osc_oap_to_pending(oap);
        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}

/** @} osc */