/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

/** \addtogroup osc osc @{ */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

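/*
 * Checks whether the page is covered by a granted DLM extent lock of at
 * least the given mode (a PW lock is OR-ed into the match mask below, so a
 * PW lock satisfies any request). With @pending set, locks with a pending
 * blocking callback match as well; @unref is passed through to
 * osc_match_base().
 */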
static int osc_page_is_dlocked(const struct lu_env *env,
                               const struct osc_page *opg,
                               enum cl_lock_mode mode, int pending, int unref)
{
        struct cl_page         *page;
        struct osc_object      *obj;
        struct osc_thread_info *info;
        struct ldlm_res_id     *resname;
        struct lustre_handle   *lockh;
        ldlm_policy_data_t     *policy;
        ldlm_mode_t             dlmmode;
        int                     flags;

        info = osc_env_info(env);
        resname = &info->oti_resname;
        policy = &info->oti_policy;
        lockh = &info->oti_handle;
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);

        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (pending)
                flags |= LDLM_FL_CBPENDING;

        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
        osc_lock_build_res(env, obj, resname);
        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
                              dlmmode, &flags, NULL, lockh, unref);
}

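/*
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */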
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        struct cl_object_header *hdr;
        struct cl_lock          *scan;
        struct cl_page          *page;
        struct cl_lock_descr    *descr;
        int result;

        LINVRNT(!opg->ops_temp);

        result = osc_page_is_dlocked(env, opg, mode, 1, unref);
        if (result == 0) {
                /* maybe this page is a part of a lockless io? */
                hdr = cl_object_header(opg->ops_cl.cpl_obj);
                page = opg->ops_cl.cpl_page;
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_mode  = mode;
                descr->cld_start = page->cp_index;
                descr->cld_end   = page->cp_index;
                spin_lock(&hdr->coh_lock_guard);
                list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                        /*
                         * Lock-less sub-lock has to be either in HELD state
                         * (when io is actively going on), or in CACHED state,
                         * when top-lock is being unlocked:
                         * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
                         */
                        if ((scan->cll_state == CLS_HELD ||
                             scan->cll_state == CLS_CACHED) &&
                            cl_lock_ext_match(&scan->cll_descr, descr)) {
                                struct osc_lock *olck;

                                olck = osc_lock_at(scan);
                                result = osc_lock_is_lockless(olck);
                                break;
                        }
                }
                spin_unlock(&hdr->coh_lock_guard);
        }
        return result;
}

/*****************************************************************************
 *
 * Page operations.
 *
 */

static void osc_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);

        CDEBUG(D_TRACE, "%p\n", opg);
        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}

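/*
 * Pins the top page for the duration of a transfer: takes an extra cl_page
 * reference, recorded under @label in the page's lu_ref set. The pin is
 * dropped either by osc_page_transfer_put() when submission fails, or by
 * osc_completion() once the transfer finishes.
 */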
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        LASSERT(!opg->ops_transfer_pinned);
        cl_page_get(page);
        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}

static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        if (opg->ops_transfer_pinned) {
                lu_ref_del(&page->cp_reference, "transfer", page);
                opg->ops_transfer_pinned = 0;
                cl_page_put(env, page);
        }
}

/**
 * This is called once for every page when it is submitted for a transfer
 * either opportunistic (osc_page_cache_add()), or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj;

        obj = cl2osc(opg->ops_cl.cpl_obj);
        /* Register the page on the object's per-request-type list of pages
         * in flight; the oo_seatbelt spin-lock protects these lists. */
        spin_lock(&obj->oo_seatbelt);
        list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = cfs_current();
        spin_unlock(&obj->oo_seatbelt);
}

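/*
 * Implements cl_page_operations::cpo_cache_add() for CRT_WRITE (see the
 * osc_page_ops table below): queues the page for opportunistic,
 * asynchronous write-out.
 */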
static int osc_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_io     *oio = osc_env_io(env);
        int brw_flags;
        int result;

        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));

        /* Set the OBD_BRW_SRVLOCK before the page is queued. */
        brw_flags = oio->oi_lockless ? OBD_BRW_SRVLOCK : 0;

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, osc_export(obj), NULL, obj->oo_oinfo,
                                    &opg->ops_oap, OBD_BRW_WRITE, 0, 0,
                                    brw_flags, 0);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);
        return result;
}

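/**
 * Converts a range of pages [start, end] of @obj into the corresponding
 * byte extent in @policy.
 */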
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
                      pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof *policy);
        policy->l_extent.start = cl_offset(obj, start);
        policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}

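/*
 * Implements cl_page_operations::cpo_is_under_lock(): reports whether some
 * cl_lock covers the page.
 */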
static int osc_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused)
{
        struct cl_lock *lock;
        int             result;

        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
                               NULL, 1, 0);
        if (lock != NULL) {
                cl_lock_put(env, lock);
                result = -EBUSY;
        } else
                result = -ENODATA;
        return result;
}

static int osc_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice,
                         struct cl_io *unused)
{
        /*
         * Cached reads are never added through cpo_cache_add(); reaching
         * this stub is a bug.
         */
        LBUG();
        return 0;
}

static const char *osc_list(struct list_head *head)
{
        return list_empty(head) ? "-" : "+";
}

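/*
 * Implements cl_page_operations::cpo_print(): dumps the state of the
 * osc_async_page embedded into the slice.
 */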
static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
                          "%#x %d %u %s %s %s %llu %u %#x %p %p %p %p %p\n",
                          opg, oap->oap_magic, oap->oap_cmd,
                          oap->oap_interrupted,
                          osc_list(&oap->oap_pending_item),
                          osc_list(&oap->oap_urgent_item),
                          osc_list(&oap->oap_rpc_item),
                          oap->oap_obj_off, oap->oap_page_off,
                          oap->oap_async_flags, oap->oap_request,
                          oap->oap_cli, oap->oap_loi, oap->oap_caller_ops,
                          oap->oap_caller_data);
}

static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc;

        LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(osc_export(obj), NULL, obj->oo_oinfo, oap);
        LASSERTF(rc == 0, "%i\n", rc);
        spin_lock(&obj->oo_seatbelt);
        list_del_init(&opg->ops_inflight);
        spin_unlock(&obj->oo_seatbelt);
}

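/*
 * Implements cl_page_operations::cpo_clip(): records the sub-page extent
 * [from, to) that is actually going to be transferred and marks the
 * transfer count as stable.
 */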
void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
                   int from, int to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        opg->ops_from = from;
        opg->ops_to   = to;
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
}

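/*
 * Implements cl_page_operations::cpo_cancel(): interrupts the transfer of
 * a page that is still queued or in flight.
 */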
static int osc_page_cancel(const struct lu_env *env,
                           const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
        /* Check if the transferring against this page
         * is completed, or not even queued. */
        if (opg->ops_transfer_pinned)
                /* FIXME: may not be interrupted.. */
                rc = osc_oap_interrupted(env, oap);
        LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
        client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
        return rc;
}

static const struct cl_page_operations osc_page_ops = {
        .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        /* reads are never cached, see osc_page_fail() */
                        .cpo_cache_add = osc_page_fail
                },
                [CRT_WRITE] = {
                        .cpo_cache_add = osc_page_cache_add
                }
        },
        .cpo_clip   = osc_page_clip,
        .cpo_cancel = osc_page_cancel
};

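/*
 * ap_make_ready callback, invoked by the async IO engine when a queued
 * write page is about to be put into an RPC: asks the cl_page state
 * machine to make the page ready for write-out.
 */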
static int osc_make_ready(const struct lu_env *env, void *data, int cmd)
{
        struct osc_page *opg = data;
        struct cl_page  *page = cl_page_top(opg->ops_cl.cpl_page);
        int result;

        LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 1));

        result = cl_page_make_ready(env, page, CRT_WRITE);
        return result;
}

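/*
 * ap_refresh_count callback: returns the number of bytes of the page that
 * lie within the known minimum size (kms), so that a write racing with
 * truncate or with file extension transfers the right amount of data.
 */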
static int osc_refresh_count(const struct lu_env *env, void *data, int cmd)
{
        struct cl_page   *page;
        struct osc_page  *osc = data;
        struct cl_object *obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int result;
        loff_t kms;

        LINVRNT(osc_page_protected(env, osc, CLM_READ, 1));

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(!(cmd & OBD_BRW_READ));
        LASSERT(osc != NULL);
        page = osc->ops_cl.cpl_page;
        obj = osc->ops_cl.cpl_obj;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result < 0)
                return result;
        kms = attr->cat_kms;
        if (cl_offset(obj, page->cp_index) >= kms)
                /* catch race with truncate */
                return 0;
        else if (cl_offset(obj, page->cp_index + 1) > kms)
                /* catch sub-page write at end of file */
                return kms % CFS_PAGE_SIZE;
        else
                return CFS_PAGE_SIZE;
}

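/*
 * ap_completion callback, invoked once per page when its read or write RPC
 * completes: unpins the page, updates lockless IO statistics and feeds the
 * result to the cl_page state machine through cl_page_completion().
 */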
static int osc_completion(const struct lu_env *env,
                          void *data, int cmd, struct obdo *oa, int rc)
{
        struct osc_page       *opg  = data;
        struct osc_async_page *oap  = &opg->ops_oap;
        struct cl_page        *page = cl_page_top(opg->ops_cl.cpl_page);
        struct osc_object     *obj  = cl2osc(opg->ops_cl.cpl_obj);
        enum cl_req_type crt;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 1));

        cmd &= ~OBD_BRW_NOQUOTA;
        LASSERT(equi(page->cp_state == CPS_PAGEIN,  cmd == OBD_BRW_READ));
        LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
        LASSERT(opg->ops_transfer_pinned);

        /*
         * page->cp_req can be NULL if io submission failed before
         * cl_req was allocated.
         */
        if (page->cp_req != NULL)
                cl_req_page_done(env, page);
        LASSERT(page->cp_req == NULL);

        /* As the transfer for this page is being done, clear the flags */
        oap->oap_async_flags = 0;

        crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
        /* Clear opg->ops_transfer_pinned before VM lock is released. */
        opg->ops_transfer_pinned = 0;

        spin_lock(&obj->oo_seatbelt);
        LASSERT(opg->ops_submitter != NULL);
        LASSERT(!list_empty(&opg->ops_inflight));
        list_del_init(&opg->ops_inflight);
        spin_unlock(&obj->oo_seatbelt);

        cl_page_completion(env, page, crt, rc);

        /* statistics for lockless (server-locked) transfers */
        if (rc == 0 && oap->oap_brw_flags & OBD_BRW_SRVLOCK) {
                struct lu_device *ld    = opg->ops_cl.cpl_obj->co_lu.lo_dev;
                struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
                int bytes = opg->ops_to - opg->ops_from;

                if (crt == CRT_READ)
                        stats->os_lockless_reads += bytes;
                else
                        stats->os_lockless_writes += bytes;
        }

        /*
         * This has to be the last operation with the page, as locks are
         * released in cl_page_completion() and nothing except for the
         * reference counter protects page from concurrent reclaim.
         */
        lu_ref_del(&page->cp_reference, "transfer", page);
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, page);
        return 0;
}

static const struct obd_async_page_ops osc_async_page_ops = {
        .ap_make_ready    = osc_make_ready,
        .ap_refresh_count = osc_refresh_count,
        .ap_completion    = osc_completion
};

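/*
 * Allocates the OSC private slice of @page, initializes the embedded
 * osc_async_page, and attaches the slice to the cl_page. Returns
 * ERR_PTR(result), i.e. NULL on success.
 */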
struct cl_page *osc_page_init(const struct lu_env *env,
                              struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg;
        int result;

        OBD_SLAB_ALLOC_PTR(opg, osc_page_kmem);
        if (opg != NULL) {
                void *oap = &opg->ops_oap;

                opg->ops_from = 0;
                opg->ops_to   = CFS_PAGE_SIZE;
                opg->ops_ignore_quota = !!cfs_capable(CFS_CAP_SYS_RESOURCE);

                result = osc_prep_async_page(osc_export(osc),
                                             NULL, osc->oo_oinfo, vmpage,
                                             cl_offset(obj, page->cp_index),
                                             &osc_async_page_ops,
                                             opg, (void **)&oap, 1, NULL);
                if (result == 0)
                        cl_page_slice_add(page, &opg->ops_cl, obj,
                                          &osc_page_ops);
                /*
                 * Cannot assert osc_page_protected() here as read-ahead
                 * creates temporary pages outside of a lock.
                 */
#ifdef INVARIANT_CHECK
                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}

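/**
 * Helper called for every page of an immediate transfer (i.e., one
 * submitted directly rather than queued for opportunistic write-out):
 * fills in the osc_async_page and moves it to the pending list.
 */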
void osc_io_submit_page(const struct lu_env *env,
                        struct osc_io *oio, struct osc_page *opg,
                        enum cl_req_type crt)
{
        struct osc_async_page *oap = &opg->ops_oap;
        struct client_obd     *cli = oap->oap_cli;

        LINVRNT(osc_page_protected(env, opg,
                                   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

        oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        if (opg->ops_ignore_quota)
                oap->oap_cmd |= OBD_BRW_NOQUOTA;

        oap->oap_async_flags |= OSC_FLAGS;
        if (oap->oap_cmd & OBD_BRW_READ)
                oap->oap_async_flags |= ASYNC_COUNT_STABLE;
        else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
                osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);

        oap->oap_page_off   = opg->ops_from;
        oap->oap_count      = opg->ops_to - opg->ops_from;
        oap->oap_brw_flags |= oio->oi_lockless ? OBD_BRW_SRVLOCK : 0;

        osc_oap_to_pending(oap);
        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}

/** @} osc */