/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011 Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"
/*
 * Comment out osc_page_protected because it may sleep inside the
 * client_obd_list_lock.
 * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
 *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
 *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
 */
#if 0
static int osc_page_is_dlocked(const struct lu_env *env,
                               const struct osc_page *opg,
                               enum cl_lock_mode mode, int pending, int unref)
{
        struct cl_page         *page;
        struct osc_object      *obj;
        struct osc_thread_info *info;
        struct ldlm_res_id     *resname;
        struct lustre_handle   *lockh;
        ldlm_policy_data_t     *policy;
        ldlm_mode_t             dlmmode;
        int                     flags;

        info = osc_env_info(env);
        resname = &info->oti_resname;
        policy = &info->oti_policy;
        lockh = &info->oti_handle;
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);

        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (pending)
                flags |= LDLM_FL_CBPENDING;

        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
        osc_lock_build_res(env, obj, resname);
        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
                              dlmmode, &flags, NULL, lockh, unref);
}
/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        struct cl_object_header *hdr;
        struct cl_lock          *scan;
        struct cl_page          *page;
        struct cl_lock_descr    *descr;
        int result;

        LINVRNT(!opg->ops_temp);

        page = opg->ops_cl.cpl_page;
        if (page->cp_owner != NULL &&
            cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
                /*
                 * If IO is done without locks (liblustre, or lloop), lock is
                 * not required.
                 */
                result = 1;
        else
                /* otherwise check for a DLM lock */
                result = osc_page_is_dlocked(env, opg, mode, 1, unref);
        if (result == 0) {
                /* maybe this page is a part of a lockless io? */
                hdr = cl_object_header(opg->ops_cl.cpl_obj);
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_mode  = mode;
                descr->cld_start = page->cp_index;
                descr->cld_end   = page->cp_index;
                cfs_spin_lock(&hdr->coh_lock_guard);
                cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                        /*
                         * Lock-less sub-lock has to be either in HELD state
                         * (when io is actively going on), or in CACHED state,
                         * when top-lock is being unlocked:
                         * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
                         */
                        if ((scan->cll_state == CLS_HELD ||
                             scan->cll_state == CLS_CACHED) &&
                            cl_lock_ext_match(&scan->cll_descr, descr)) {
                                struct osc_lock *olck;

                                olck = osc_lock_at(scan);
                                result = osc_lock_is_lockless(olck);
                                break;
                        }
                }
                cfs_spin_unlock(&hdr->coh_lock_guard);
        }
        return result;
}
#else
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        return 1;
}
#endif
/*****************************************************************************
 *
 * Page operations.
 *
 */
static void osc_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        CDEBUG(D_TRACE, "%p\n", opg);
        LASSERT(opg->ops_lock == NULL);
        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        LASSERT(!opg->ops_transfer_pinned);
        cl_page_get(page);
        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}
static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        if (opg->ops_transfer_pinned) {
                lu_ref_del(&page->cp_reference, "transfer", page);
                opg->ops_transfer_pinned = 0;
                cl_page_put(env, page);
        }
}
/**
 * This is called once for every page when it is submitted for a transfer
 * either opportunistic (osc_page_cache_add()), or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj;

        LINVRNT(cl_page_is_vmlocked(env, opg->ops_cl.cpl_page));

        obj = cl2osc(opg->ops_cl.cpl_obj);
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = cfs_current();
        cfs_spin_unlock(&obj->oo_seatbelt);
}
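
/*
 * The oo_seatbelt spin-lock protects the per-object oo_inflight[] lists: a
 * page added above for CRT_READ or CRT_WRITE stays on the matching list
 * until osc_completion() (or osc_page_delete() on teardown) removes it with
 * cfs_list_del_init(&opg->ops_inflight) once the transfer is finished.
 */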
static int osc_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        int result;
        /* All cacheable IO is async-capable */
        int brw_flags = OBD_BRW_ASYNC;
        int noquota = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
        ENTRY;

        /* Set the OBD_BRW_SRVLOCK before the page is queued. */
        brw_flags |= opg->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                brw_flags |= OBD_BRW_NOQUOTA;
                noquota = OBD_BRW_NOQUOTA;
        }

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, osc_export(obj), NULL, obj->oo_oinfo,
                                    &opg->ops_oap, OBD_BRW_WRITE | noquota,
                                    opg->ops_from, opg->ops_to - opg->ops_from,
                                    brw_flags, 0);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);
        RETURN(result);
}
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
                      pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof *policy);
        policy->l_extent.start = cl_offset(obj, start);
        policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}
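
/*
 * Worked example (assuming a 4096-byte CFS_PAGE_SIZE): for start = end = 3
 * the policy extent becomes [3 * 4096, 4 * 4096 - 1] = [12288, 16383], i.e.
 * exactly the byte range backing page index 3.
 */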
static int osc_page_addref_lock(const struct lu_env *env,
                                struct osc_page *opg,
                                struct cl_lock *lock)
{
        struct osc_lock *olock;
        int              rc;

        LASSERT(opg->ops_lock == NULL);

        olock = osc_lock_at(lock);
        if (cfs_atomic_inc_return(&olock->ols_pageref) <= 0) {
                cfs_atomic_dec(&olock->ols_pageref);
                rc = 1;
        } else {
                cl_lock_get(lock);
                opg->ops_lock = lock;
                rc = 0;
        }
        return rc;
}
static void osc_page_putref_lock(const struct lu_env *env,
                                 struct osc_page *opg)
{
        struct cl_lock  *lock = opg->ops_lock;
        struct osc_lock *olock;

        LASSERT(lock != NULL);
        olock = osc_lock_at(lock);

        cfs_atomic_dec(&olock->ols_pageref);
        opg->ops_lock = NULL;

        cl_lock_put(env, lock);
}
static int osc_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused)
{
        struct cl_lock *lock;
        int             result = -ENODATA;

        ENTRY;
        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
                               NULL, 1, 0);
        if (lock != NULL) {
                if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
                        result = -EBUSY;
                cl_lock_put(env, lock);
        }
        RETURN(result);
}
static void osc_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *io)
{
        struct osc_page *opg = cl2osc_page(slice);

        if (unlikely(opg->ops_lock))
                osc_page_putref_lock(env, opg);
}
static void osc_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct osc_page *opg = cl2osc_page(slice);

        if (likely(opg->ops_lock))
                osc_page_putref_lock(env, opg);
}
static int osc_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice,
                         struct cl_io *unused)
{
        /* Cached read? */
        LBUG();
        return 0;
}
static const char *osc_list(cfs_list_t *head)
{
        return cfs_list_empty(head) ? "-" : "+";
}
static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
{
        if (opg->ops_submit_time == 0)
                return 0;
        return (cfs_time_current() - opg->ops_submit_time);
}
static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        struct osc_object     *obj = cl2osc(slice->cpl_obj);
        struct client_obd     *cli = &osc_export(obj)->exp_obd->u.cli;
        struct lov_oinfo      *loi = obj->oo_oinfo;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
                          "1< %#x %d %u %s %s %s > "
                          "2< "LPU64" %u %u %#x %#x | %p %p %p %p %p > "
                          "3< %s %p %d %lu %d > "
                          "4< %d %d %d %lu %s | %s %s %s %s > "
                          "5< %s %s %s %s | %d %s %s | %d %s %s>\n",
                          opg,
                          /* 1 */
                          oap->oap_magic, oap->oap_cmd,
                          oap->oap_interrupted,
                          osc_list(&oap->oap_pending_item),
                          osc_list(&oap->oap_urgent_item),
                          osc_list(&oap->oap_rpc_item),
                          /* 2 */
                          oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
                          oap->oap_async_flags, oap->oap_brw_flags,
                          oap->oap_request,
                          oap->oap_cli, oap->oap_loi, oap->oap_caller_ops,
                          oap->oap_caller_data,
                          /* 3 */
                          osc_list(&opg->ops_inflight),
                          opg->ops_submitter, opg->ops_transfer_pinned,
                          osc_submit_duration(opg), opg->ops_srvlock,
                          /* 4 */
                          cli->cl_r_in_flight, cli->cl_w_in_flight,
                          cli->cl_max_rpcs_in_flight,
                          cli->cl_avail_grant,
                          osc_list(&cli->cl_cache_waiters),
                          osc_list(&cli->cl_loi_ready_list),
                          osc_list(&cli->cl_loi_hp_ready_list),
                          osc_list(&cli->cl_loi_write_list),
                          osc_list(&cli->cl_loi_read_list),
                          /* 5 */
                          osc_list(&loi->loi_ready_item),
                          osc_list(&loi->loi_hp_ready_item),
                          osc_list(&loi->loi_write_item),
                          osc_list(&loi->loi_read_item),
                          loi->loi_read_lop.lop_num_pending,
                          osc_list(&loi->loi_read_lop.lop_pending),
                          osc_list(&loi->loi_read_lop.lop_urgent),
                          loi->loi_write_lop.lop_num_pending,
                          osc_list(&loi->loi_write_lop.lop_pending),
                          osc_list(&loi->loi_write_lop.lop_urgent));
}
static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc;

        LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

        ENTRY;
        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(osc_export(obj), NULL, obj->oo_oinfo, oap);
        if (rc) {
                CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
                              "Trying to teardown failed: %d\n", rc);
                LASSERT(0);
        }
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_del_init(&opg->ops_inflight);
        cfs_spin_unlock(&obj->oo_seatbelt);
        EXIT;
}
void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
                   int from, int to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        opg->ops_from = from;
        opg->ops_to   = to;
        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
        cfs_spin_unlock(&oap->oap_lock);
}
static int osc_page_cancel(const struct lu_env *env,
                           const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
        /* Check if the transfer against this page has completed, or was
         * never queued in the first place. */
        if (opg->ops_transfer_pinned)
                /* FIXME: may not be interrupted.. */
                rc = osc_oap_interrupted(env, oap);
        LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
        client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
        return rc;
}
static const struct cl_page_operations osc_page_ops = {
        .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
        .cpo_disown        = osc_page_disown,
        .io = {
                [CRT_READ] = {
                        .cpo_cache_add  = osc_page_fail,
                        .cpo_completion = osc_page_completion_read
                },
                [CRT_WRITE] = {
                        .cpo_cache_add = osc_page_cache_add
                }
        },
        .cpo_clip   = osc_page_clip,
        .cpo_cancel = osc_page_cancel
};
static int osc_make_ready(const struct lu_env *env, void *data, int cmd)
{
        struct osc_page *opg = data;
        struct cl_page  *page = cl_page_top(opg->ops_cl.cpl_page);
        int result;

        LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 1));

        ENTRY;
        result = cl_page_make_ready(env, page, CRT_WRITE);
        if (result == 0)
                opg->ops_submit_time = cfs_time_current();
        RETURN(result);
}
static int osc_refresh_count(const struct lu_env *env, void *data, int cmd)
{
        struct cl_page   *page;
        struct osc_page  *osc = data;
        struct cl_object *obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int result;
        loff_t kms;

        LINVRNT(osc_page_protected(env, osc, CLM_READ, 1));

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(!(cmd & OBD_BRW_READ));
        LASSERT(osc != NULL);
        page = osc->ops_cl.cpl_page;
        obj = osc->ops_cl.cpl_obj;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result < 0)
                return result;
        kms = attr->cat_kms;
        if (cl_offset(obj, page->cp_index) >= kms)
                /* catch race with truncate */
                return 0;
        else if (cl_offset(obj, page->cp_index + 1) > kms)
                /* catch sub-page write at end of file */
                return kms % CFS_PAGE_SIZE;
        else
                return CFS_PAGE_SIZE;
}
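
/*
 * Worked example of the kms clamping above (assuming a 4096-byte
 * CFS_PAGE_SIZE): with known minimum size kms = 10000, page index 2 covers
 * bytes [8192, 12287]; its start is below kms but its end is not, so the
 * transfer count is clamped to kms % CFS_PAGE_SIZE = 10000 - 8192 = 1808
 * bytes.  Page index 3 starts at 12288 >= kms and gets a count of 0.
 */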
static int osc_completion(const struct lu_env *env,
                          void *data, int cmd, struct obdo *oa, int rc)
{
        struct osc_page       *opg = data;
        struct osc_async_page *oap = &opg->ops_oap;
        struct cl_page        *page = cl_page_top(opg->ops_cl.cpl_page);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        enum cl_req_type crt;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 1));
        LINVRNT(cl_page_is_vmlocked(env, page));

        ENTRY;

        cmd &= ~OBD_BRW_NOQUOTA;
        LASSERT(equi(page->cp_state == CPS_PAGEIN,  cmd == OBD_BRW_READ));
        LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
        LASSERT(opg->ops_transfer_pinned);

        /*
         * page->cp_req can be NULL if io submission failed before
         * cl_req was allocated.
         */
        if (page->cp_req != NULL)
                cl_req_page_done(env, page);
        LASSERT(page->cp_req == NULL);

        /* As the transfer for this page is being done, clear the flags */
        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags = 0;
        cfs_spin_unlock(&oap->oap_lock);

        crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
        /* Clear opg->ops_transfer_pinned before VM lock is released. */
        opg->ops_transfer_pinned = 0;

        cfs_spin_lock(&obj->oo_seatbelt);
        LASSERT(opg->ops_submitter != NULL);
        LASSERT(!cfs_list_empty(&opg->ops_inflight));
        cfs_list_del_init(&opg->ops_inflight);
        cfs_spin_unlock(&obj->oo_seatbelt);

        opg->ops_submit_time = 0;

        cl_page_completion(env, page, crt, rc);

        if (rc == 0 && oap->oap_brw_flags & OBD_BRW_SRVLOCK) {
                struct lu_device *ld    = opg->ops_cl.cpl_obj->co_lu.lo_dev;
                struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
                int bytes = oap->oap_count;

                if (crt == CRT_READ)
                        stats->os_lockless_reads += bytes;
                else
                        stats->os_lockless_writes += bytes;
        }

        /*
         * This has to be the last operation with the page, as locks are
         * released in cl_page_completion() and nothing except for the
         * reference counter protects page from concurrent reclaim.
         */
        lu_ref_del(&page->cp_reference, "transfer", page);
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, page);

        RETURN(0);
}
static const struct obd_async_page_ops osc_async_page_ops = {
        .ap_make_ready    = osc_make_ready,
        .ap_refresh_count = osc_refresh_count,
        .ap_completion    = osc_completion
};
struct cl_page *osc_page_init(const struct lu_env *env,
                              struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
        if (opg != NULL) {
                void *oap = &opg->ops_oap;

                opg->ops_from = 0;
                opg->ops_to   = CFS_PAGE_SIZE;

                result = osc_prep_async_page(osc_export(osc),
                                             NULL, osc->oo_oinfo, vmpage,
                                             cl_offset(obj, page->cp_index),
                                             &osc_async_page_ops,
                                             opg, (void **)&oap, 1, NULL);
                if (result == 0) {
                        struct osc_io *oio = osc_env_io(env);
                        opg->ops_srvlock = osc_io_srvlock(oio);
                        cl_page_slice_add(page, &opg->ops_cl, obj,
                                          &osc_page_ops);
                }
                /*
                 * Cannot assert osc_page_protected() here as read-ahead
                 * creates temporary pages outside of a lock.
                 */
#ifdef INVARIANT_CHECK
                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}
/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_io_submit_page(const struct lu_env *env,
                        struct osc_io *oio, struct osc_page *opg,
                        enum cl_req_type crt)
{
        struct osc_async_page *oap = &opg->ops_oap;
        struct client_obd     *cli = oap->oap_cli;
        int flags = 0;

        LINVRNT(osc_page_protected(env, opg,
                                   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

        oap->oap_page_off = opg->ops_from;
        oap->oap_count    = opg->ops_to - opg->ops_from;
        /* Give a hint to OST that requests are coming from kswapd - bug19529 */
        if (cfs_memory_pressure_get())
                oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
        oap->oap_brw_flags |= OBD_BRW_SYNC;
        if (osc_io_srvlock(oio))
                oap->oap_brw_flags |= OBD_BRW_SRVLOCK;

        oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        if (!client_is_remote(osc_export(cl2osc(opg->ops_cl.cpl_obj))) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
        }

        if (oap->oap_cmd & OBD_BRW_READ)
                flags = ASYNC_COUNT_STABLE;
        else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
                osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);

        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= OSC_FLAGS | flags;
        cfs_spin_unlock(&oap->oap_lock);

        osc_oap_to_pending(oap);
        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}
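
/*
 * Note the symmetry with osc_page_cache_add() above: the opportunistic path
 * queues a page with OBD_BRW_ASYNC and leaves it to the asynchronous
 * writeback machinery, while this immediate path marks the oap OBD_BRW_SYNC,
 * moves it to the pending list itself via osc_oap_to_pending() and pins the
 * page for the duration of the transfer with
 * osc_page_transfer_get(opg, "transfer\0imm").
 */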