/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

/*
 * Comment out osc_page_protected because it may sleep inside the
 * client_obd_list_lock.
 * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
 *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
 *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
 */
#if 0
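/**
 * Returns true iff the page owned by \a opg is covered by a granted extent
 * DLM lock of at least \a mode (LCK_PW is always accepted, as a PW lock
 * protects both reads and writes). With \a pending, locks with a blocking
 * callback pending are matched as well; \a unref is passed through to
 * osc_match_base().
 */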
static int osc_page_is_dlocked(const struct lu_env *env,
                               const struct osc_page *opg,
                               enum cl_lock_mode mode, int pending, int unref)
{
        struct cl_page         *page;
        struct osc_object      *obj;
        struct osc_thread_info *info;
        struct ldlm_res_id     *resname;
        struct lustre_handle   *lockh;
        ldlm_policy_data_t     *policy;
        ldlm_mode_t             dlmmode;
        int                     flags;

        info = osc_env_info(env);
        resname = &info->oti_resname;
        policy = &info->oti_policy;
        lockh = &info->oti_handle;
        page = opg->ops_cl.cpl_page;
        obj = cl2osc(opg->ops_cl.cpl_obj);

        flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
        if (pending)
                flags |= LDLM_FL_CBPENDING;

        dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
        osc_lock_build_res(env, obj, resname);
        osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
        return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
                              dlmmode, &flags, NULL, lockh, unref);
}

/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        struct cl_object_header *hdr;
        struct cl_lock          *scan;
        struct cl_page          *page;
        struct cl_lock_descr    *descr;
        int result;

        LINVRNT(!opg->ops_temp);

        page = opg->ops_cl.cpl_page;
        if (page->cp_owner != NULL &&
            cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
                /*
                 * If IO is done without locks (liblustre, or lloop), lock is
                 * not required.
                 */
                result = 1;
        else
                /* otherwise check for a DLM lock */
                result = osc_page_is_dlocked(env, opg, mode, 1, unref);
        if (result == 0) {
                /* maybe this page is a part of a lockless io? */
                hdr = cl_object_header(opg->ops_cl.cpl_obj);
                descr = &osc_env_info(env)->oti_descr;
                descr->cld_mode  = mode;
                descr->cld_start = page->cp_index;
                descr->cld_end   = page->cp_index;
                cfs_spin_lock(&hdr->coh_lock_guard);
                cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                        /*
                         * Lock-less sub-lock has to be either in HELD state
                         * (when io is actively going on), or in CACHED state,
                         * when top-lock is being unlocked:
                         * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
                         */
                        if ((scan->cll_state == CLS_HELD ||
                             scan->cll_state == CLS_CACHED) &&
                            cl_lock_ext_match(&scan->cll_descr, descr)) {
                                struct osc_lock *olck;

                                olck = osc_lock_at(scan);
                                result = osc_lock_is_lockless(olck);
                                break;
                        }
                }
                cfs_spin_unlock(&hdr->coh_lock_guard);
        }
        return result;
}
#else
static int osc_page_protected(const struct lu_env *env,
                              const struct osc_page *opg,
                              enum cl_lock_mode mode, int unref)
{
        return 1;
}
#endif

/*****************************************************************************
 *
 * Page operations.
 *
 */
static void osc_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        CDEBUG(D_TRACE, "%p\n", opg);
        OBD_SLAB_FREE_PTR(opg, osc_page_kmem);
}

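/**
 * Pins the page for a transfer: takes a cl_page reference, tagged with
 * \a label in the page's lu_ref tracking, and marks the page as
 * transfer-pinned. The pin is released by osc_page_transfer_put().
 */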
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        LASSERT(!opg->ops_transfer_pinned);
        cl_page_get(page);
        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}

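/**
 * Releases the transfer pin taken by osc_page_transfer_get(), if it is
 * still held: drops the "transfer" lu_ref together with the cl_page
 * reference.
 */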
static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

        if (opg->ops_transfer_pinned) {
                lu_ref_del(&page->cp_reference, "transfer", page);
                opg->ops_transfer_pinned = 0;
                cl_page_put(env, page);
        }
}

/**
 * This is called once for every page when it is submitted for a transfer
 * either opportunistic (osc_page_cache_add()), or immediate
 * (osc_io_submit_page()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj;

        LINVRNT(cl_page_is_vmlocked(env, opg->ops_cl.cpl_page));

        obj = cl2osc(opg->ops_cl.cpl_obj);
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
        opg->ops_submitter = cfs_current();
        cfs_spin_unlock(&obj->oo_seatbelt);
}

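/**
 * Implements cl_page_operations::cpo_cache_add() for CRT_WRITE: queues the
 * page for opportunistic asynchronous write-out. The page is pinned for
 * transfer first, and the pin is dropped again if queueing fails. Local
 * clients with CFS_CAP_SYS_RESOURCE bypass quota checks (OBD_BRW_NOQUOTA).
 */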
static int osc_page_cache_add(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct osc_page   *opg = cl2osc_page(slice);
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        int result;
        /* All cacheable IO is async-capable */
        int brw_flags = OBD_BRW_ASYNC;
        int noquota = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
        ENTRY;

        /* Set the OBD_BRW_SRVLOCK before the page is queued. */
        brw_flags |= opg->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
        if (!client_is_remote(osc_export(obj)) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                brw_flags |= OBD_BRW_NOQUOTA;
                noquota = OBD_BRW_NOQUOTA;
        }

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, osc_export(obj), NULL, obj->oo_oinfo,
                                    &opg->ops_oap, OBD_BRW_WRITE | noquota,
                                    0, 0, brw_flags, 0);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);
        RETURN(result);
}

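/**
 * Fills \a policy with the LDLM extent covering pages [\a start, \a end]
 * of \a obj, converting page indices into a byte range. For example, with
 * a 4096-byte page size, start = end = 1 yields the extent [4096, 8191].
 */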
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
                      pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof *policy);
        policy->l_extent.start = cl_offset(obj, start);
        policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}

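/**
 * Implements cl_page_operations::cpo_is_under_lock(). Returns -EBUSY if a
 * lock covering the page exists (i.e. the page is protected), -ENODATA
 * otherwise.
 */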
static int osc_page_is_under_lock(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused)
{
        struct cl_lock *lock;
        int             result;

        ENTRY;
        lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
                               NULL, 1, 0);
        if (lock != NULL) {
                cl_lock_put(env, lock);
                result = -EBUSY;
        } else
                result = -ENODATA;
        RETURN(result);
}

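/**
 * cl_page_operations::cpo_cache_add() method for the CRT_READ path. Reads
 * are never cached opportunistically, so this handler is not supposed to
 * be reached; it exists only as a guard in osc_page_ops.
 */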
static int osc_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice,
                         struct cl_io *unused)
{
        /*
         * Cached read?
         */
        LBUG();
        return 0;
}

static const char *osc_list(cfs_list_t *head)
{
        return cfs_list_empty(head) ? "-" : "+";
}

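/**
 * Returns how long the page has been in flight, in cfs_time_t units, or 0
 * if it has not been submitted yet.
 */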
static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
{
        if (opg->ops_submit_time == 0)
                return 0;

        return (cfs_time_current() - opg->ops_submit_time);
}

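/**
 * Implements cl_page_operations::cpo_print(). Dumps the state of the async
 * page, the osc page, the client_obd RPC queues and the per-stripe
 * (lov_oinfo) queues; the numbered groups in the output correspond to the
 * "1<...>" .. "5<...>" sections of the format string.
 */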
static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        struct osc_object     *obj = cl2osc(slice->cpl_obj);
        struct client_obd     *cli = &osc_export(obj)->exp_obd->u.cli;
        struct lov_oinfo      *loi = obj->oo_oinfo;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
                          "1< %#x %d %u %s %s %s > "
                          "2< "LPU64" %u %#x %#x | %p %p %p %p %p > "
                          "3< %s %p %d %lu %d > "
                          "4< %d %d %d %lu %s | %s %s %s %s > "
                          "5< %s %s %s %s | %d %s %s | %d %s %s>\n",
                          opg,
                          /* 1 */
                          oap->oap_magic, oap->oap_cmd,
                          oap->oap_interrupted,
                          osc_list(&oap->oap_pending_item),
                          osc_list(&oap->oap_urgent_item),
                          osc_list(&oap->oap_rpc_item),
                          /* 2 */
                          oap->oap_obj_off, oap->oap_page_off,
                          oap->oap_async_flags, oap->oap_brw_flags,
                          oap->oap_request,
                          oap->oap_cli, oap->oap_loi, oap->oap_caller_ops,
                          oap->oap_caller_data,
                          /* 3 */
                          osc_list(&opg->ops_inflight),
                          opg->ops_submitter, opg->ops_transfer_pinned,
                          osc_submit_duration(opg), opg->ops_srvlock,
                          /* 4 */
                          cli->cl_r_in_flight, cli->cl_w_in_flight,
                          cli->cl_max_rpcs_in_flight,
                          cli->cl_avail_grant,
                          osc_list(&cli->cl_cache_waiters),
                          osc_list(&cli->cl_loi_ready_list),
                          osc_list(&cli->cl_loi_hp_ready_list),
                          osc_list(&cli->cl_loi_write_list),
                          osc_list(&cli->cl_loi_read_list),
                          /* 5 */
                          osc_list(&loi->loi_ready_item),
                          osc_list(&loi->loi_hp_ready_item),
                          osc_list(&loi->loi_write_item),
                          osc_list(&loi->loi_read_item),
                          loi->loi_read_lop.lop_num_pending,
                          osc_list(&loi->loi_read_lop.lop_pending),
                          osc_list(&loi->loi_read_lop.lop_urgent),
                          loi->loi_write_lop.lop_num_pending,
                          osc_list(&loi->loi_write_lop.lop_pending),
                          osc_list(&loi->loi_write_lop.lop_urgent));
}

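/**
 * Implements cl_page_operations::cpo_delete(): releases any transfer pin
 * still held on the page, tears down the underlying async page and unlinks
 * the page from the object's in-flight list.
 */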
static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_object     *obj = cl2osc(opg->ops_cl.cpl_obj);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc;

        LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

        ENTRY;
        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(osc_export(obj), NULL, obj->oo_oinfo, oap);
        if (rc) {
                CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
                              "Trying to teardown failed: %d\n", rc);
                LASSERT(0);
        }
        cfs_spin_lock(&obj->oo_seatbelt);
        cfs_list_del_init(&opg->ops_inflight);
        cfs_spin_unlock(&obj->oo_seatbelt);
        EXIT;
}

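/**
 * Implements cl_page_operations::cpo_clip(): restricts the transfer to the
 * byte range [\a from, \a to) of the page and marks the transfer count as
 * stable (ASYNC_COUNT_STABLE).
 */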
void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
                   int from, int to)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        opg->ops_from = from;
        opg->ops_to   = to;
        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
        cfs_spin_unlock(&oap->oap_lock);
}

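/**
 * Implements cl_page_operations::cpo_cancel(): if the page is still pinned
 * for transfer, tries to interrupt the in-flight async page under the
 * client's cl_loi_list_lock.
 */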
static int osc_page_cancel(const struct lu_env *env,
                           const struct cl_page_slice *slice)
{
        struct osc_page       *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        int rc = 0;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

        client_obd_list_lock(&oap->oap_cli->cl_loi_list_lock);
        /* Check if the transfer of this page has completed, or was never
         * queued in the first place. */
        if (opg->ops_transfer_pinned)
                /* FIXME: may not be interrupted.. */
                rc = osc_oap_interrupted(env, oap);
        LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
        client_obd_list_unlock(&oap->oap_cli->cl_loi_list_lock);
        return rc;
}

static const struct cl_page_operations osc_page_ops = {
        .cpo_fini          = osc_page_fini,
        .cpo_print         = osc_page_print,
        .cpo_delete        = osc_page_delete,
        .cpo_is_under_lock = osc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_cache_add = osc_page_fail
                },
                [CRT_WRITE] = {
                        .cpo_cache_add = osc_page_cache_add
                }
        },
        .cpo_clip          = osc_page_clip,
        .cpo_cancel        = osc_page_cancel
};

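/**
 * obd_async_page_ops::ap_make_ready() method: invoked by the RPC engine
 * when a queued dirty page is about to be sent; makes the page ready for
 * write-out via cl_page_make_ready() and, on success, records the
 * submission time later reported by osc_submit_duration().
 */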
static int osc_make_ready(const struct lu_env *env, void *data, int cmd)
{
        struct osc_page *opg = data;
        struct cl_page  *page = cl_page_top(opg->ops_cl.cpl_page);
        int result;

        LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
        LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 1));

        ENTRY;
        result = cl_page_make_ready(env, page, CRT_WRITE);
        if (result == 0)
                opg->ops_submit_time = cfs_time_current();
        RETURN(result);
}

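/**
 * obd_async_page_ops::ap_refresh_count() method: recomputes, under the
 * object attribute lock, how many bytes of the page should actually be
 * written, based on the known minimum file size (kms), to catch races with
 * truncate and sub-page writes at the end of file.
 */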
static int osc_refresh_count(const struct lu_env *env, void *data, int cmd)
{
        struct cl_page   *page;
        struct osc_page  *osc = data;
        struct cl_object *obj;
        struct cl_attr   *attr = &osc_env_info(env)->oti_attr;
        int result;
        loff_t kms;

        LINVRNT(osc_page_protected(env, osc, CLM_READ, 1));

        /* readpage queues with _COUNT_STABLE, shouldn't get here. */
        LASSERT(!(cmd & OBD_BRW_READ));
        LASSERT(osc != NULL);
        page = osc->ops_cl.cpl_page;
        obj = osc->ops_cl.cpl_obj;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result < 0)
                return result;
        kms = attr->cat_kms;
        if (cl_offset(obj, page->cp_index) >= kms)
                /* catch race with truncate */
                return 0;
        else if (cl_offset(obj, page->cp_index + 1) > kms)
                /* catch sub-page write at end of file */
                return kms % CFS_PAGE_SIZE;
        else
                return CFS_PAGE_SIZE;
}

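/**
 * obd_async_page_ops::ap_completion() method: called when the read or
 * write RPC covering this page completes with status \a rc. Finalizes the
 * cl_req linkage, clears the async flags, removes the page from the
 * object's in-flight list, updates lockless IO statistics, and finally
 * drops the transfer pin taken in osc_page_transfer_get().
 */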
static int osc_completion(const struct lu_env *env,
                          void *data, int cmd, struct obdo *oa, int rc)
{
        struct osc_page       *opg  = data;
        struct osc_async_page *oap  = &opg->ops_oap;
        struct cl_page        *page = cl_page_top(opg->ops_cl.cpl_page);
        struct osc_object     *obj  = cl2osc(opg->ops_cl.cpl_obj);
        enum cl_req_type crt;

        LINVRNT(osc_page_protected(env, opg, CLM_READ, 1));
        LINVRNT(cl_page_is_vmlocked(env, page));

        ENTRY;

        cmd &= ~OBD_BRW_NOQUOTA;
        LASSERT(equi(page->cp_state == CPS_PAGEIN,  cmd == OBD_BRW_READ));
        LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
        LASSERT(opg->ops_transfer_pinned);

        /*
         * page->cp_req can be NULL if io submission failed before
         * cl_req was allocated.
         */
        if (page->cp_req != NULL)
                cl_req_page_done(env, page);
        LASSERT(page->cp_req == NULL);

        /* As the transfer for this page is being done, clear the flags */
        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags = 0;
        cfs_spin_unlock(&oap->oap_lock);

        crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
        /* Clear opg->ops_transfer_pinned before VM lock is released. */
        opg->ops_transfer_pinned = 0;

        cfs_spin_lock(&obj->oo_seatbelt);
        LASSERT(opg->ops_submitter != NULL);
        LASSERT(!cfs_list_empty(&opg->ops_inflight));
        cfs_list_del_init(&opg->ops_inflight);
        cfs_spin_unlock(&obj->oo_seatbelt);

        opg->ops_submit_time = 0;

        cl_page_completion(env, page, crt, rc);

        /* statistic */
        if (rc == 0 && oap->oap_brw_flags & OBD_BRW_SRVLOCK) {
                struct lu_device *ld    = opg->ops_cl.cpl_obj->co_lu.lo_dev;
                struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
                int bytes = opg->ops_to - opg->ops_from;

                if (crt == CRT_READ)
                        stats->os_lockless_reads += bytes;
                else
                        stats->os_lockless_writes += bytes;
        }

        /*
         * This has to be the last operation with the page, as locks are
         * released in cl_page_completion() and nothing except for the
         * reference counter protects page from concurrent reclaim.
         */
        lu_ref_del(&page->cp_reference, "transfer", page);
        /*
         * As page->cp_obj is pinned by a reference from page->cp_req, it is
         * safe to call cl_page_put() without risking object destruction in a
         * non-blocking context.
         */
        cl_page_put(env, page);
        RETURN(0);
}

static const struct obd_async_page_ops osc_async_page_ops = {
        .ap_make_ready    = osc_make_ready,
        .ap_refresh_count = osc_refresh_count,
        .ap_completion    = osc_completion
};

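/**
 * The page initialization method of osc: allocates an osc_page slice,
 * prepares the underlying async page against \a vmpage, and attaches the
 * slice to \a page. Returns ERR_PTR(result), i.e. NULL on success and an
 * ERR_PTR-encoded errno (e.g. -ENOMEM) on failure.
 */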
struct cl_page *osc_page_init(const struct lu_env *env,
                              struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page   *opg;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(opg, osc_page_kmem, CFS_ALLOC_IO);
        if (opg != NULL) {
                void *oap = &opg->ops_oap;

                opg->ops_from = 0;
                opg->ops_to   = CFS_PAGE_SIZE;

                result = osc_prep_async_page(osc_export(osc),
                                             NULL, osc->oo_oinfo, vmpage,
                                             cl_offset(obj, page->cp_index),
                                             &osc_async_page_ops,
                                             opg, (void **)&oap, 1, NULL);
                if (result == 0) {
                        struct osc_io *oio = osc_env_io(env);
                        opg->ops_srvlock = osc_io_srvlock(oio);
                        cl_page_slice_add(page, &opg->ops_cl, obj,
                                          &osc_page_ops);
                }
                /*
                 * Cannot assert osc_page_protected() here as read-ahead
                 * creates temporary pages outside of a lock.
                 */
#ifdef INVARIANT_CHECK
                opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
                CFS_INIT_LIST_HEAD(&opg->ops_inflight);
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}

/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_io_submit_page(const struct lu_env *env,
                        struct osc_io *oio, struct osc_page *opg,
                        enum cl_req_type crt)
{
        struct osc_async_page *oap = &opg->ops_oap;
        struct client_obd     *cli = oap->oap_cli;
        int flags = 0;

        LINVRNT(osc_page_protected(env, opg,
                                   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

        oap->oap_page_off = opg->ops_from;
        oap->oap_count    = opg->ops_to - opg->ops_from;
        /* Give a hint to OST that requests are coming from kswapd - bug19529 */
        if (cfs_memory_pressure_get())
                oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
        oap->oap_brw_flags |= OBD_BRW_SYNC;
        if (osc_io_srvlock(oio))
                oap->oap_brw_flags |= OBD_BRW_SRVLOCK;

        oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        if (!client_is_remote(osc_export(cl2osc(opg->ops_cl.cpl_obj))) &&
            cfs_capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
        }

        if (oap->oap_cmd & OBD_BRW_READ)
                flags = ASYNC_COUNT_STABLE;
        else if (!(oap->oap_brw_page.flag & OBD_BRW_FROM_GRANT))
                osc_enter_cache_try(env, cli, oap->oap_loi, oap, 1);

        cfs_spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= OSC_FLAGS | flags;
        cfs_spin_unlock(&oap->oap_lock);

        osc_oap_to_pending(oap);
        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}