/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#ifndef __KERNEL__
# error This file is kernel only.
#endif

#include <obd.h>
#include <lustre_lite.h>

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

static void vvp_page_fini_common(struct ccc_page *cp)
{
        cfs_page_t *vmpage = cp->cpg_page;

        LASSERT(vmpage != NULL);
        page_cache_release(vmpage);
        OBD_SLAB_FREE_PTR(cp, vvp_page_kmem);
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        cfs_page_t      *vmpage = cp->cpg_page;

        /*
         * vmpage->private was already cleared when page was moved into
         * CPS_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(cp);
}

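/*
 * Implements cl_page_operations::cpo_own(): takes the VM page lock and waits
 * out any writeback in progress, giving the calling IO exclusive ownership
 * of the page. In nonblock mode the lock is only try-acquired, and -EAGAIN
 * is returned if the page is locked or under writeback.
 */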
static int vvp_page_own(const struct lu_env *env,
                        const struct cl_page_slice *slice, struct cl_io *io,
                        int nonblock)
{
        struct ccc_page *vpg    = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vpg->cpg_page;

        LASSERT(vmpage != NULL);
        if (nonblock) {
                if (TestSetPageLocked(vmpage))
                        return -EAGAIN;

                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        return -EAGAIN;
                }

                return 0;
        }

        lock_page(vmpage);
        wait_on_page_writeback(vmpage);
        return 0;
}

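/*
 * Implements cl_page_operations::cpo_assume(): the caller already holds the
 * VM page lock (e.g., the page came locked from the kernel VM), so only
 * pending writeback has to be waited out.
 */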
static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        unlock_page(cl2vm_page(slice));
}

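/*
 * Implements cl_page_operations::cpo_discard(): throws the page out of the
 * page cache. A read-ahead page that was never consumed is counted in the
 * read-ahead statistics before being truncated.
 */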
static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        cfs_page_t           *vmpage  = cl2vm_page(slice);
        struct address_space *mapping = vmpage->mapping;
        struct ccc_page      *cpg     = cl2ccc_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
                ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);

        /*
         * truncate_complete_page() calls
         * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
         */
        truncate_complete_page(mapping, vmpage);
}

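/*
 * Implements cl_page_operations::cpo_unmap(): tears down all user-space
 * mappings covering this page's file range, forcing subsequent accesses
 * back through the fault path.
 */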
static int vvp_page_unmap(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        __u64       offset = vmpage->index << CFS_PAGE_SHIFT;

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        /*
         * XXX is it safe to call this with the page lock held?
         */
        ll_teardown_mmaps(vmpage->mapping, offset, offset + CFS_PAGE_SIZE);
        return 0;
}

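/*
 * Implements cl_page_operations::cpo_delete(): called when the page leaves
 * the cl cache; severs the vmpage->private link back to the cl_page.
 */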
static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        cfs_page_t       *vmpage = cl2vm_page(slice);
        struct inode     *inode  = vmpage->mapping->host;
        struct cl_object *obj    = slice->cpl_obj;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
        LASSERT(inode == ccc_object_inode(obj));

        vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * Reference from vmpage to cl_page is removed, but the reference back
         * is still here. It is removed later in vvp_page_fini().
         */
}

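/*
 * Implements cl_page_operations::cpo_export(): mirrors the cl_page uptodate
 * state into the PG_uptodate flag of the VM page.
 */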
static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            int uptodate)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}

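/*
 * Implements cl_page_operations::cpo_is_vmlocked(): reports the state of
 * the VM page lock, -EBUSY meaning "locked" and -ENODATA meaning "not
 * locked" by CLIO convention.
 */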
static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip the page already marked as PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

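/*
 * Implements cl_page_operations::cpo_prep() for the write path: moves the
 * page from dirty to writeback before it is handed to the transfer engine,
 * and reports -EALREADY for a page that turned out not to be dirty.
 */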
static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        struct cl_page *cp     = slice->cpl_page;
        cfs_page_t     *vmpage = cl2vm_page(slice);
        int result;

        if (clear_page_dirty_for_io(vmpage)) {
                set_page_writeback(vmpage);
                vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
                result = 0;

                /* only turn on writeback for async write. */
                if (cp->cp_sync_io == NULL)
                        unlock_page(vmpage);
        } else
                result = -EALREADY;

        return result;
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes inode as a separate argument, because inode on which error is to
 * be set can be different from \a vmpage inode in case of direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, cfs_page_t *vmpage, int ioret)
{
        if (ioret == 0)
                ClearPageError(vmpage);
        else if (ioret != -EINTR) {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);
        }
}

static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        cfs_page_t      *vmpage = cp->cpg_page;
        struct cl_page  *page   = cl_page_top(slice->cpl_page);
        struct inode    *inode  = ccc_object_inode(page->cp_obj);
        ENTRY;

        LASSERT(PageLocked(vmpage));
        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (cp->cpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                if (!cp->cpg_defer_uptodate)
                        cl_page_export(env, page, 1);
        } else
                cp->cpg_defer_uptodate = 0;

        if (page->cp_sync_io == NULL)
                unlock_page(vmpage);

        EXIT;
}

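/*
 * Implements cl_page_operations::cpo_completion() for the write path: takes
 * the page off the write-pending accounting, records the transfer result
 * against the inode mapping for asynchronous writes (no application is
 * waiting on those), and ends writeback on the VM page.
 */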
static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        cfs_page_t      *vmpage = cp->cpg_page;
        ENTRY;

        LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
        LASSERT(PageWriteback(vmpage));

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        /*
         * TODO: if we hit a recoverable error (e.g. -ENOMEM), it would make
         * sense to re-add the page to the oap pending list, so that it does
         * not have to be taken off the SoM write pending list. To implement
         * this, it is enough to return a non-zero value from the
         * ->cpo_completion() method; the underlying transfer would be
         * notified and would re-add the page to the pending transfer
         * queue. -jay
         */

        cp->cpg_write_queued = 0;
        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);

        /*
         * Only mark the page in error when it's an async write, because
         * applications won't wait for the IO to finish.
         */
        if (pg->cp_sync_io == NULL)
                vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);

        end_page_writeback(vmpage);
        EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out as
 * a part of transfer. This function try-locks the page. If try-lock failed,
 * page is owned by some concurrent IO, and should be skipped (this is bad,
 * but hopefully rare situation, as it usually results in transfer being
 * shorter than possible).
 *
 * \retval 0       success, page can be placed into transfer
 *
 * \retval -EAGAIN page is either used by concurrent IO or has been
 *                 truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        cfs_page_t     *vmpage = cl2vm_page(slice);
        struct cl_page *pg     = slice->cpl_page;
        int result;

        result = -EAGAIN;
        /* we're trying to write, but the page is locked.. come back later */
        if (!TestSetPageLocked(vmpage)) {
                if (pg->cp_state == CPS_CACHED) {
                        /*
                         * We can cancel IO if page wasn't dirty after all.
                         */
                        clear_page_dirty_for_io(vmpage);
                        /*
                         * This actually clears the dirty bit in the radix
                         * tree.
                         */
                        set_page_writeback(vmpage);
                        vvp_write_pending(cl2ccc(slice->cpl_obj),
                                          cl2ccc_page(slice));
                        CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
                        result = 0;
                } else
                        /*
                         * Page was concurrently truncated.
                         */
                        LASSERT(pg->cp_state == CPS_FREEING);
        }
        RETURN(result);
}

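/*
 * Implements cl_page_operations::cpo_print(): dumps the VVP-private state
 * of the page (defer-uptodate, ra-used, write-queued) and, if a VM page is
 * attached, its flags, reference counts, private field and LRU linkage.
 */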
static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct ccc_page *vp     = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vp->cpg_page;

        (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
                   "vm@%p ",
                   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
                   vp->cpg_write_queued, vmpage);
        if (vmpage != NULL) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }
        (*printer)(env, cookie, "\n");
        return 0;
}

static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_unmap         = vvp_page_unmap,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_page_prep_read,
                        .cpo_completion = vvp_page_completion_read,
                        .cpo_make_ready = ccc_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_page_prep_write,
                        .cpo_completion = vvp_page_completion_write,
                        .cpo_make_ready = vvp_page_make_ready,
                }
        }
};

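/*****************************************************************************
 *
 * Transient page operations.
 *
 * Transient (CPT_TRANSIENT) pages wrap user buffers for direct IO instead
 * of living in the page cache; they are not vm-locked, and the verification
 * below relies on the inode mutex being held across the IO instead.
 *
 */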
static void vvp_transient_page_verify(const struct cl_page *page)
{
        struct inode *inode = ccc_object_inode(page->cp_obj);

        /* The try-lock must fail, i.e. the caller already holds i_mutex. */
        LASSERT(!TRYLOCK_INODE_MUTEX(inode));
        /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
}

static int vvp_transient_page_own(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused, int nonblock)
{
        vvp_transient_page_verify(slice->cpl_page);
        return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        vvp_transient_page_verify(slice->cpl_page);

        /*
         * For transient pages, simply remove the page from the radix tree.
         */
        cl_page_delete(env, page);
}

static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode *inode = ccc_object_inode(slice->cpl_obj);
        int locked;

        locked = !TRYLOCK_INODE_MUTEX(inode);
        if (!locked)
                /* The try-lock succeeded, so the mutex was free; drop it. */
                UNLOCK_INODE_MUTEX(inode);
        return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              int ioret)
{
        vvp_transient_page_verify(slice->cpl_page);
}

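/*
 * Releases the VVP slice of a transient page. The inode mutex must still
 * be held here, so that the cob_transient_pages counter is updated under
 * the same lock that protects it in vvp_page_init().
 */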
static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct ccc_page   *cp    = cl2ccc_page(slice);
        struct cl_page    *clp   = slice->cpl_page;
        struct ccc_object *clobj = cl2ccc(clp->cp_obj);

        vvp_page_fini_common(cp);
        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
        clobj->cob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own           = vvp_transient_page_own,
        .cpo_assume        = vvp_transient_page_assume,
        .cpo_unassume      = vvp_transient_page_unassume,
        .cpo_disown        = vvp_transient_page_disown,
        .cpo_discard       = vvp_transient_page_discard,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_fini          = vvp_transient_page_fini,
        .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = ccc_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = ccc_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                }
        }
};

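/*
 * Allocates a ccc_page for \a vmpage and adds it to \a page as the VVP
 * slice, wiring in vvp_page_ops for cacheable pages and
 * vvp_transient_page_ops for transient (direct IO) pages. This is reached
 * from the cl_page allocation path; roughly (a sketch of the intended call
 * chain, not verbatim source):
 *
 *     cl_page_find() -> obj->co_ops->coo_page_init() -> vvp_page_init()
 */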
struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct ccc_page *cpg;
        int result;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
        if (cpg != NULL) {
                cpg->cpg_page = vmpage;
                page_cache_get(vmpage);

                CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
                if (page->cp_type == CPT_CACHEABLE) {
                        SetPagePrivate(vmpage);
                        vmpage->private = (unsigned long)page;
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_page_ops);
                } else {
                        struct ccc_object *clobj = cl2ccc(obj);

                        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_transient_page_ops);
                        clobj->cob_transient_pages++;
                }
                result = 0;
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}