/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#ifndef __KERNEL__
# error This file is kernel only.
#endif

#include <obd.h>
#include <lustre_lite.h>

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

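/*
 * A cl_page is assembled from per-layer "slices". The slice contributed by
 * this layer is struct ccc_page, which carries the reference to the backing
 * VM page (cfs_page_t). The methods below implement cl_page_operations for
 * that slice, mostly by delegating to the kernel's page cache primitives.
 */
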
static void vvp_page_fini_common(struct ccc_page *cp)
{
        cfs_page_t *vmpage = cp->cpg_page;

        LASSERT(vmpage != NULL);
        page_cache_release(vmpage);
        OBD_SLAB_FREE_PTR(cp, vvp_page_kmem);
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        cfs_page_t      *vmpage = cp->cpg_page;

        /*
         * vmpage->private was already cleared when the page was moved into
         * CPS_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(cp);
}

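/*
 * Page ownership at the VVP layer maps directly onto the VM page lock:
 * owning a cl_page means holding PG_locked on the corresponding vmpage,
 * and a blocking own additionally waits out any writeback in flight.
 */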
static int vvp_page_own(const struct lu_env *env,
                        const struct cl_page_slice *slice, struct cl_io *io,
                        int nonblock)
{
        struct ccc_page *vpg    = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vpg->cpg_page;

        LASSERT(vmpage != NULL);
        if (nonblock) {
                if (!trylock_page(vmpage))
                        return -EAGAIN;

                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        return -EAGAIN;
                }

                return 0;
        }

        lock_page(vmpage);
        wait_on_page_writeback(vmpage);
        return 0;
}

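/*
 * "Assume" takes over a page whose vmpage lock is already held by the
 * caller (e.g. a page the VM locked before handing it to ->readpage()),
 * so only the writeback wait is performed here.
 */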
static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        unlock_page(cl2vm_page(slice));
}

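/*
 * Discard is called for pages being truncated away. A page that was
 * populated by readahead (cpg_defer_uptodate set) but never actually
 * consumed (cpg_ra_used unset) is counted as wasted readahead in the
 * statistics before the page is dropped.
 */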
static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        cfs_page_t           *vmpage  = cl2vm_page(slice);
        struct address_space *mapping;
        struct ccc_page      *cpg     = cl2ccc_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        mapping = vmpage->mapping;

        if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
                ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);

        /*
         * truncate_complete_page() calls
         * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
         */
        truncate_complete_page(mapping, vmpage);
}

static int vvp_page_unmap(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        __u64       offset;

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        offset = vmpage->index << CFS_PAGE_SHIFT;

        /*
         * XXX is it safe to call this with the page lock held?
         */
        ll_teardown_mmaps(vmpage->mapping, offset, offset + CFS_PAGE_SIZE);
        return 0;
}

static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        cfs_page_t       *vmpage = cl2vm_page(slice);
        struct inode     *inode  = vmpage->mapping->host;
        struct cl_object *obj    = slice->cpl_obj;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
        LASSERT(inode == ccc_object_inode(obj));

        vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * The reference from vmpage to cl_page is removed, but the reference
         * back is still here. It is removed later in vvp_page_fini().
         */
}

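/*
 * cpo_export propagates the cl_page uptodate state into the VM page's
 * PG_uptodate flag.
 */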
static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            int uptodate)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}

static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip a page that is already marked PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

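/*
 * A page queued for write-out must already be locked and cleaned
 * (!PageDirty) by the caller; see vvp_page_make_ready(). Here it is only
 * flagged as under writeback and tracked by vvp_write_pending(), the
 * pending-write accounting mentioned in the SoM TODO below.
 */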
static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageDirty(vmpage));

        set_page_writeback(vmpage);
        vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));

        return 0;
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can be different from \a vmpage's inode in the
 * case of direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, cfs_page_t *vmpage, int ioret)
{
        struct ccc_object *obj = cl_inode2ccc(inode);

        if (ioret == 0) {
                ClearPageError(vmpage);
                obj->cob_discard_page_warned = 0;
        } else {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);

                if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
                    obj->cob_discard_page_warned == 0) {
                        obj->cob_discard_page_warned = 1;
                        ll_dirty_page_discard_warn(vmpage, ioret);
                }
        }
}

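/*
 * Note: the AS_EIO/AS_ENOSPC bits set in mapping->flags by
 * vvp_vmpage_error() are what generic kernel code (e.g.
 * filemap_fdatawait()) later turns into -EIO/-ENOSPC returned from
 * fsync() and friends.
 */
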
static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        cfs_page_t      *vmpage = cp->cpg_page;
        struct cl_page  *page   = cl_page_top(slice->cpl_page);
        struct inode    *inode  = ccc_object_inode(page->cp_obj);
        ENTRY;

        LASSERT(PageLocked(vmpage));
        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (cp->cpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                if (!cp->cpg_defer_uptodate)
                        cl_page_export(env, page, 1);
        } else
                cp->cpg_defer_uptodate = 0;

        if (page->cp_sync_io == NULL)
                unlock_page(vmpage);

        EXIT;
}

static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        cfs_page_t      *vmpage = cp->cpg_page;
        ENTRY;

        LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
        LASSERT(PageWriteback(vmpage));

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        /*
         * TODO: It actually makes sense to add the page back onto the oap
         * pending list (so that we do not need to take it off the SoM write
         * pending list) when we hit a recoverable error such as -ENOMEM.
         * To implement this, we just need to return a non-zero value from
         * the ->cpo_completion() method. The underlying transfer should be
         * notified and then re-add the page into the pending transfer
         * queue. -jay
         */

        cp->cpg_write_queued = 0;
        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);

        /*
         * Only mark the page with an error when the write is asynchronous,
         * because in that case applications won't wait for the IO to finish
         * and the error cannot be returned to them directly.
         */
        if (pg->cp_sync_io == NULL)
                vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);

        end_page_writeback(vmpage);
        EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as a part of transfer. The page is locked, its dirty bit is cleared, and
 * it is flagged as under writeback before being handed to the transfer.
 *
 * \retval 0         success, page can be placed into transfer
 *
 * \retval -EALREADY page is either in use by concurrent IO or was already
 *                   readied for pageout. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result = 0;

        lock_page(vmpage);
        if (clear_page_dirty_for_io(vmpage)) {
                LASSERT(pg->cp_state == CPS_CACHED);
                /* This actually clears the dirty bit in the radix
                 * tree. */
                set_page_writeback(vmpage);
                vvp_write_pending(cl2ccc(slice->cpl_obj),
                                  cl2ccc_page(slice));
                CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
        } else if (pg->cp_state == CPS_PAGEOUT) {
                /* is it possible for osc_flush_async_page() to already
                 * make it ready? */
                result = -EALREADY;
        } else {
                CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
                              pg->cp_state);
                LBUG();
        }
        unlock_page(vmpage);
        RETURN(result);
}

static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct ccc_page *vp     = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vp->cpg_page;

        (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
                   "vm@%p ",
                   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
                   vp->cpg_write_queued, vmpage);
        if (vmpage != NULL) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }
        (*printer)(env, cookie, "\n");
        return 0;
}

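/*
 * Method table for cacheable pages. The generic cl_page code (cl_page_own(),
 * cl_page_prep(), cl_page_completion(), ...) dispatches into this vector.
 * Reads have no meaningful make_ready (hence ccc_fail): only dirty cached
 * pages are opportunistically pulled into a transfer this way.
 */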
static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_unmap         = vvp_page_unmap,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_page_prep_read,
                        .cpo_completion = vvp_page_completion_read,
                        .cpo_make_ready = ccc_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_page_prep_write,
                        .cpo_completion = vvp_page_completion_write,
                        .cpo_make_ready = vvp_page_make_ready,
                }
        }
};

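/*
 * Transient pages (CPT_TRANSIENT) are used for direct IO: they are not
 * inserted into the page cache and are never owned through the vmpage lock.
 * They are serialized by the inode's i_mutex instead, which is what
 * vvp_transient_page_verify() checks (note that a failing mutex_trylock()
 * only proves that *someone* holds the mutex).
 */
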
static void vvp_transient_page_verify(const struct cl_page *page)
{
        struct inode *inode = ccc_object_inode(page->cp_obj);

        LASSERT(!mutex_trylock(&inode->i_mutex));
}

static int vvp_transient_page_own(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused, int nonblock)
{
        vvp_transient_page_verify(slice->cpl_page);
        return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        vvp_transient_page_verify(slice->cpl_page);

        /*
         * For transient pages, remove the page from the radix tree.
         */
        cl_page_delete(env, page);
}

static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode *inode = ccc_object_inode(slice->cpl_obj);
        int           locked;

        locked = !mutex_trylock(&inode->i_mutex);
        if (!locked)
                mutex_unlock(&inode->i_mutex);
        return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              int ioret)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct ccc_object *clobj = cl2ccc(clp->cp_obj);

        vvp_page_fini_common(cp);
        LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
        clobj->cob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own           = vvp_transient_page_own,
        .cpo_assume        = vvp_transient_page_assume,
        .cpo_unassume      = vvp_transient_page_unassume,
        .cpo_disown        = vvp_transient_page_disown,
        .cpo_discard       = vvp_transient_page_discard,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_fini          = vvp_transient_page_fini,
        .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = ccc_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = ccc_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                }
        }
};

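/*
 * Entry point from the generic client code: allocate the VVP slice for a
 * newly created cl_page and attach it with the ops vector matching the page
 * type (cacheable vs. transient).
 */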
struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct ccc_page *cpg;
        int result;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
        if (cpg != NULL) {
                cpg->cpg_page = vmpage;
                page_cache_get(vmpage);

                CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
                if (page->cp_type == CPT_CACHEABLE) {
                        SetPagePrivate(vmpage);
                        vmpage->private = (unsigned long)page;
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_page_ops);
                } else {
                        struct ccc_object *clobj = cl2ccc(obj);

                        LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_transient_page_ops);
                        clobj->cob_transient_pages++;
                }
                result = 0;
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}