/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#ifndef __KERNEL__
# error This file is kernel only.
#endif

#include <obd.h>
#include <lustre_lite.h>

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

static void vvp_page_fini_common(struct ccc_page *cp)
{
        cfs_page_t *vmpage = cp->cpg_page;

        LASSERT(vmpage != NULL);
        page_cache_release(vmpage);
        OBD_SLAB_FREE_PTR(cp, vvp_page_kmem);
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        cfs_page_t *vmpage  = cp->cpg_page;

        /*
         * vmpage->private was already cleared when the page was moved into
         * the CPS_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(cp);
}

static void vvp_page_own(const struct lu_env *env,
                         const struct cl_page_slice *slice, struct cl_io *io)
{
        struct ccc_page *vpg    = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vpg->cpg_page;
        int count = 0;

        LASSERT(vmpage != NULL);

        /* DEBUG CODE FOR #18881 */
        while (TestSetPageLocked(vmpage)) {
                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
                                     cfs_time_seconds(1)/10);
                if (++count > 600) {
                        CL_PAGE_DEBUG(D_ERROR, env,
                                      cl_page_top(slice->cpl_page),
                                      "XXX page %p blocked on acquiring the"
                                      " lock. process %s/%p, flags %lx, io %p\n",
                                      vmpage, current->comm, current,
                                      vmpage->flags, io);
                        libcfs_debug_dumpstack(NULL);
                        if (slice->cpl_page->cp_task) {
                                cfs_task_t *tsk = slice->cpl_page->cp_task;
                                LCONSOLE_WARN("The page was owned by %s\n",
                                              tsk->comm);
                                libcfs_debug_dumpstack(tsk);
                        }
                        LCONSOLE_WARN("Reproduced bug #18881, please contact: "
                                      "jay <jinshan.xiong@sun.com>, thanks\n");

                        lock_page(vmpage);
                        break;
                }
        }
        /* DEBUG CODE END */

        /* lock_page(vmpage); */
        wait_on_page_writeback(vmpage);
}

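/*
 * Note: the polling loop in vvp_page_own() above is a temporary stand-in
 * for the plain lock_page() call (left commented out) while bug #18881 is
 * being chased: instead of blocking, the would-be owner retries in 0.1 s
 * steps and, after roughly a minute (600 retries), dumps diagnostics about
 * the task currently holding the page before falling back to lock_page().
 * Waiting for writeback afterwards keeps cpo_own()'s contract that an
 * owned page is not under transfer.
 */
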
static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        unlock_page(cl2vm_page(slice));
}

static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        cfs_page_t           *vmpage  = cl2vm_page(slice);
        struct address_space *mapping = vmpage->mapping;
        struct ccc_page      *cpg     = cl2ccc_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
                ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);

        /*
         * truncate_complete_page() calls
         * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
         */
        truncate_complete_page(mapping, vmpage);
}

static int vvp_page_unmap(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        /* widen before shifting so the byte offset cannot overflow an
         * unsigned long on 32-bit kernels */
        __u64 offset = (__u64)vmpage->index << CFS_PAGE_SHIFT;

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        /*
         * XXX is it safe to call this with the page lock held?
         */
        ll_teardown_mmaps(vmpage->mapping, offset, offset + CFS_PAGE_SIZE);
        return 0;
}

static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        cfs_page_t       *vmpage = cl2vm_page(slice);
        struct inode     *inode  = vmpage->mapping->host;
        struct cl_object *obj    = slice->cpl_obj;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
        LASSERT(inode == ccc_object_inode(obj));

        vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * The reference from vmpage to cl_page is removed, but the reference
         * back is still here. It is removed later in vvp_page_fini().
         */
}

static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            int uptodate)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}

static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

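/*
 * A sketch of the caller-side convention (assuming the generic
 * cl_page_is_vmlocked() wrapper in cl_page.c): the wrapper turns this
 * return code into a boolean, treating -EBUSY as "the VM page is locked"
 * and -ENODATA as "it is not", roughly:
 *
 *      result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
 *      return result == -EBUSY;
 */
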
static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip pages already marked PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        int result;

        if (clear_page_dirty_for_io(vmpage)) {
                set_page_writeback(vmpage);
                vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
                result = 0;
        } else
                result = -EALREADY;
        return result;
}

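/*
 * As in vvp_page_prep_read(), -EALREADY tells the transfer machinery that
 * no IO is needed for this page: clear_page_dirty_for_io() returning zero
 * means the page was not dirty after all, so there is nothing to write.
 */
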
/**
 * Handles page transfer errors at the VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can be different from the inode of \a vmpage in
 * the case of direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, cfs_page_t *vmpage, int ioret)
{
        if (ioret == 0)
                ClearPageError(vmpage);
        else if (ioret != -EINTR) {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);
        }
}

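/*
 * For reference (a sketch of generic kernel behaviour of this era, not code
 * from this file): the AS_ENOSPC/AS_EIO bits set above are typically
 * test-and-cleared by the VM when a caller waits for writeback, e.g. on the
 * filemap_fdatawait() path:
 *
 *      if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 *              ret = -ENOSPC;
 *      if (test_and_clear_bit(AS_EIO, &mapping->flags))
 *              ret = -EIO;
 *
 * which is how a transfer error recorded here eventually reaches fsync(2)
 * and similar callers.
 */
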
static void vvp_page_completion_common(const struct lu_env *env,
                                       struct ccc_page *cp, int ioret)
{
        struct cl_page    *clp    = cp->cpg_cl.cpl_page;
        cfs_page_t        *vmpage = cp->cpg_page;
        struct inode      *inode  = ccc_object_inode(clp->cp_obj);
        struct cl_sync_io *anchor = cp->cpg_sync_io;

        LINVRNT(cl_page_is_vmlocked(env, clp));

        if (anchor != NULL) {
                cp->cpg_sync_io = NULL;
                cl_sync_io_note(anchor, ioret);
        } else if (clp->cp_type == CPT_CACHEABLE) {
                /*
                 * Only mark the page in error when it is a cacheable page
                 * and not under sync IO.
                 *
                 * For sync IO and direct IO (CPT_TRANSIENT), the error is
                 * visible to the application directly, so there is no need
                 * to mark the page in error at all.
                 */
                vvp_vmpage_error(inode, vmpage, ioret);
        }
}

static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct ccc_page *cp    = cl2ccc_page(slice);
        struct cl_page  *page  = cl_page_top(slice->cpl_page);
        struct inode    *inode = ccc_object_inode(page->cp_obj);
        ENTRY;

        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (cp->cpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                /* XXX: do we need this for transient pages? */
                if (!cp->cpg_defer_uptodate)
                        cl_page_export(env, page, 1);
        } else
                cp->cpg_defer_uptodate = 0;
        vvp_page_completion_common(env, cp, ioret);

        EXIT;
}

static void vvp_page_completion_write_common(const struct lu_env *env,
                                             const struct cl_page_slice *slice,
                                             int ioret)
{
        struct ccc_page *cp = cl2ccc_page(slice);

        /*
         * TODO: It actually makes sense to add the page back into the oap
         * pending list, so that we would not need to take the page off the
         * SoM write-pending list when we merely hit a recoverable error
         * (e.g. -EAGAIN).
         *
         * To implement this, we just need to return a non-zero value from
         * the ->cpo_completion() method. The underlying transfer would be
         * notified and would then re-add the page into the pending transfer
         * queue. -jay
         */
        cp->cpg_write_queued = 0;
        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);

        vvp_page_completion_common(env, cp, ioret);
}

static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        cfs_page_t      *vmpage = cp->cpg_page;
        ENTRY;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(PageWriteback(vmpage));

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        vvp_page_completion_write_common(env, slice, ioret);
        end_page_writeback(vmpage);
        EXIT;
}

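/*
 * Note the ordering above: end_page_writeback() runs only after
 * vvp_page_completion_write_common() has updated the write-pending
 * accounting, so a task woken from wait_on_page_writeback() should not
 * observe the page before its transfer state has settled.
 */
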
/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out as
 * a part of transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped (this
 * is bad, but hopefully rare, as it usually results in a transfer being
 * shorter than possible).
 *
 * \retval 0       success, the page can be placed into transfer
 *
 * \retval -EAGAIN the page is either in use by concurrent IO or has been
 *                 truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result;

        result = -EAGAIN;
        /* we are trying to write, but the page is locked.. come back later */
        if (!TestSetPageLocked(vmpage)) {
                if (pg->cp_state == CPS_CACHED) {
                        /*
                         * We can cancel IO if the page wasn't dirty after all.
                         */
                        clear_page_dirty_for_io(vmpage);
                        /*
                         * This actually clears the dirty bit in the radix
                         * tree.
                         */
                        set_page_writeback(vmpage);
                        vvp_write_pending(cl2ccc(slice->cpl_obj),
                                          cl2ccc_page(slice));
                        CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
                        result = 0;
                } else
                        /*
                         * The page was concurrently truncated.
                         */
                        LASSERT(pg->cp_state == CPS_FREEING);
        }
        RETURN(result);
}

static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct ccc_page *vp     = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vp->cpg_page;

        (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
                   "vm@%p ",
                   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
                   vp->cpg_write_queued, vmpage);
        if (vmpage != NULL) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }
        (*printer)(env, cookie, "\n");
        return 0;
}

static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_unmap         = vvp_page_unmap,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_page_prep_read,
                        .cpo_completion = vvp_page_completion_read,
                        .cpo_make_ready = ccc_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_page_prep_write,
                        .cpo_completion = vvp_page_completion_write,
                        .cpo_make_ready = vvp_page_make_ready,
                }
        }
};

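/*
 * A sketch of how this table is consumed (assuming the generic slice
 * dispatch in cl_page.c; the exact helper names there may differ): the
 * transfer entry points look up the nested per-direction operations by
 * cl_req_type, roughly
 *
 *      const struct cl_page_operations *ops = slice->cpl_ops;
 *
 *      if (ops->io[crt].cpo_prep != NULL)
 *              result = ops->io[crt].cpo_prep(env, slice, io);
 *
 * so the [CRT_READ] slot services read transfers (note ccc_fail: a cached
 * page is never "made ready" for a read) and the [CRT_WRITE] slot services
 * writes.
 */
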
static void vvp_transient_page_verify(const struct cl_page *page)
{
        struct inode *inode = ccc_object_inode(page->cp_obj);

        LASSERT(!TRYLOCK_INODE_MUTEX(inode));
        /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
}

static void vvp_transient_page_own(const struct lu_env *env,
                                   const struct cl_page_slice *slice,
                                   struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        vvp_transient_page_verify(slice->cpl_page);

        /*
         * For transient pages, remove the page from the radix tree.
         */
        cl_page_delete(env, page);
}

static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode *inode = ccc_object_inode(slice->cpl_obj);
        int locked;

        locked = !TRYLOCK_INODE_MUTEX(inode);
        if (!locked)
                UNLOCK_INODE_MUTEX(inode);
        return locked ? -EBUSY : -ENODATA;
}

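/*
 * Transient (direct-IO) pages have no VM page lock of their own; they are
 * serialized by i_mutex instead.  "Is vmlocked" is therefore approximated
 * by probing the inode mutex: if the trylock succeeds, the mutex was free
 * and must be dropped again right away.  The probe is inherently racy and
 * is only intended to back assertions such as the LINVRNT() checks above.
 */
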
static void
vvp_transient_page_completion_write(const struct lu_env *env,
                                    const struct cl_page_slice *slice,
                                    int ioret)
{
        vvp_transient_page_verify(slice->cpl_page);
        vvp_page_completion_write_common(env, slice, ioret);
}

static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct ccc_page   *cp    = cl2ccc_page(slice);
        struct cl_page    *clp   = slice->cpl_page;
        struct ccc_object *clobj = cl2ccc(clp->cp_obj);

        vvp_page_fini_common(cp);
        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
        clobj->cob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own           = vvp_transient_page_own,
        .cpo_assume        = vvp_transient_page_assume,
        .cpo_unassume      = vvp_transient_page_unassume,
        .cpo_disown        = vvp_transient_page_disown,
        .cpo_discard       = vvp_transient_page_discard,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_fini          = vvp_transient_page_fini,
        .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = ccc_transient_page_prep,
                        .cpo_completion = vvp_page_completion_read,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = ccc_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion_write,
                }
        }
};

struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct ccc_page *cpg;
        int result;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
        if (cpg != NULL) {
                cpg->cpg_page = vmpage;
                page_cache_get(vmpage);

                CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
                if (page->cp_type == CPT_CACHEABLE) {
                        SetPagePrivate(vmpage);
                        vmpage->private = (unsigned long)page;
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_page_ops);
                } else {
                        struct ccc_object *clobj = cl2ccc(obj);

                        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_transient_page_ops);
                        clobj->cob_transient_pages++;
                }
                result = 0;
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}

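/*
 * Note the return convention: success is reported as ERR_PTR(0), i.e. NULL,
 * and allocation failure as ERR_PTR(-ENOMEM), so a caller must test the
 * result with IS_ERR() rather than comparing it against NULL.  A minimal
 * caller sketch (assuming this function is reached through the
 * cl_object_operations::coo_page_init() hook; the exact unwinding logic
 * lives in cl_page.c and may differ):
 *
 *      struct cl_page *err;
 *
 *      err = obj->co_ops->coo_page_init(env, obj, page, vmpage);
 *      if (err != NULL && IS_ERR(err))
 *              ... unwind the partially initialized cl_page ...
 */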