/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#ifndef __KERNEL__
# error This file is kernel only.
#endif

#include <obd.h>
#include <lustre_lite.h>

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

static void vvp_page_fini_common(struct ccc_page *cp)
{
	struct page *vmpage = cp->cpg_page;

	LASSERT(vmpage != NULL);
	page_cache_release(vmpage);
}

static void vvp_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice)
{
	struct ccc_page *cp     = cl2ccc_page(slice);
	struct page     *vmpage = cp->cpg_page;

	/*
	 * vmpage->private was already cleared when the page was moved into
	 * CPS_FREEING state, so it must no longer point back at this page.
	 */
	LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
	vvp_page_fini_common(cp);
}

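/**
 * Implements cl_page_operations::cpo_own().
 *
 * Ownership of a VVP page maps onto the VM page lock: the page is locked
 * (try-locked in the nonblock case) and any writeback in flight is waited
 * out, so the owner sees a stable page.
 */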
static int vvp_page_own(const struct lu_env *env,
			const struct cl_page_slice *slice, struct cl_io *io,
			int nonblock)
{
	struct ccc_page *vpg    = cl2ccc_page(slice);
	struct page     *vmpage = vpg->cpg_page;

	LASSERT(vmpage != NULL);
	if (nonblock) {
		if (!trylock_page(vmpage))
			return -EAGAIN;

		if (unlikely(PageWriteback(vmpage))) {
			unlock_page(vmpage);
			return -EAGAIN;
		}

		return 0;
	}

	lock_page(vmpage);
	wait_on_page_writeback(vmpage);

	return 0;
}

static void vvp_page_assume(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
	wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
			    const struct cl_page_slice *slice, struct cl_io *io)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	unlock_page(cl2vm_page(slice));
}

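/**
 * Implements cl_page_operations::cpo_discard().
 *
 * If the page was read ahead (deferred uptodate) but never actually used,
 * account it as a discarded read-ahead page before invalidating the VM page.
 */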
static void vvp_page_discard(const struct lu_env *env,
			     const struct cl_page_slice *slice,
			     struct cl_io *unused)
{
	struct page     *vmpage = cl2vm_page(slice);
	struct ccc_page *cpg    = cl2ccc_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
		ll_ra_stats_inc(vmpage->mapping, RA_STAT_DISCARDED);

	ll_invalidate_page(vmpage);
}

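/**
 * Implements cl_page_operations::cpo_delete().
 *
 * Severs the link from the VM page back to the cl_page and drops the
 * reference taken in vvp_page_init(); the reverse reference, from the slice
 * to the vmpage, is released later in vvp_page_fini().
 */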
static void vvp_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct page      *vmpage = cl2vm_page(slice);
	struct inode     *inode  = vmpage->mapping->host;
	struct cl_object *obj    = slice->cpl_obj;
	struct cl_page   *page   = slice->cpl_page;
	int refc;

	LASSERT(PageLocked(vmpage));
	LASSERT((struct cl_page *)vmpage->private == page);
	LASSERT(inode == ccc_object_inode(obj));

	vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));

	/* Drop the reference count held in vvp_page_init */
	refc = atomic_dec_return(&page->cp_ref);
	LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

	ClearPageUptodate(vmpage);
	ClearPagePrivate(vmpage);
	vmpage->private = 0;
	/*
	 * The reference from vmpage to cl_page is removed, but the reference
	 * back is still here. It is removed later in vvp_page_fini().
	 */
}

static void vvp_page_export(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    int uptodate)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
	if (uptodate)
		SetPageUptodate(vmpage);
	else
		ClearPageUptodate(vmpage);
}

static int vvp_page_is_vmlocked(const struct lu_env *env,
				const struct cl_page_slice *slice)
{
	return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	ENTRY;
	/* Skip the page already marked as PG_uptodate. */
	RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

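/**
 * Implements cl_page_operations::cpo_prep() for the write path: the page is
 * about to be sent out, so mark it as under writeback and record it in the
 * pending-write accounting via vvp_write_pending().
 */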
static int vvp_page_prep_write(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(PageLocked(vmpage));
	LASSERT(!PageDirty(vmpage));

	set_page_writeback(vmpage);
	vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));

	return 0;
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can differ from the \a vmpage inode in the case of
 * direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
	struct ccc_object *obj = cl_inode2ccc(inode);

	if (ioret == 0) {
		ClearPageError(vmpage);
		obj->cob_discard_page_warned = 0;
	} else {
		SetPageError(vmpage);
		if (ioret == -ENOSPC)
			set_bit(AS_ENOSPC, &inode->i_mapping->flags);
		else
			set_bit(AS_EIO, &inode->i_mapping->flags);

		if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
		    obj->cob_discard_page_warned == 0) {
			obj->cob_discard_page_warned = 1;
			ll_dirty_page_discard_warn(vmpage, ioret);
		}
	}
}

static void vvp_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct ccc_page *cp     = cl2ccc_page(slice);
	struct page     *vmpage = cp->cpg_page;
	struct cl_page  *page   = slice->cpl_page;
	struct inode    *inode  = ccc_object_inode(page->cp_obj);
	ENTRY;

	LASSERT(PageLocked(vmpage));
	CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

	if (cp->cpg_defer_uptodate)
		ll_ra_count_put(ll_i2sbi(inode), 1);

	if (ioret == 0) {
		if (!cp->cpg_defer_uptodate)
			cl_page_export(env, page, 1);
	} else {
		cp->cpg_defer_uptodate = 0;
	}

	if (page->cp_sync_io == NULL)
		unlock_page(vmpage);

	EXIT;
}

static void vvp_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct ccc_page *cp     = cl2ccc_page(slice);
	struct cl_page  *pg     = slice->cpl_page;
	struct page     *vmpage = cp->cpg_page;
	ENTRY;

	LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
	LASSERT(PageWriteback(vmpage));

	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

	/*
	 * TODO: It actually makes sense to add the page back into the oap
	 * pending list, so that we do not need to take the page off the SoM
	 * write pending list when we merely hit a recoverable error.
	 *
	 * To implement this, we just need to return a non-zero value from
	 * the ->cpo_completion() method. The underlying transfer should be
	 * notified and then re-add the page into the pending transfer
	 * queue. -jay
	 */

	cp->cpg_write_queued = 0;
	vvp_write_complete(cl2ccc(slice->cpl_obj), cp);

	/*
	 * Only mark the page as in error for async writes, because
	 * applications won't wait for the IO to finish.
	 */
	if (pg->cp_sync_io == NULL)
		vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);

	end_page_writeback(vmpage);
	EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as a part of transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped (this
 * is bad, but hopefully a rare situation, as it usually results in the
 * transfer being shorter than possible).
 *
 * \retval 0       success, page can be placed into transfer
 *
 * \retval -EAGAIN page is either used by concurrent IO or has been
 *                 truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
			       const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct cl_page *pg = slice->cpl_page;
	int result = 0;

	lock_page(vmpage);
	if (clear_page_dirty_for_io(vmpage)) {
		LASSERT(pg->cp_state == CPS_CACHED);
		/* This actually clears the dirty bit in the radix
		 * tree. */
		set_page_writeback(vmpage);
		vvp_write_pending(cl2ccc(slice->cpl_obj),
				  cl2ccc_page(slice));
		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
	} else if (pg->cp_state == CPS_PAGEOUT) {
		/* Is it possible for osc_flush_async_page() to have already
		 * made it ready? */
		result = -EALREADY;
	} else {
		CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
			      pg->cp_state);
		LBUG();
	}
	unlock_page(vmpage);
	RETURN(result);
}

static int vvp_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
	struct ccc_page *vp     = cl2ccc_page(slice);
	struct page     *vmpage = vp->cpg_page;

	(*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
		   "vm@%p ",
		   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
		   vp->cpg_write_queued, vmpage);
	if (vmpage != NULL) {
		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
			   (long)vmpage->flags, page_count(vmpage),
			   page_mapcount(vmpage), vmpage->private,
			   page_index(vmpage),
			   list_empty(&vmpage->lru) ? "not-" : "");
	}
	(*printer)(env, cookie, "\n");

	return 0;
}

static const struct cl_page_operations vvp_page_ops = {
	.cpo_own           = vvp_page_own,
	.cpo_assume        = vvp_page_assume,
	.cpo_unassume      = vvp_page_unassume,
	.cpo_disown        = vvp_page_disown,
	.cpo_discard       = vvp_page_discard,
	.cpo_delete        = vvp_page_delete,
	.cpo_export        = vvp_page_export,
	.cpo_is_vmlocked   = vvp_page_is_vmlocked,
	.cpo_fini          = vvp_page_fini,
	.cpo_print         = vvp_page_print,
	.cpo_is_under_lock = ccc_page_is_under_lock,
	.io = {
		[CRT_READ] = {
			.cpo_prep       = vvp_page_prep_read,
			.cpo_completion = vvp_page_completion_read,
			.cpo_make_ready = ccc_fail,
		},
		[CRT_WRITE] = {
			.cpo_prep       = vvp_page_prep_write,
			.cpo_completion = vvp_page_completion_write,
			.cpo_make_ready = vvp_page_make_ready,
		},
	},
};

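/*
 * Transient pages (e.g. those created for direct IO) are not hashed into
 * the VM page cache, so the page lock cannot provide exclusive access to
 * them. Instead, the caller is expected to hold the inode's i_mutex for the
 * duration of the IO, which is what vvp_transient_page_verify() asserts in
 * each of the methods below.
 */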
static void vvp_transient_page_verify(const struct cl_page *page)
{
	struct inode *inode = ccc_object_inode(page->cp_obj);

	LASSERT(!mutex_trylock(&inode->i_mutex));
}

static int vvp_transient_page_own(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *unused, int nonblock)
{
	vvp_transient_page_verify(slice->cpl_page);

	return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
					const struct cl_page_slice *slice,
					struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
				       const struct cl_page_slice *slice,
				       struct cl_io *unused)
{
	struct cl_page *page = slice->cpl_page;

	vvp_transient_page_verify(slice->cpl_page);

	/*
	 * For transient pages, remove the page from the radix tree.
	 */
	cl_page_delete(env, page);
}

static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
					  const struct cl_page_slice *slice)
{
	struct inode *inode = ccc_object_inode(slice->cpl_obj);
	int locked;

	/* The page counts as "vmlocked" if someone else already holds
	 * i_mutex; a successful trylock means nobody did, so undo it. */
	locked = !mutex_trylock(&inode->i_mutex);
	if (!locked)
		mutex_unlock(&inode->i_mutex);

	return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      int ioret)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_fini(const struct lu_env *env,
				    struct cl_page_slice *slice)
{
	struct ccc_page   *cp    = cl2ccc_page(slice);
	struct cl_page    *clp   = slice->cpl_page;
	struct ccc_object *clobj = cl2ccc(clp->cp_obj);

	vvp_page_fini_common(cp);
	LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
	clobj->cob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
	.cpo_own           = vvp_transient_page_own,
	.cpo_assume        = vvp_transient_page_assume,
	.cpo_unassume      = vvp_transient_page_unassume,
	.cpo_disown        = vvp_transient_page_disown,
	.cpo_discard       = vvp_transient_page_discard,
	.cpo_fini          = vvp_transient_page_fini,
	.cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
	.cpo_print         = vvp_page_print,
	.cpo_is_under_lock = ccc_page_is_under_lock,
	.io = {
		[CRT_READ] = {
			.cpo_prep       = ccc_transient_page_prep,
			.cpo_completion = vvp_transient_page_completion,
		},
		[CRT_WRITE] = {
			.cpo_prep       = ccc_transient_page_prep,
			.cpo_completion = vvp_transient_page_completion,
		},
	},
};

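/**
 * Initializes the VVP slice for \a page: takes a reference on the VM page
 * and, for cacheable pages, links vmpage->private back to the cl_page (the
 * matching decref and unlink happen in vvp_page_delete()). Transient pages
 * instead require the inode's i_mutex to be held, and are counted in
 * cob_transient_pages.
 */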
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t index)
{
	struct ccc_page *cpg    = cl_object_page_slice(obj, page);
	struct page     *vmpage = page->cp_vmpage;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	cpg->cpg_cl.cpl_index = index;
	cpg->cpg_page = vmpage;
	page_cache_get(vmpage);

	CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
	if (page->cp_type == CPT_CACHEABLE) {
		/* in cache, decref in vvp_page_delete */
		atomic_inc(&page->cp_ref);
		SetPagePrivate(vmpage);
		vmpage->private = (unsigned long)page;
		cl_page_slice_add(page, &cpg->cpg_cl, obj,
				  &vvp_page_ops);
	} else {
		struct ccc_object *clobj = cl2ccc(obj);

		LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
		cl_page_slice_add(page, &cpg->cpg_cl, obj,
				  &vvp_transient_page_ops);
		clobj->cob_transient_pages++;
	}

	return 0;
}