/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

#include <libcfs/libcfs.h>
#include "llite_internal.h"
#include "vvp_internal.h"
/*****************************************************************************
 *
 * Page operations.
 *
 */
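/*
 * A vvp_page is the VVP layer's slice of a cl_page.  For CPT_CACHEABLE
 * pages it wraps a kernel page-cache page ("vmpage"); the methods below
 * translate cl_page state transitions into operations on that vmpage.
 */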
static void vvp_page_fini_common(struct vvp_page *vpg)
{
	struct page *vmpage = vpg->vpg_page;

	LASSERT(vmpage != NULL);
	/* Release the vmpage reference taken in vvp_page_init(). */
	put_page(vmpage);
}
static void vvp_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice)
{
	struct vvp_page *vpg    = cl2vvp_page(slice);
	struct page     *vmpage = vpg->vpg_page;

	/*
	 * vmpage->private was already cleared when the page was moved into
	 * the VPG_FREEING state.
	 */
	LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
	vvp_page_fini_common(vpg);
}
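/*
 * cl_page ownership is mapped onto the vmpage lock: the IO that owns a
 * cl_page holds PG_locked on the backing vmpage, so the VM and the cl_page
 * state machine never manipulate the page concurrently.
 */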
static int vvp_page_own(const struct lu_env *env,
			const struct cl_page_slice *slice, struct cl_io *io,
			int nonblock)
{
	struct vvp_page *vpg    = cl2vvp_page(slice);
	struct page     *vmpage = vpg->vpg_page;

	LASSERT(vmpage != NULL);
	if (nonblock) {
		if (!trylock_page(vmpage))
			return -EAGAIN;

		if (unlikely(PageWriteback(vmpage))) {
			unlock_page(vmpage);
			return -EAGAIN;
		}

		return 0;
	}

	lock_page(vmpage);
	wait_on_page_writeback(vmpage);

	return 0;
}
static void vvp_page_assume(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
	wait_on_page_writeback(vmpage);
}
static void vvp_page_unassume(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
}
static void vvp_page_disown(const struct lu_env *env,
			    const struct cl_page_slice *slice, struct cl_io *io)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	unlock_page(vmpage);
}
static void vvp_page_discard(const struct lu_env *env,
			     const struct cl_page_slice *slice,
			     struct cl_io *unused)
{
	struct page     *vmpage = cl2vm_page(slice);
	struct vvp_page *vpg    = cl2vvp_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	/* A read-ahead page that was never consumed is being discarded. */
	if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
		ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);

	ll_invalidate_page(vmpage);
}
static void vvp_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct page      *vmpage = cl2vm_page(slice);
	struct inode     *inode  = vmpage->mapping->host;
	struct cl_object *obj    = slice->cpl_obj;
	struct cl_page   *page   = slice->cpl_page;
	int refc;

	LASSERT(PageLocked(vmpage));
	LASSERT((struct cl_page *)vmpage->private == page);
	LASSERT(inode == vvp_object_inode(obj));

	/* Drop the reference count held in vvp_page_init */
	refc = atomic_dec_return(&page->cp_ref);
	LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

	ClearPagePrivate(vmpage);
	vmpage->private = 0;
	/*
	 * The reference from vmpage to cl_page is removed, but the reference
	 * back is still here. It is removed later in vvp_page_fini().
	 */
}
static void vvp_page_export(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    int uptodate)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
	if (uptodate)
		SetPageUptodate(vmpage);
	else
		ClearPageUptodate(vmpage);
}
static int vvp_page_is_vmlocked(const struct lu_env *env,
				const struct cl_page_slice *slice)
{
	return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}
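/*
 * The cpo_prep methods below run just before a page enters a transfer.
 * Returning a negative value (e.g. -EALREADY for a read of a page that is
 * already uptodate) tells the cl_page machinery that the page does not
 * need the transfer.
 */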
static int vvp_page_prep_read(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	ENTRY;
	/* Skip the page already marked as PG_uptodate. */
	RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}
static int vvp_page_prep_write(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(PageLocked(vmpage));
	LASSERT(!PageDirty(vmpage));

	set_page_writeback(vmpage);

	return 0;
}
/**
 * Handles page transfer errors at the VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can be different from the \a vmpage inode in the
 * case of direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage,
			     int ioret)
{
	struct vvp_object *obj = cl_inode2vvp(inode);

	if (ioret == 0) {
		ClearPageError(vmpage);
		obj->vob_discard_page_warned = 0;
	} else {
		SetPageError(vmpage);
		if (ioret == -ENOSPC)
			set_bit(AS_ENOSPC, &inode->i_mapping->flags);
		else
			set_bit(AS_EIO, &inode->i_mapping->flags);

		if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
		    obj->vob_discard_page_warned == 0) {
			obj->vob_discard_page_warned = 1;
			ll_dirty_page_discard_warn(vmpage, ioret);
		}
	}
}
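/*
 * Transfer completion handlers, called once per page when the RPC covering
 * it finishes.  For reads, the vmpage is exported as uptodate and unlocked;
 * for writes, the writeback bit is cleared and errors are propagated to the
 * address_space flags so that fsync()/close() can observe them.
 */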
static void vvp_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct vvp_page *vpg    = cl2vvp_page(slice);
	struct page     *vmpage = vpg->vpg_page;
	struct cl_page  *page   = slice->cpl_page;
	struct inode    *inode  = vvp_object_inode(page->cp_obj);
	ENTRY;

	LASSERT(PageLocked(vmpage));
	CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

	if (vpg->vpg_defer_uptodate)
		ll_ra_count_put(ll_i2sbi(inode), 1);

	if (ioret == 0) {
		if (!vpg->vpg_defer_uptodate)
			cl_page_export(env, page, 1);
	} else {
		vpg->vpg_defer_uptodate = 0;
	}

	if (page->cp_sync_io == NULL)
		unlock_page(vmpage);

	EXIT;
}
static void vvp_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct vvp_page *vpg    = cl2vvp_page(slice);
	struct cl_page  *pg     = slice->cpl_page;
	struct page     *vmpage = vpg->vpg_page;
	ENTRY;

	LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
	LASSERT(PageWriteback(vmpage));

	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

	/*
	 * Only mark the page as in error when it's an async write, because
	 * applications won't wait for the IO to finish.
	 */
	if (pg->cp_sync_io == NULL)
		vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);

	end_page_writeback(vmpage);
	EXIT;
}
/**
 * Implements the cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as a part of the transfer. The function locks the page, clears its dirty
 * bit, and marks it for writeback. If the page has already been submitted
 * by a concurrent flush, it is skipped (this is unfortunate but hopefully
 * rare, as it usually results in the transfer being shorter than possible).
 *
 * \retval 0		success, the page can be placed into the transfer
 * \retval -EALREADY	the page is already being paged out by concurrent IO
 *			(e.g. osc_flush_async_page()); skip it
 */
static int vvp_page_make_ready(const struct lu_env *env,
			       const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct cl_page *pg = slice->cpl_page;
	int result = 0;

	lock_page(vmpage);
	if (clear_page_dirty_for_io(vmpage)) {
		LASSERT(pg->cp_state == CPS_CACHED);
		/* This actually clears the dirty bit in the radix tree. */
		set_page_writeback(vmpage);
		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
	} else if (pg->cp_state == CPS_PAGEOUT) {
		/* Is it possible for osc_flush_async_page() to have already
		 * made it ready? */
		result = -EALREADY;
	} else {
		CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
			      pg->cp_state);
		LBUG();
	}
	unlock_page(vmpage);
	RETURN(result);
}
static int vvp_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
	struct vvp_page *vpg    = cl2vvp_page(slice);
	struct page     *vmpage = vpg->vpg_page;

	(*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d) "
		   "vm@%p ",
		   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);

	if (vmpage != NULL) {
		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
			   (long)vmpage->flags, page_count(vmpage),
			   page_mapcount(vmpage), vmpage->private,
			   page_index(vmpage),
			   list_empty(&vmpage->lru) ? "not-" : "");
	}

	(*printer)(env, cookie, "\n");

	return 0;
}
static int vvp_page_fail(const struct lu_env *env,
			 const struct cl_page_slice *slice)
{
	/*
	 * Cached read? A make_ready method makes no sense on the read path,
	 * so any call here is a bug.
	 */
	LBUG();

	return 0;
}
static const struct cl_page_operations vvp_page_ops = {
	.cpo_own	   = vvp_page_own,
	.cpo_assume	   = vvp_page_assume,
	.cpo_unassume	   = vvp_page_unassume,
	.cpo_disown	   = vvp_page_disown,
	.cpo_discard	   = vvp_page_discard,
	.cpo_delete	   = vvp_page_delete,
	.cpo_export	   = vvp_page_export,
	.cpo_is_vmlocked   = vvp_page_is_vmlocked,
	.cpo_fini	   = vvp_page_fini,
	.cpo_print	   = vvp_page_print,
	.io = {
		[CRT_READ] = {
			.cpo_prep	= vvp_page_prep_read,
			.cpo_completion	= vvp_page_completion_read,
			.cpo_make_ready	= vvp_page_fail,
		},
		[CRT_WRITE] = {
			.cpo_prep	= vvp_page_prep_write,
			.cpo_completion	= vvp_page_completion_write,
			.cpo_make_ready	= vvp_page_make_ready,
		},
	},
};
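/*
 * Transient pages (CPT_TRANSIENT) wrap user-space buffers for direct IO.
 * They never enter the page cache, are always owned by the single IO that
 * created them, and are covered by the inode lock held across that IO, so
 * most of the methods below are no-ops or sanity checks only.
 */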
static int vvp_transient_page_prep(const struct lu_env *env,
				   const struct cl_page_slice *slice,
				   struct cl_io *unused)
{
	ENTRY;
	/* Transient pages should always be sent. */
	RETURN(0);
}
static void vvp_transient_page_verify(const struct cl_page *page)
{
}
static int vvp_transient_page_own(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *unused, int nonblock)
{
	vvp_transient_page_verify(slice->cpl_page);
	return 0;
}
static void vvp_transient_page_assume(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
					const struct cl_page_slice *slice,
					struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_discard(const struct lu_env *env,
				       const struct cl_page_slice *slice,
				       struct cl_io *unused)
{
	struct cl_page *page = slice->cpl_page;

	vvp_transient_page_verify(slice->cpl_page);

	/*
	 * For transient pages, remove the page from the radix tree.
	 */
	cl_page_delete(env, page);
}
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
					  const struct cl_page_slice *slice)
{
	struct inode *inode = vvp_object_inode(slice->cpl_obj);
	int locked;

	locked = !inode_trylock(inode);
	if (!locked)
		inode_unlock(inode);

	return locked ? -EBUSY : -ENODATA;
}
static void
vvp_transient_page_completion(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      int ioret)
{
	vvp_transient_page_verify(slice->cpl_page);
}
static void vvp_transient_page_fini(const struct lu_env *env,
				    struct cl_page_slice *slice)
{
	struct vvp_page   *vpg   = cl2vvp_page(slice);
	struct cl_page    *clp   = slice->cpl_page;
	struct vvp_object *clobj = cl2vvp(clp->cp_obj);

	vvp_page_fini_common(vpg);
	atomic_dec(&clobj->vob_transient_pages);
}
static const struct cl_page_operations vvp_transient_page_ops = {
	.cpo_own	   = vvp_transient_page_own,
	.cpo_assume	   = vvp_transient_page_assume,
	.cpo_unassume	   = vvp_transient_page_unassume,
	.cpo_disown	   = vvp_transient_page_disown,
	.cpo_discard	   = vvp_transient_page_discard,
	.cpo_fini	   = vvp_transient_page_fini,
	.cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
	.cpo_print	   = vvp_page_print,
	.io = {
		[CRT_READ] = {
			.cpo_prep	= vvp_transient_page_prep,
			.cpo_completion	= vvp_transient_page_completion,
		},
		[CRT_WRITE] = {
			.cpo_prep	= vvp_transient_page_prep,
			.cpo_completion	= vvp_transient_page_completion,
		},
	},
};
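/*
 * vvp_page_init() is the VVP layer's coo_page_init() method, called when a
 * cl_page is created to attach the vvp_page slice.  Cacheable pages link
 * the vmpage and the cl_page to each other through vmpage->private;
 * transient pages only keep a per-object count.
 */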
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t index)
{
	struct vvp_page *vpg    = cl_object_page_slice(obj, page);
	struct page     *vmpage = page->cp_vmpage;

	CLOBINVRNT(env, obj, vvp_object_invariant(obj));

	vpg->vpg_page = vmpage;
	/* Take a vmpage reference; dropped in vvp_page_fini_common(). */
	get_page(vmpage);

	if (page->cp_type == CPT_CACHEABLE) {
		/* in cache, decref in vvp_page_delete */
		atomic_inc(&page->cp_ref);
		SetPagePrivate(vmpage);
		vmpage->private = (unsigned long)page;
		cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
				  &vvp_page_ops);
	} else {
		struct vvp_object *clobj = cl2vvp(obj);

		cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
				  &vvp_transient_page_ops);
		atomic_inc(&clobj->vob_transient_pages);
	}
	return 0;
}