/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

#include <libcfs/libcfs.h>
#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

static void vvp_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice,
			  struct pagevec *pvec)
{
	struct vvp_page *vpg    = cl2vvp_page(slice);
	struct page     *vmpage = vpg->vpg_page;

	/*
	 * vmpage->private was already cleared when page was moved into
	 * VPG_FREEING state.
	 */
	LASSERT(vmpage != NULL);
	LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);

	if (pvec) {
		if (!pagevec_add(pvec, vmpage))
			pagevec_release(pvec);
	} else {
		put_page(vmpage);
	}
}

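/**
 * Implements cl_page_operations::cpo_own() method.
 *
 * Takes ownership of the VM page: locks it and waits until any writeback
 * completes. In non-blocking mode, returns -EAGAIN instead, if the page is
 * locked by someone else or is under writeback.
 */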
static int vvp_page_own(const struct lu_env *env,
			const struct cl_page_slice *slice, struct cl_io *io,
			int nonblock)
{
	struct vvp_page *vpg    = cl2vvp_page(slice);
	struct page     *vmpage = vpg->vpg_page;

	ENTRY;

	LASSERT(vmpage != NULL);
	if (nonblock) {
		if (!trylock_page(vmpage))
			RETURN(-EAGAIN);

		if (unlikely(PageWriteback(vmpage))) {
			unlock_page(vmpage);
			RETURN(-EAGAIN);
		}

		RETURN(0);
	}

	lock_page(vmpage);
	wait_on_page_writeback(vmpage);

	RETURN(0);
}

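/**
 * Implements cl_page_operations::cpo_assume() method.
 *
 * The caller is expected to already hold the VM page lock; only wait for
 * writeback to finish.
 */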
static void vvp_page_assume(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
	wait_on_page_writeback(vmpage);
}

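/**
 * Implements cl_page_operations::cpo_unassume() method.
 *
 * Ownership is passed back without unlocking the VM page; only sanity
 * checks are done here.
 */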
static void vvp_page_unassume(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
}

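/**
 * Implements cl_page_operations::cpo_disown() method.
 *
 * Drops ownership by unlocking the VM page.
 */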
static void vvp_page_disown(const struct lu_env *env,
			    const struct cl_page_slice *slice, struct cl_io *io)
{
	struct page *vmpage = cl2vm_page(slice);

	ENTRY;

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	unlock_page(vmpage);

	EXIT;
}

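/**
 * Implements cl_page_operations::cpo_discard() method.
 *
 * Accounts a discarded read-ahead page in the statistics, then drops the
 * page from its address space mapping.
 */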
static void vvp_page_discard(const struct lu_env *env,
			     const struct cl_page_slice *slice,
			     struct cl_io *unused)
{
	struct page     *vmpage = cl2vm_page(slice);
	struct vvp_page *vpg    = cl2vvp_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used && vmpage->mapping)
		ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);

	generic_error_remove_page(vmpage->mapping, vmpage);
}

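/**
 * Implements cl_page_operations::cpo_delete() method.
 *
 * Severs the vmpage -> cl_page link established in vvp_page_init() and
 * drops the matching cl_page reference.
 */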
static void vvp_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct page    *vmpage = cl2vm_page(slice);
	struct cl_page *page   = slice->cpl_page;
	int refc;

	LASSERT(PageLocked(vmpage));
	LASSERT((struct cl_page *)vmpage->private == page);

	/* Drop the reference count held in vvp_page_init */
	refc = atomic_dec_return(&page->cp_ref);
	LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

	ClearPagePrivate(vmpage);
	vmpage->private = 0;

	/*
	 * Reference from vmpage to cl_page is removed, but the reference back
	 * is still here. It is removed later in vvp_page_fini().
	 */
}

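/**
 * Implements cl_page_operations::cpo_export() method.
 *
 * Propagates the cl_page uptodate state to the VM page.
 */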
static void vvp_page_export(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    int uptodate)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	if (uptodate)
		SetPageUptodate(vmpage);
	else
		ClearPageUptodate(vmpage);
}

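/**
 * Implements cl_page_operations::cpo_is_vmlocked() method.
 *
 * \retval -EBUSY   the VM page is locked
 * \retval -ENODATA the VM page is not locked
 */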
static int vvp_page_is_vmlocked(const struct lu_env *env,
				const struct cl_page_slice *slice)
{
	return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

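/**
 * Implements cl_page_operations::cpo_prep() method for the read path.
 */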
static int vvp_page_prep_read(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	ENTRY;
	/* Skip the page already marked as PG_uptodate. */
	RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

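/**
 * Implements cl_page_operations::cpo_prep() method for the write path.
 */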
static int vvp_page_prep_write(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	struct page    *vmpage = cl2vm_page(slice);
	struct cl_page *pg     = slice->cpl_page;

	LASSERT(PageLocked(vmpage));
	LASSERT(!PageDirty(vmpage));

	/* The ll_writepage path is not a sync write, so we need to set the
	 * page writeback flag.
	 */
	if (pg->cp_sync_io == NULL)
		set_page_writeback(vmpage);

	RETURN(0);
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes inode as a separate argument, because the inode on which the
 * error is to be set can be different from the \a vmpage inode in case of
 * direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage,
			     int ioret)
{
	struct vvp_object *obj = cl_inode2vvp(inode);

	if (ioret == 0) {
		ClearPageError(vmpage);
		obj->vob_discard_page_warned = 0;
	} else {
		SetPageError(vmpage);
		if (ioret == -ENOSPC)
			set_bit(AS_ENOSPC, &inode->i_mapping->flags);
		else
			set_bit(AS_EIO, &inode->i_mapping->flags);

		if ((ioret == -ESHUTDOWN || ioret == -EINTR ||
		     ioret == -EIO) && obj->vob_discard_page_warned == 0) {
			obj->vob_discard_page_warned = 1;
			ll_dirty_page_discard_warn(inode, ioret);
		}
	}
}

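/**
 * Implements cl_page_operations::cpo_completion() method for reads.
 *
 * Updates read-ahead accounting, marks the page uptodate on success, and
 * unlocks the VM page unless a synchronous waiter (cp_sync_io) will do it.
 */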
static void vvp_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct vvp_page *vpg	= cl2vvp_page(slice);
	struct page	*vmpage	= vpg->vpg_page;
	struct cl_page	*page	= slice->cpl_page;
	struct inode	*inode	= vvp_object_inode(page->cp_obj);

	ENTRY;

	LASSERT(PageLocked(vmpage));
	CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

	if (vpg->vpg_defer_uptodate)
		ll_ra_count_put(ll_i2sbi(inode), 1);

	if (ioret == 0) {
		if (!vpg->vpg_defer_uptodate)
			cl_page_export(env, page, 1);
	} else if (vpg->vpg_defer_uptodate) {
		vpg->vpg_defer_uptodate = 0;
		if (ioret == -EAGAIN) {
			/* mirror read failed, it needs to destroy the page
			 * because the subpage would be from the wrong osc
			 * when trying to read from a new mirror
			 */
			generic_error_remove_page(vmpage->mapping, vmpage);
		}
	}

	if (page->cp_sync_io == NULL)
		unlock_page(vmpage);

	EXIT;
}

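/**
 * Implements cl_page_operations::cpo_completion() method for writes.
 *
 * For an async write (no cp_sync_io waiter), records any transfer error on
 * the VM page and ends its writeback state.
 */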
static void vvp_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct vvp_page *vpg	= cl2vvp_page(slice);
	struct cl_page	*pg	= slice->cpl_page;
	struct page	*vmpage	= vpg->vpg_page;

	ENTRY;

	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

	if (pg->cp_sync_io != NULL) {
		LASSERT(PageLocked(vmpage));
		LASSERT(!PageWriteback(vmpage));
	} else {
		LASSERT(PageWriteback(vmpage));
		/*
		 * Only mark the page error when it's an async write,
		 * because applications won't wait for IO to finish.
		 */
		vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);

		end_page_writeback(vmpage);
	}
	EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out as
 * a part of transfer. This function locks the page. If the lock is contended,
 * the page is owned by some concurrent IO and should be skipped (this is bad,
 * but hopefully a rare situation, as it usually results in the transfer being
 * shorter than possible).
 *
 * \retval 0	    success, page can be placed into transfer
 *
 * \retval -EAGAIN  page is either used by concurrent IO or has been
 *		    truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
			       const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct cl_page *pg = slice->cpl_page;
	int result = 0;

	lock_page(vmpage);
	if (clear_page_dirty_for_io(vmpage)) {
		LASSERT(pg->cp_state == CPS_CACHED);
		/* This actually clears the dirty bit in the radix
		 * tree.
		 */
		set_page_writeback(vmpage);
		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
	} else if (pg->cp_state == CPS_PAGEOUT) {
		/* is it possible for osc_flush_async_page() to already
		 * make it ready?
		 */
		result = -EALREADY;
	} else {
		CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
			      pg->cp_state);
		LBUG();
	}
	unlock_page(vmpage);
	RETURN(result);
}

static int vvp_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
	struct vvp_page *vpg	= cl2vvp_page(slice);
	struct page	*vmpage	= vpg->vpg_page;

	(*printer)(env, cookie,
		   LUSTRE_VVP_NAME"-page@%p(%d:%d) vm@%p ",
		   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);

	if (vmpage != NULL) {
		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
			   (long)vmpage->flags, page_count(vmpage),
			   page_mapcount(vmpage), vmpage->private,
			   page_index(vmpage),
			   list_empty(&vmpage->lru) ? "not-" : "");
	}

	(*printer)(env, cookie, "\n");

	return 0;
}

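/**
 * Placeholder for cl_page_operations::cpo_make_ready() in the read path,
 * where making a page ready must never be requested.
 */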
static int vvp_page_fail(const struct lu_env *env,
			 const struct cl_page_slice *slice)
{
	/*
	 * Cached read?
	 */
	LBUG();

	return 0;
}

static const struct cl_page_operations vvp_page_ops = {
	.cpo_own	 = vvp_page_own,
	.cpo_assume	 = vvp_page_assume,
	.cpo_unassume	 = vvp_page_unassume,
	.cpo_disown	 = vvp_page_disown,
	.cpo_discard	 = vvp_page_discard,
	.cpo_delete	 = vvp_page_delete,
	.cpo_export	 = vvp_page_export,
	.cpo_is_vmlocked = vvp_page_is_vmlocked,
	.cpo_fini	 = vvp_page_fini,
	.cpo_print	 = vvp_page_print,
	.io = {
		[CRT_READ] = {
			.cpo_prep	= vvp_page_prep_read,
			.cpo_completion	= vvp_page_completion_read,
			.cpo_make_ready	= vvp_page_fail,
		},
		[CRT_WRITE] = {
			.cpo_prep	= vvp_page_prep_write,
			.cpo_completion	= vvp_page_completion_write,
			.cpo_make_ready	= vvp_page_make_ready,
		},
	},
};

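/**
 * Implements cl_page_operations::cpo_discard() method for transient pages.
 *
 * Transient pages are never cached, so discarding simply deletes the
 * cl_page.
 */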
static void vvp_transient_page_discard(const struct lu_env *env,
				       const struct cl_page_slice *slice,
				       struct cl_io *unused)
{
	struct cl_page *page = slice->cpl_page;

	/*
	 * For transient pages, remove the page from the radix tree.
	 */
	cl_page_delete(env, page);
}

static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
					  const struct cl_page_slice *slice)
{
	/* transient (direct IO) pages are always treated as VM-locked */
	return -EBUSY;
}

static const struct cl_page_operations vvp_transient_page_ops = {
	.cpo_discard		= vvp_transient_page_discard,
	.cpo_is_vmlocked	= vvp_transient_page_is_vmlocked,
	.cpo_print		= vvp_page_print,
};

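/**
 * Implements cl_object_operations::coo_page_init() method.
 *
 * Attaches the VVP slice to a new cl_page and, for cached (non-transient)
 * pages, links the VM page back to the cl_page through vmpage->private.
 */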
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t index)
{
	struct vvp_page *vpg = cl_object_page_slice(obj, page);
	struct page	*vmpage = page->cp_vmpage;

	CLOBINVRNT(env, obj, vvp_object_invariant(obj));

	vpg->vpg_page = vmpage;

	if (page->cp_type == CPT_TRANSIENT) {
		/* DIO pages are referenced by userspace, we don't need to take
		 * a reference on them. (contrast with the get_page() call
		 * below)
		 */
		cl_page_slice_add(page, &vpg->vpg_cl, obj,
				  &vvp_transient_page_ops);
	} else {
		get_page(vmpage);
		/* in cache, decref in vvp_page_delete */
		atomic_inc(&page->cp_ref);
		SetPagePrivate(vmpage);
		vmpage->private = (unsigned long)page;
		cl_page_slice_add(page, &vpg->vpg_cl, obj,
				  &vvp_page_ops);
	}

	return 0;
}