/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

#include <libcfs/libcfs.h>

#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

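/**
 * Discard callback: if this was a readahead page that was never consumed
 * (cp_defer_uptodate is set but cp_ra_used is not), count it as wasted
 * readahead in the RA_STAT_DISCARDED statistics.
 */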
static void vvp_page_discard(const struct lu_env *env,
			     const struct cl_page_slice *slice,
			     struct cl_io *unused)
{
	struct cl_page *cp = slice->cpl_page;
	struct page *vmpage = cp->cp_vmpage;

	if (cp->cp_defer_uptodate && !cp->cp_ra_used && vmpage->mapping != NULL)
		ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
}

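/**
 * Delete callback: for a cacheable page, sever the vmpage -> cl_page link,
 * drop the cl_page reference taken in vvp_page_init(), and clear the
 * Uptodate bit under lli_page_inv_lock so that a racing fault can detect
 * the invalidation (see ll_filemap_fault() and LU-16160).
 */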
static void vvp_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct cl_page *cp = slice->cpl_page;

	if (cp->cp_type == CPT_CACHEABLE) {
		struct page *vmpage = cp->cp_vmpage;
		struct inode *inode = vmpage->mapping->host;

		LASSERT(PageLocked(vmpage));
		LASSERT((struct cl_page *)vmpage->private == cp);

		/* Drop the reference count held in vvp_page_init */
		refcount_dec(&cp->cp_ref);

		ClearPagePrivate(vmpage);
		vmpage->private = 0;

		/* ClearPageUptodate prevents the page from being read by the
		 * kernel after it has been deleted from Lustre, which avoids
		 * potential stale data reads. The seqlock allows us to see
		 * that a page was potentially deleted and catch the resulting
		 * SIGBUS - see ll_filemap_fault() (LU-16160)
		 */
		write_seqlock(&ll_i2info(inode)->lli_page_inv_lock);
		ClearPageUptodate(vmpage);
		write_sequnlock(&ll_i2info(inode)->lli_page_inv_lock);

		/*
		 * The reference from vmpage to cl_page is removed,
		 * but the reference back is still here. It is removed
		 * later in cl_page_free().
		 */
	}
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes inode as a separate argument because the inode on which the
 * error is to be set can be different from the \a vmpage inode in the case
 * of direct IO.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage,
			     int ioret)
{
	struct vvp_object *obj = cl_inode2vvp(inode);

	if (ioret == 0) {
		ClearPageError(vmpage);
		obj->vob_discard_page_warned = 0;
	} else {
		SetPageError(vmpage);
		if (ioret != -ENOSPC &&
		    CFS_FAIL_CHECK(OBD_FAIL_LLITE_PANIC_ON_ESTALE))
			LBUG();

		mapping_set_error(inode->i_mapping, ioret);

		if ((ioret == -ESHUTDOWN || ioret == -EINTR ||
		     ioret == -EIO) && obj->vob_discard_page_warned == 0) {
			obj->vob_discard_page_warned = 1;
			ll_dirty_page_discard_warn(inode, ioret);
		}
	}
}

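/**
 * Read completion callback: on success, mark the vmpage uptodate unless
 * that is deferred to ll_readpage()/ll_fault() for readahead pages; on a
 * failed mirror read (-EAGAIN), drop the page so the retry can read from a
 * new mirror; finally, unlock the vmpage unless this is a sync IO page.
 */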
static void vvp_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct cl_page *cp = slice->cpl_page;
	struct page *vmpage = cp->cp_vmpage;
	struct inode *inode = vvp_object_inode(cp->cp_obj);

	ENTRY;
	LASSERT(PageLocked(vmpage));
	CL_PAGE_HEADER(D_PAGE, env, cp, "completing READ with %d\n", ioret);

	if (cp->cp_defer_uptodate)
		ll_ra_count_put(ll_i2sbi(inode), 1);

	if (ioret == 0) {
		/*
		 * cp_defer_uptodate is used for readahead pages; setting the
		 * vmpage Uptodate bit is deferred to ll_readpage()/ll_fault().
		 */
		if (!cp->cp_defer_uptodate)
			SetPageUptodate(vmpage);
	} else if (cp->cp_defer_uptodate) {
		cp->cp_defer_uptodate = 0;
		if (ioret == -EAGAIN) {
			/* The mirror read failed; destroy the page, because
			 * the subpage would be from the wrong OSC when
			 * retrying the read from a new mirror.
			 */
			generic_error_remove_page(vmpage->mapping, vmpage);
		}
	}

	if (cp->cp_sync_io == NULL)
		unlock_page(vmpage);

	EXIT;
}

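/**
 * Write completion callback: a sync write page is still locked and not
 * under writeback here, while an async write page has writeback cleared
 * and any transfer error recorded on the vmpage and its mapping.
 */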
static void vvp_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct cl_page *cp = slice->cpl_page;
	struct page *vmpage = cp->cp_vmpage;

	ENTRY;
	CL_PAGE_HEADER(D_PAGE, env, cp, "completing WRITE with %d\n", ioret);

	if (cp->cp_sync_io != NULL) {
		LASSERT(PageLocked(vmpage));
		LASSERT(!PageWriteback(vmpage));
	} else {
		LASSERT(PageWriteback(vmpage));
		/*
		 * Only mark the page with an error when it is an async write,
		 * because applications won't wait for the IO to finish.
		 */
		vvp_vmpage_error(vvp_object_inode(cp->cp_obj), vmpage, ioret);

		end_page_writeback(vmpage);
	}
	EXIT;
}

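/*
 * Methods for cacheable VVP pages; the transfer completion callbacks are
 * indexed by transfer type (CRT_READ/CRT_WRITE).
 */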
static const struct cl_page_operations vvp_page_ops = {
	.cpo_delete	= vvp_page_delete,
	.cpo_discard	= vvp_page_discard,
	.io = {
		[CRT_READ] = {
			.cpo_completion = vvp_page_completion_read,
		},
		[CRT_WRITE] = {
			.cpo_completion = vvp_page_completion_write,
		},
	},
};

static const struct cl_page_operations vvp_transient_page_ops = {
};

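/**
 * Initialize the VVP slice of \a page. Transient (direct IO) pages are
 * referenced by userspace, so they get the empty transient methods and no
 * extra reference; cacheable pages take an extra cl_page reference and are
 * linked to their vmpage via vmpage->private, undone in vvp_page_delete().
 */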
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t index)
{
	struct cl_page_slice *cpl = cl_object_page_slice(obj, page);
	struct page *vmpage = page->cp_vmpage;

	CLOBINVRNT(env, obj, vvp_object_invariant(obj));

	if (page->cp_type == CPT_TRANSIENT) {
		/* DIO pages are referenced by userspace, so we don't need to
		 * take a reference on them (contrast with the get_page()
		 * call below).
		 */
		cl_page_slice_add(page, cpl, obj, &vvp_transient_page_ops);
	} else {
		get_page(vmpage);
		/* in cache, decref in cl_page_delete() */
		refcount_inc(&page->cp_ref);
		SetPagePrivate(vmpage);
		vmpage->private = (unsigned long)page;
		cl_page_slice_add(page, cpl, obj, &vvp_page_ops);
	}

	return 0;
}