/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Implementation of cl_page for the VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

#include <libcfs/libcfs.h>
#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */
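/* Implements cl_page_operations::cpo_discard(): if the page was brought in
 * by readahead (cp_defer_uptodate) but never actually consumed by a reader
 * (cp_ra_used), account it as a discarded readahead page in the per-inode
 * statistics.
 */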
static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        struct cl_page *cp = slice->cpl_page;
        struct page *vmpage = cp->cp_vmpage;

        if (cp->cp_defer_uptodate && !cp->cp_ra_used && vmpage->mapping != NULL)
                ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
}

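/* Implements cl_page_operations::cpo_prep() for reads: a page that is
 * already PG_uptodate needs no transfer, so -EALREADY tells the caller to
 * leave it out of the read request.
 */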
static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip the page already marked as PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

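/* Implements cl_page_operations::cpo_prep() for writes: the page must be
 * locked and clean; an async write (one with no cp_sync_io waiter) is
 * additionally flagged for writeback here.
 */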
static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageDirty(vmpage));

        /* The ll_writepage() path is not a sync write, so we need to set the
         * page writeback flag here.
         */
        if (pg->cp_sync_io == NULL)
                set_page_writeback(vmpage);

        return 0;
}

/**
 * Handles page transfer errors at the VM level.
 *
 * This takes the inode as a separate argument because the inode on which the
 * error is to be set can differ from the \a vmpage inode in the case of
 * direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage,
                             int ioret)
{
        struct vvp_object *obj = cl_inode2vvp(inode);

        if (ioret == 0) {
                ClearPageError(vmpage);
                obj->vob_discard_page_warned = 0;
        } else {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);

                if ((ioret == -ESHUTDOWN || ioret == -EINTR ||
                     ioret == -EIO) && obj->vob_discard_page_warned == 0) {
                        obj->vob_discard_page_warned = 1;
                        ll_dirty_page_discard_warn(inode, ioret);
                }
        }
}

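/* Implements cl_page_operations::cpo_completion() for reads: puts back the
 * readahead count if one was taken, marks a non-readahead page uptodate on
 * success, and unlocks the page only for async IO; a sync waiter
 * (cp_sync_io) is left to unlock it itself.
 */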
static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct cl_page *cp = slice->cpl_page;
        struct page *vmpage = cp->cp_vmpage;
        struct inode *inode = vvp_object_inode(cp->cp_obj);

        ENTRY;
        LASSERT(PageLocked(vmpage));
        CL_PAGE_HEADER(D_PAGE, env, cp, "completing READ with %d\n", ioret);

        if (cp->cp_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                if (!cp->cp_defer_uptodate)
                        SetPageUptodate(vmpage);
        } else if (cp->cp_defer_uptodate) {
                cp->cp_defer_uptodate = 0;
                if (ioret == -EAGAIN) {
                        /* A mirror read failed; destroy the page, because the
                         * sub-page would come from the wrong OSC when the
                         * read is retried from a new mirror.
                         */
                        generic_error_remove_page(vmpage->mapping, vmpage);
                }
        }

        if (cp->cp_sync_io == NULL)
                unlock_page(vmpage);

        EXIT;
}

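/* Implements cl_page_operations::cpo_completion() for writes: a sync write
 * still owns the locked page and never set writeback; an async write records
 * any error on the vmpage via vvp_vmpage_error() and ends writeback here.
 */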
static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        struct page     *vmpage = vpg->vpg_page;

        ENTRY;
        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        if (pg->cp_sync_io != NULL) {
                LASSERT(PageLocked(vmpage));
                LASSERT(!PageWriteback(vmpage));
        } else {
                LASSERT(PageWriteback(vmpage));
                /*
                 * Only mark the page in error when it is an async write,
                 * because applications won't wait for the IO to finish.
                 */
                vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);

                end_page_writeback(vmpage);
        }
        EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as a part of transfer. The page is locked while its dirty bit is moved to
 * the writeback state. If the page was already made ready by concurrent IO,
 * it should be skipped (this is bad, but hopefully a rare situation, as it
 * usually results in the transfer being shorter than possible).
 *
 * \retval 0         success, page can be placed into transfer
 *
 * \retval -EALREADY page is either used by concurrent IO or has been
 *                   truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result = 0;

        lock_page(vmpage);
        if (clear_page_dirty_for_io(vmpage)) {
                LASSERT(pg->cp_state == CPS_CACHED);
                /* This actually clears the dirty bit in the radix tree. */
                set_page_writeback(vmpage);
                CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
        } else if (pg->cp_state == CPS_PAGEOUT) {
                /* Is it possible for osc_flush_async_page() to already
                 * have made it ready?
                 */
                result = -EALREADY;
        } else {
                CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
                              pg->cp_state);
                LBUG();
        }
        unlock_page(vmpage);
        RETURN(result);
}

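/* Placeholder for operations that must never be reached: it is wired up as
 * cpo_make_ready for reads in vvp_page_ops below, where LBUG() flags the
 * invalid call.
 */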
static int vvp_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice)
{
        /*
         * Cached read?
         */
        LBUG();

        return 0;
}

static const struct cl_page_operations vvp_page_ops = {
        .cpo_discard       = vvp_page_discard,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_page_prep_read,
                        .cpo_completion = vvp_page_completion_read,
                        .cpo_make_ready = vvp_page_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_page_prep_write,
                        .cpo_completion = vvp_page_completion_write,
                        .cpo_make_ready = vvp_page_make_ready,
                },
        },
};

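/* Transient (e.g. direct IO) pages use an empty operations vector: every
 * method is left NULL, so only the generic cl_page code runs for them.
 */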
static const struct cl_page_operations vvp_transient_page_ops = {
};

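/* Attaches the VVP slice to a newly created cl_page. Cached pages take a
 * vmpage reference and install a vmpage->private back-pointer to the
 * cl_page; transient (DIO) pages do neither.
 */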
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_page *page, pgoff_t index)
{
        struct vvp_page *vpg = cl_object_page_slice(obj, page);
        struct page     *vmpage = page->cp_vmpage;

        CLOBINVRNT(env, obj, vvp_object_invariant(obj));

        vpg->vpg_page = vmpage;

        if (page->cp_type == CPT_TRANSIENT) {
                /* DIO pages are referenced by userspace, so we do not need to
                 * take a reference on them (contrast with the get_page() call
                 * in the cached branch below).
                 */
                cl_page_slice_add(page, &vpg->vpg_cl, obj,
                                  &vvp_transient_page_ops);
        } else {
                get_page(vmpage);
                /* in cache, decref in cl_page_delete() */
                atomic_inc(&page->cp_ref);
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
                cl_page_slice_add(page, &vpg->vpg_cl, obj,
                                  &vvp_page_ops);
        }

        return 0;
}
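
/*
 * A minimal sketch (hypothetical helper, not part of this file) of how the
 * back-pointer installed by vvp_page_init() for cached pages can be read
 * back: PagePrivate() guards the lookup, and vmpage->private holds the
 * owning cl_page.
 *
 *	static struct cl_page *vvp_vmpage_to_cl_page(struct page *vmpage)
 *	{
 *		if (!PagePrivate(vmpage))
 *			return NULL;
 *		return (struct cl_page *)vmpage->private;
 *	}
 */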