lustre/llite/vvp_page.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

#include <libcfs/libcfs.h>
#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */
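/*
 * A VVP page glues a cl_page to the kernel's struct page ("vmpage"):
 * vvp_page_init() pins the vmpage, takes a cp_ref on the cl_page and links
 * the two through vmpage->private; vvp_page_delete() severs that link and
 * drops the cp_ref; vvp_page_fini() finally releases the vmpage reference.
 * Transient (direct I/O) pages skip this pinning, see vvp_page_init().
 */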
static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice,
                          struct pagevec *pvec)
{
        struct vvp_page *vpg     = cl2vvp_page(slice);
        struct page     *vmpage  = vpg->vpg_page;

        LASSERT(vmpage != NULL);
        /*
         * vmpage->private was already cleared when the page was moved into
         * the freeing state, see vvp_page_delete().
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        if (pvec) {
                if (!pagevec_add(pvec, vmpage))
                        pagevec_release(pvec);
        } else {
                put_page(vmpage);
        }
}

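/*
 * Implements cl_page_operations::cpo_own(): owning a VVP page means holding
 * the vmpage lock with no writeback in flight. The nonblock variant returns
 * -EAGAIN instead of sleeping on either condition.
 */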
static int vvp_page_own(const struct lu_env *env,
                        const struct cl_page_slice *slice, struct cl_io *io,
                        int nonblock)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        ENTRY;

        LASSERT(vmpage != NULL);
        if (nonblock) {
                if (!trylock_page(vmpage))
                        return -EAGAIN;

                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        return -EAGAIN;
                }

                return 0;
        }

        lock_page(vmpage);
        wait_on_page_writeback(vmpage);

        RETURN(0);
}

static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        struct page *vmpage = cl2vm_page(slice);

        ENTRY;

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        unlock_page(vmpage);

        EXIT;
}

static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        struct page     *vmpage = cl2vm_page(slice);
        struct vvp_page *vpg    = cl2vvp_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used && vmpage->mapping)
                ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);

        generic_error_remove_page(vmpage->mapping, vmpage);
}

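/*
 * Implements cl_page_operations::cpo_delete(): called with the vmpage locked
 * while the cl_page is being freed. Severs the vmpage->cl_page link set up in
 * vvp_page_init() and drops the cp_ref taken there; the reference on the
 * vmpage itself is only released later, in vvp_page_fini().
 */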
static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct page      *vmpage = cl2vm_page(slice);
        struct cl_page   *page   = slice->cpl_page;
        int refc;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == page);

        /* Drop the reference count held in vvp_page_init */
        refc = atomic_dec_return(&page->cp_ref);
        LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * Reference from vmpage to cl_page is removed, but the reference back
         * is still here. It is removed later in vvp_page_fini().
         */
}

static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip the page already marked as PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageDirty(vmpage));

        /* The ll_writepage path is not a sync write, so the page writeback
         * flag needs to be set here.
         */
        if (pg->cp_sync_io == NULL)
                set_page_writeback(vmpage);

        return 0;
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can differ from the \a vmpage inode in the
 * direct-I/O case.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage,
                             int ioret)
{
        struct vvp_object *obj = cl_inode2vvp(inode);

        if (ioret == 0) {
                ClearPageError(vmpage);
                obj->vob_discard_page_warned = 0;
        } else {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);

                if ((ioret == -ESHUTDOWN || ioret == -EINTR ||
                     ioret == -EIO) && obj->vob_discard_page_warned == 0) {
                        obj->vob_discard_page_warned = 1;
                        ll_dirty_page_discard_warn(inode, ioret);
                }
        }
}

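/*
 * Read transfer completion: returns the readahead credit held by a
 * deferred-uptodate page, marks the vmpage up to date on success, and
 * unlocks it unless the read is a synchronous one the caller is waiting on.
 */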
static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;
        struct cl_page  *page   = slice->cpl_page;
        struct inode    *inode  = vvp_object_inode(page->cp_obj);

        ENTRY;
        LASSERT(PageLocked(vmpage));
        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (vpg->vpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                if (!vpg->vpg_defer_uptodate)
                        SetPageUptodate(vmpage);
        } else if (vpg->vpg_defer_uptodate) {
                vpg->vpg_defer_uptodate = 0;
                if (ioret == -EAGAIN) {
                        /* The mirror read failed; destroy the page, because
                         * the sub-page would belong to the wrong OSC when the
                         * read is retried from a new mirror.
                         */
                        generic_error_remove_page(vmpage->mapping, vmpage);
                }
        }

        if (page->cp_sync_io == NULL)
                unlock_page(vmpage);

        EXIT;
}

static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        struct page     *vmpage = vpg->vpg_page;

        ENTRY;
        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        if (pg->cp_sync_io != NULL) {
                LASSERT(PageLocked(vmpage));
                LASSERT(!PageWriteback(vmpage));
        } else {
                LASSERT(PageWriteback(vmpage));
                /*
                 * Only mark the page in error when it is an async write,
                 * because applications won't wait for the IO to finish.
                 */
                vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);

                end_page_writeback(vmpage);
        }
        EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out as
 * a part of transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped (this
 * is bad, but hopefully a rare situation, as it usually results in a transfer
 * being shorter than possible).
 *
 * \retval 0      success, page can be placed into transfer
 *
 * \retval -EAGAIN page is either used by concurrent IO or has been
 * truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result = 0;

        lock_page(vmpage);
        if (clear_page_dirty_for_io(vmpage)) {
                LASSERT(pg->cp_state == CPS_CACHED);
                /* This actually clears the dirty bit in the radix
                 * tree.
                 */
                set_page_writeback(vmpage);
                CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
        } else if (pg->cp_state == CPS_PAGEOUT) {
                /* Is it possible for osc_flush_async_page() to already
                 * make it ready?
                 */
                result = -EALREADY;
        } else {
                CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
                              pg->cp_state);
                LBUG();
        }
        unlock_page(vmpage);
        RETURN(result);
}

static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        (*printer)(env, cookie,
                   LUSTRE_VVP_NAME"-page@%p(%d:%d) vm@%p ",
                   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);

        if (vmpage != NULL) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }

        (*printer)(env, cookie, "\n");

        return 0;
}

static int vvp_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice)
{
        /*
         * Cached read? A page is never "made ready" for a read transfer,
         * so this handler must not be reached.
         */
        LBUG();

        return 0;
}

static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_page_prep_read,
                        .cpo_completion = vvp_page_completion_read,
                        .cpo_make_ready = vvp_page_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_page_prep_write,
                        .cpo_completion = vvp_page_completion_write,
                        .cpo_make_ready = vvp_page_make_ready,
                },
        },
};

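/*
 * Transient pages (cp_type == CPT_TRANSIENT, used for direct I/O) are backed
 * by user memory rather than the page cache and are not pinned by this layer,
 * so they only need the reduced set of operations below.
 */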
static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        /*
         * For a transient page, remove it from the radix tree.
         */
        cl_page_delete(env, page);
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_discard            = vvp_transient_page_discard,
        .cpo_print              = vvp_page_print,
};

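/*
 * Initializes the VVP slice of a newly created cl_page: records the backing
 * vmpage and registers either the transient or the regular page operations,
 * taking the extra page and cl_page references only in the regular (page
 * cache) case.
 */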
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                struct cl_page *page, pgoff_t index)
{
        struct vvp_page *vpg = cl_object_page_slice(obj, page);
        struct page     *vmpage = page->cp_vmpage;

        CLOBINVRNT(env, obj, vvp_object_invariant(obj));

        vpg->vpg_page = vmpage;

        if (page->cp_type == CPT_TRANSIENT) {
                /* DIO pages are referenced by userspace, so we don't need to
                 * take a reference on them (contrast with the get_page() call
                 * in the page cache case below).
                 */
                cl_page_slice_add(page, &vpg->vpg_cl, obj,
                                  &vvp_transient_page_ops);
        } else {
                get_page(vmpage);
                /* in cache, decref in vvp_page_delete */
                atomic_inc(&page->cp_ref);
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
                cl_page_slice_add(page, &vpg->vpg_cl, obj,
                                &vvp_page_ops);
        }

        return 0;
}