LU-14644 vvp: wait for nrpages to be updated
lustre/llite/vvp_page.c (fs/lustre-release.git)

/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

#include <libcfs/libcfs.h>
#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

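/*
 * Common teardown helper: drop the vmpage reference taken in
 * vvp_page_init(). When the caller passes a pagevec, the release is
 * batched; once the pagevec fills up, pagevec_release() drops every
 * accumulated page at once.
 */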
static void vvp_page_fini_common(struct vvp_page *vpg, struct pagevec *pvec)
{
        struct page *vmpage = vpg->vpg_page;

        LASSERT(vmpage != NULL);
        if (pvec) {
                if (!pagevec_add(pvec, vmpage))
                        pagevec_release(pvec);
        } else {
                put_page(vmpage);
        }
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice,
                          struct pagevec *pvec)
{
        struct vvp_page *vpg     = cl2vvp_page(slice);
        struct page     *vmpage  = vpg->vpg_page;

        /*
         * vmpage->private was already cleared when page was moved into
         * VPG_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(vpg, pvec);
}

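/*
 * Acquire ownership of the page for an IO: lock the vmpage and wait for
 * any writeback in flight. With nonblock set, return -EAGAIN instead of
 * sleeping on either the page lock or the writeback bit.
 */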
static int vvp_page_own(const struct lu_env *env,
                        const struct cl_page_slice *slice, struct cl_io *io,
                        int nonblock)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        ENTRY;

        LASSERT(vmpage != NULL);
        if (nonblock) {
                if (!trylock_page(vmpage))
                        return -EAGAIN;

                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        return -EAGAIN;
                }

                return 0;
        }

        lock_page(vmpage);
        wait_on_page_writeback(vmpage);

        RETURN(0);
}

static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        struct page *vmpage = cl2vm_page(slice);

        ENTRY;

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        unlock_page(vmpage);

        EXIT;
}

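/*
 * Remove the page from the page cache. A read-ahead page that was never
 * consumed is accounted as discarded in the read-ahead statistics first.
 */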
static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        struct page     *vmpage = cl2vm_page(slice);
        struct vvp_page *vpg    = cl2vvp_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used && vmpage->mapping)
                ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);

        generic_error_remove_page(vmpage->mapping, vmpage);
}

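/*
 * Sever the vmpage -> cl_page link: drop the reference on the cl_page
 * taken in vvp_page_init() and clear vmpage->private.
 */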
static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct page      *vmpage = cl2vm_page(slice);
        struct cl_page   *page   = slice->cpl_page;
        int refc;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == page);

        /* Drop the reference count held in vvp_page_init */
        refc = atomic_dec_return(&page->cp_ref);
        LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * Reference from vmpage to cl_page is removed, but the reference back
         * is still here. It is removed later in vvp_page_fini().
         */
}

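/*
 * Propagate the cl_page up-to-date state to the PG_uptodate flag of the
 * backing vmpage.
 */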
static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            int uptodate)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}

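/*
 * Report the vmpage lock state: -EBUSY when the page is locked,
 * -ENODATA when it is not.
 */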
static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip the page already marked as PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

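/*
 * Prepare a page for write-out. Only the async ll_writepage() path
 * (cp_sync_io == NULL) sets the writeback flag here; a sync write keeps
 * the page locked for the duration of the transfer instead.
 */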
static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageDirty(vmpage));

        /* The ll_writepage() path is not a sync write, so the page
         * writeback flag needs to be set.
         */
        if (pg->cp_sync_io == NULL)
                set_page_writeback(vmpage);

        return 0;
}

/**
 * Handles page transfer errors at the VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can be different from the \a vmpage inode in the
 * case of direct IO.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage,
                             int ioret)
{
        struct vvp_object *obj = cl_inode2vvp(inode);

        if (ioret == 0) {
                ClearPageError(vmpage);
                obj->vob_discard_page_warned = 0;
        } else {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);

                if ((ioret == -ESHUTDOWN || ioret == -EINTR ||
                     ioret == -EIO) && obj->vob_discard_page_warned == 0) {
                        obj->vob_discard_page_warned = 1;
                        ll_dirty_page_discard_warn(vmpage, ioret);
                }
        }
}

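/*
 * Read completion handler. On success the page is exported as up to
 * date, unless read-ahead deferred that to the first reader. A deferred
 * page that fails with -EAGAIN (mirror read failure) is removed so the
 * retry can fetch it from a different mirror.
 */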
static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;
        struct cl_page  *page   = slice->cpl_page;
        struct inode    *inode  = vvp_object_inode(page->cp_obj);

        ENTRY;
        LASSERT(PageLocked(vmpage));
        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (vpg->vpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                if (!vpg->vpg_defer_uptodate)
                        cl_page_export(env, page, 1);
        } else if (vpg->vpg_defer_uptodate) {
                vpg->vpg_defer_uptodate = 0;
                if (ioret == -EAGAIN) {
                        /* The mirror read failed; the page needs to be
                         * destroyed, because the sub-page would be from the
                         * wrong OSC when retrying the read from a new mirror.
                         */
                        generic_error_remove_page(vmpage->mapping, vmpage);
                }
        }

        if (page->cp_sync_io == NULL)
                unlock_page(vmpage);

        EXIT;
}

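/*
 * Write completion handler. A sync write leaves the page locked for the
 * waiter; an async write ends the writeback started in
 * vvp_page_prep_write() and records any IO error against the inode.
 */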
static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        struct page     *vmpage = vpg->vpg_page;

        ENTRY;
        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        if (pg->cp_sync_io != NULL) {
                LASSERT(PageLocked(vmpage));
                LASSERT(!PageWriteback(vmpage));
        } else {
                LASSERT(PageWriteback(vmpage));
                /*
                 * Only mark the page in error when it's an async write,
                 * because applications won't wait for the IO to finish.
                 */
                vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);

                end_page_writeback(vmpage);
        }
        EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as part of a transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped
 * (this is bad, but hopefully a rare situation, as it usually results in
 * the transfer being shorter than possible).
 *
 * \retval 0         success, the page can be placed into the transfer
 *
 * \retval -EALREADY the page is either used by concurrent IO or has already
 *                   been made ready. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result = 0;

        lock_page(vmpage);
        if (clear_page_dirty_for_io(vmpage)) {
                LASSERT(pg->cp_state == CPS_CACHED);
                /* This actually clears the dirty bit in the radix
                 * tree.
                 */
                set_page_writeback(vmpage);
                CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
        } else if (pg->cp_state == CPS_PAGEOUT) {
                /* is it possible for osc_flush_async_page() to already
                 * make it ready?
                 */
                result = -EALREADY;
        } else {
                CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
                              pg->cp_state);
                LBUG();
        }
        unlock_page(vmpage);
        RETURN(result);
}

static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        (*printer)(env, cookie,
                   LUSTRE_VVP_NAME"-page@%p(%d:%d) vm@%p ",
                   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);

        if (vmpage != NULL) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }

        (*printer)(env, cookie, "\n");

        return 0;
}

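/*
 * make_ready stub for the read path: pages are never made ready for a
 * read transfer, so reaching this is a bug.
 */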
static int vvp_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice)
{
        /*
         * Cached read?
         */
        LBUG();

        return 0;
}

static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_page_prep_read,
                        .cpo_completion = vvp_page_completion_read,
                        .cpo_make_ready = vvp_page_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_page_prep_write,
                        .cpo_completion = vvp_page_completion_write,
                        .cpo_make_ready = vvp_page_make_ready,
                },
        },
};

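/*
 * Transient pages (CPT_TRANSIENT) are not inserted into the page cache;
 * they back short-lived IO such as direct IO, so only a reduced set of
 * operations is needed below.
 */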
static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        /*
         * For a transient page, remove it from the radix tree.
         */
        cl_page_delete(env, page);
}

static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        return -EBUSY;
}

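/*
 * Finalize a transient page: release the vmpage reference and drop the
 * per-object count of outstanding transient pages.
 */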
static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice,
                                    struct pagevec *pvec)
{
        struct vvp_page *vpg = cl2vvp_page(slice);
        struct vvp_object *clobj = cl2vvp(slice->cpl_obj);

        vvp_page_fini_common(vpg, pvec);
        atomic_dec(&clobj->vob_transient_pages);
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_discard            = vvp_transient_page_discard,
        .cpo_fini               = vvp_transient_page_fini,
        .cpo_is_vmlocked        = vvp_transient_page_is_vmlocked,
        .cpo_print              = vvp_page_print,
};

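/*
 * Initialize the VVP slice of a cl_page. A reference is taken on the
 * vmpage in all cases; a cacheable page is additionally linked back to
 * its cl_page through vmpage->private (undone in vvp_page_delete()),
 * while a transient page is only counted on its object.
 */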
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                struct cl_page *page, pgoff_t index)
{
        struct vvp_page *vpg = cl_object_page_slice(obj, page);
        struct page     *vmpage = page->cp_vmpage;

        CLOBINVRNT(env, obj, vvp_object_invariant(obj));

        vpg->vpg_page = vmpage;
        get_page(vmpage);

        if (page->cp_type == CPT_CACHEABLE) {
                /* in cache, decref in vvp_page_delete */
                atomic_inc(&page->cp_ref);
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
                cl_page_slice_add(page, &vpg->vpg_cl, obj,
                                &vvp_page_ops);
        } else {
                struct vvp_object *clobj = cl2vvp(obj);

                cl_page_slice_add(page, &vpg->vpg_cl, obj,
                                &vvp_transient_page_ops);
                atomic_inc(&clobj->vob_transient_pages);
        }
        return 0;
}