/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

#include <libcfs/libcfs.h>
#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

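/*
 * Drop the vmpage reference taken in vvp_page_init(). When a pagevec is
 * supplied, the put is batched through it, releasing the accumulated pages
 * once the pagevec fills up; otherwise the page is released immediately.
 */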
static void vvp_page_fini_common(struct vvp_page *vpg, struct pagevec *pvec)
{
        struct page *vmpage = vpg->vpg_page;

        LASSERT(vmpage != NULL);
        if (pvec) {
                if (!pagevec_add(pvec, vmpage))
                        pagevec_release(pvec);
        } else {
                put_page(vmpage);
        }
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice,
                          struct pagevec *pvec)
{
        struct vvp_page *vpg     = cl2vvp_page(slice);
        struct page     *vmpage  = vpg->vpg_page;

        /*
         * vmpage->private was already cleared when page was moved into
         * VPG_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(vpg, pvec);
}

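/*
 * Implements cl_page_operations::cpo_own(): take ownership of the page by
 * locking the vmpage and waiting out any writeback in progress. In
 * nonblocking mode, only try-lock and return -EAGAIN on contention or
 * pending writeback.
 */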
static int vvp_page_own(const struct lu_env *env,
                        const struct cl_page_slice *slice, struct cl_io *io,
                        int nonblock)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        LASSERT(vmpage != NULL);
        if (nonblock) {
                if (!trylock_page(vmpage))
                        return -EAGAIN;

                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        return -EAGAIN;
                }

                return 0;
        }

        lock_page(vmpage);
        wait_on_page_writeback(vmpage);

        return 0;
}

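/*
 * cpo_assume() is called when ownership was obtained elsewhere: the vmpage
 * is expected to be locked already, so only wait out any writeback in
 * progress.
 */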
static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        unlock_page(vmpage);
}

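/*
 * A page that was read ahead (vpg_defer_uptodate) but discarded before it
 * was ever consumed counts as wasted readahead, so bump RA_STAT_DISCARDED
 * before invalidating the vmpage.
 */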
static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        struct page     *vmpage = cl2vm_page(slice);
        struct vvp_page *vpg    = cl2vvp_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used && vmpage->mapping)
                ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);

        ll_invalidate_page(vmpage);
}

static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct page      *vmpage = cl2vm_page(slice);
        struct cl_page   *page   = slice->cpl_page;
        int refc;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == page);

        /* Drop the reference count held in vvp_page_init */
        refc = atomic_dec_return(&page->cp_ref);
        LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * Reference from vmpage to cl_page is removed, but the reference back
         * is still here. It is removed later in vvp_page_fini().
         */
}

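/*
 * Implements cl_page_operations::cpo_export(): mirror the cl_page uptodate
 * state into the vmpage's PG_uptodate flag.
 */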
static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            int uptodate)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}

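/*
 * Note the cpo_is_vmlocked() return convention: -EBUSY means the vmpage is
 * locked, -ENODATA means it is not.
 */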
static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip a page that is already marked PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageDirty(vmpage));

        /* The ll_writepage path is not a sync write, so the page writeback
         * flag needs to be set here. */
        if (pg->cp_sync_io == NULL)
                set_page_writeback(vmpage);

        return 0;
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can differ from the \a vmpage inode in the case of
 * direct IO.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
        struct vvp_object *obj = cl_inode2vvp(inode);

        if (ioret == 0) {
                ClearPageError(vmpage);
                obj->vob_discard_page_warned = 0;
        } else {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);

                if ((ioret == -ESHUTDOWN || ioret == -EINTR ||
                     ioret == -EIO) && obj->vob_discard_page_warned == 0) {
                        obj->vob_discard_page_warned = 1;
                        ll_dirty_page_discard_warn(vmpage, ioret);
                }
        }
}

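/*
 * Read completion: return the readahead credit held for a deferred-uptodate
 * page, mark the vmpage uptodate on success (unless that is deferred), and
 * on a failed mirror read (-EWOULDBLOCK) invalidate the vmpage so it can be
 * re-read from another mirror.
 */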
static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;
        struct cl_page  *page   = slice->cpl_page;
        struct inode    *inode  = vvp_object_inode(page->cp_obj);
        ENTRY;

        LASSERT(PageLocked(vmpage));
        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (vpg->vpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                if (!vpg->vpg_defer_uptodate)
                        cl_page_export(env, page, 1);
        } else if (vpg->vpg_defer_uptodate) {
                vpg->vpg_defer_uptodate = 0;
                if (ioret == -EWOULDBLOCK) {
                        /* The mirror read failed; destroy the page, because
                         * its sub-page would belong to the wrong OSC when the
                         * read is retried from a new mirror. */
                        ll_invalidate_page(vmpage);
                }
        }

        if (page->cp_sync_io == NULL)
                unlock_page(vmpage);

        EXIT;
}

static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        struct page     *vmpage = vpg->vpg_page;
        ENTRY;

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        if (pg->cp_sync_io != NULL) {
                LASSERT(PageLocked(vmpage));
                LASSERT(!PageWriteback(vmpage));
        } else {
                LASSERT(PageWriteback(vmpage));
                /*
                 * Only mark the page in error when it is an async write,
                 * because applications won't wait for the IO to finish.
                 */
                vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);

                end_page_writeback(vmpage);
        }
        EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as a part of transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped
 * (this is bad, but hopefully a rare situation, as it usually results in a
 * transfer being shorter than possible).
 *
 * \retval 0         success, page can be placed into transfer
 *
 * \retval -EALREADY page is either used by concurrent IO or has been
 *                   truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result = 0;

        lock_page(vmpage);
        if (clear_page_dirty_for_io(vmpage)) {
                LASSERT(pg->cp_state == CPS_CACHED);
                /* This actually clears the dirty bit in the radix
                 * tree. */
                set_page_writeback(vmpage);
                CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
        } else if (pg->cp_state == CPS_PAGEOUT) {
                /* Is it possible for osc_flush_async_page() to have already
                 * made it ready? */
                result = -EALREADY;
        } else {
                CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
                              pg->cp_state);
                LBUG();
        }
        unlock_page(vmpage);
        RETURN(result);
}

static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        (*printer)(env, cookie,
                   LUSTRE_VVP_NAME"-page@%p(%d:%d) vm@%p ",
                   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);

        if (vmpage != NULL) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }

        (*printer)(env, cookie, "\n");

        return 0;
}

static int vvp_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice)
{
        /*
         * Cached read? make_ready must never be called on a read page, so
         * reaching this is a bug.
         */
        LBUG();

        return 0;
}

static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_page_prep_read,
                        .cpo_completion = vvp_page_completion_read,
                        .cpo_make_ready = vvp_page_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_page_prep_write,
                        .cpo_completion = vvp_page_completion_write,
                        .cpo_make_ready = vvp_page_make_ready,
                },
        },
};

static int vvp_transient_page_prep(const struct lu_env *env,
                                   const struct cl_page_slice *slice,
                                   struct cl_io *unused)
{
        ENTRY;
        /* transient page should always be sent. */
        RETURN(0);
}

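/*
 * Deliberately empty: a placeholder where transient-page invariants could
 * be asserted; it gives the ownership callbacks below a common body.
 */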
static void vvp_transient_page_verify(const struct cl_page *page)
{
}

static int vvp_transient_page_own(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused, int nonblock)
{
        vvp_transient_page_verify(slice->cpl_page);
        return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        vvp_transient_page_verify(slice->cpl_page);

        /*
         * For transient pages, remove the page from the radix tree.
         */
        cl_page_delete(env, page);
}

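/*
 * A transient page has no vmpage lock to test, so the inode lock stands in
 * for it, presumably because transient pages are only used while the inode
 * is locked. A failed inode_trylock() therefore reports the page as locked.
 */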
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode *inode = vvp_object_inode(slice->cpl_obj);
        int locked;

        locked = !inode_trylock(inode);
        if (!locked)
                inode_unlock(inode);
        return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              int ioret)
{
        vvp_transient_page_verify(slice->cpl_page);
}

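/*
 * Release the vmpage reference and drop the per-object count of transient
 * pages, both taken in vvp_page_init().
 */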
static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice,
                                    struct pagevec *pvec)
{
        struct vvp_page *vpg = cl2vvp_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct vvp_object *clobj = cl2vvp(clp->cp_obj);

        vvp_page_fini_common(vpg, pvec);
        atomic_dec(&clobj->vob_transient_pages);
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own                = vvp_transient_page_own,
        .cpo_assume             = vvp_transient_page_assume,
        .cpo_unassume           = vvp_transient_page_unassume,
        .cpo_disown             = vvp_transient_page_disown,
        .cpo_discard            = vvp_transient_page_discard,
        .cpo_fini               = vvp_transient_page_fini,
        .cpo_is_vmlocked        = vvp_transient_page_is_vmlocked,
        .cpo_print              = vvp_page_print,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                }
        }
};

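/*
 * Initialize the VVP slice of a cl_page: pin the vmpage, and for a
 * CPT_CACHEABLE page link vmpage->private back to the cl_page, taking the
 * extra cp_ref that vvp_page_delete() later drops. Transient pages are
 * counted per object instead.
 */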
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_page *page, pgoff_t index)
{
        struct vvp_page *vpg = cl_object_page_slice(obj, page);
        struct page     *vmpage = page->cp_vmpage;

        CLOBINVRNT(env, obj, vvp_object_invariant(obj));

        vpg->vpg_page = vmpage;
        get_page(vmpage);

        if (page->cp_type == CPT_CACHEABLE) {
                /* in cache, decref in vvp_page_delete */
                atomic_inc(&page->cp_ref);
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
                cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
                                  &vvp_page_ops);
        } else {
                struct vvp_object *clobj = cl2vvp(obj);

                cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
                                  &vvp_transient_page_ops);
                atomic_inc(&clobj->vob_transient_pages);
        }
        return 0;
}