/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

#include <libcfs/libcfs.h>
#include "llite_internal.h"
#include "vvp_internal.h"

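/*
 * A cl_page is a stack of per-layer slices.  The slice owned by this file
 * (struct vvp_page) wraps the kernel's page cache page ("vmpage");
 * cl2vvp_page() and cl2vm_page() convert a generic cl_page_slice into
 * those two views.
 */
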
/*****************************************************************************
 *
 * Page operations.
 *
 */

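/*
 * Drop the reference on the VM page that vvp_page_init() took with
 * get_page().  Shared by both the cacheable and the transient fini paths.
 */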
static void vvp_page_fini_common(struct vvp_page *vpg)
{
        struct page *vmpage = vpg->vpg_page;

        LASSERT(vmpage != NULL);
        put_page(vmpage);
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct vvp_page *vpg     = cl2vvp_page(slice);
        struct page     *vmpage  = vpg->vpg_page;

        /*
         * vmpage->private was already cleared when the page was moved into
         * VPG_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(vpg);
}

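/*
 * Implements cl_page_operations::cpo_own().
 *
 * Ownership of a cacheable page maps directly onto the VM page lock: take
 * the page lock (a trylock in the nonblocking case) and wait out any
 * writeback in flight before claiming the page.
 */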
static int vvp_page_own(const struct lu_env *env,
                        const struct cl_page_slice *slice, struct cl_io *io,
                        int nonblock)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        LASSERT(vmpage != NULL);
        if (nonblock) {
                if (!trylock_page(vmpage))
                        return -EAGAIN;

                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        return -EAGAIN;
                }

                return 0;
        }

        lock_page(vmpage);
        wait_on_page_writeback(vmpage);

        return 0;
}

static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        unlock_page(vmpage);
}

static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        struct page     *vmpage = cl2vm_page(slice);
        struct vvp_page *vpg    = cl2vvp_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
                ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);

        ll_invalidate_page(vmpage);
}

static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct page      *vmpage = cl2vm_page(slice);
        struct inode     *inode  = vmpage->mapping->host;
        struct cl_object *obj    = slice->cpl_obj;
        struct cl_page   *page   = slice->cpl_page;
        int refc;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == page);
        LASSERT(inode == vvp_object_inode(obj));

        /* Drop the reference count held in vvp_page_init */
        refc = atomic_dec_return(&page->cp_ref);
        LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

        ClearPageUptodate(vmpage);
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * Reference from vmpage to cl_page is removed, but the reference back
         * is still here. It is removed later in vvp_page_fini().
         */
}

static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            int uptodate)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}

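/*
 * The lock state is reported through error codes rather than a boolean so
 * that the caller can tell "locked" (-EBUSY) apart from "not locked"
 * (-ENODATA).
 */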
static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip the page already marked as PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageDirty(vmpage));

        set_page_writeback(vmpage);

        return 0;
}

/**
 * Handles page transfer errors at the VM level.
 *
 * This takes the inode as a separate argument because the inode on which
 * the error is to be set can differ from the \a vmpage inode in the case
 * of direct IO.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
        struct vvp_object *obj = cl_inode2vvp(inode);

        if (ioret == 0) {
                ClearPageError(vmpage);
                obj->vob_discard_page_warned = 0;
        } else {
                SetPageError(vmpage);
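                /* Record the failure in the mapping flags so that a later
                 * fsync(2)/close(2) on this inode reports the error; this
                 * mirrors what the kernel's mapping_set_error() does. */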
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);

                if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
                     obj->vob_discard_page_warned == 0) {
                        obj->vob_discard_page_warned = 1;
                        ll_dirty_page_discard_warn(vmpage, ioret);
                }
        }
}

static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;
        struct cl_page  *page   = slice->cpl_page;
        struct inode    *inode  = vvp_object_inode(page->cp_obj);
        ENTRY;

        LASSERT(PageLocked(vmpage));
        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (vpg->vpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                if (!vpg->vpg_defer_uptodate)
                        cl_page_export(env, page, 1);
        } else {
                vpg->vpg_defer_uptodate = 0;
        }

        if (page->cp_sync_io == NULL)
                unlock_page(vmpage);

        EXIT;
}

static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        struct page     *vmpage = vpg->vpg_page;
        ENTRY;

        LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
        LASSERT(PageWriteback(vmpage));

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        /*
         * Only mark the page in error when it is an async write, because
         * applications won't wait for the IO to finish.
         */
        if (pg->cp_sync_io == NULL)
                vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);

        end_page_writeback(vmpage);
        EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as part of a transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped
 * (this is bad, but hopefully a rare situation, as it usually results in
 * the transfer being shorter than possible).
 *
 * \retval 0        success, page can be placed into transfer
 *
 * \retval -EAGAIN  page is either in use by concurrent IO or has been
 *                  truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result = 0;

        lock_page(vmpage);
        if (clear_page_dirty_for_io(vmpage)) {
                LASSERT(pg->cp_state == CPS_CACHED);
                /* This actually clears the dirty bit in the radix
                 * tree. */
                set_page_writeback(vmpage);
                CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
        } else if (pg->cp_state == CPS_PAGEOUT) {
                /* is it possible for osc_flush_async_page() to already
                 * make it ready? */
                result = -EALREADY;
        } else {
                CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
                              pg->cp_state);
                LBUG();
        }
        unlock_page(vmpage);
        RETURN(result);
}

static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct vvp_page *vpg    = cl2vvp_page(slice);
        struct page     *vmpage = vpg->vpg_page;

        (*printer)(env, cookie,
                   LUSTRE_VVP_NAME"-page@%p(%d:%d) vm@%p ",
                   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);

        if (vmpage != NULL) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }

        (*printer)(env, cookie, "\n");

        return 0;
}

static int vvp_page_fail(const struct lu_env *env,
                         const struct cl_page_slice *slice)
{
        /*
         * make_ready is only meaningful for writeback; a cached read page
         * must never reach this path, so getting here is a bug.
         */
        LBUG();

        return 0;
}

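/*
 * Method table for cacheable (page cache backed) pages.  Reads never go
 * through make_ready, hence the unconditional vvp_page_fail() in that slot.
 */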
static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_page_prep_read,
                        .cpo_completion = vvp_page_completion_read,
                        .cpo_make_ready = vvp_page_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_page_prep_write,
                        .cpo_completion = vvp_page_completion_write,
                        .cpo_make_ready = vvp_page_make_ready,
                },
        },
};

static int vvp_transient_page_prep(const struct lu_env *env,
                                   const struct cl_page_slice *slice,
                                   struct cl_io *unused)
{
        ENTRY;
        /* transient page should always be sent. */
        RETURN(0);
}

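/*
 * Intentionally empty: there is currently nothing to check on a transient
 * page.  The hook is kept so that the ownership methods below share one
 * place to add debugging assertions.
 */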
static void vvp_transient_page_verify(const struct cl_page *page)
{
}

static int vvp_transient_page_own(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused, int nonblock)
{
        vvp_transient_page_verify(slice->cpl_page);
        return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        vvp_transient_page_verify(slice->cpl_page);

        /*
         * A transient page is discarded by simply removing it from the
         * radix tree.
         */
        cl_page_delete(env, page);
}

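/*
 * A transient page has no VM page lock of its own; it lives under the
 * inode lock of the IO that created it.  Probe that lock with a trylock:
 * failing to acquire it means somebody (presumably the owning IO) holds
 * it, which is reported as "locked".
 */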
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode *inode = vvp_object_inode(slice->cpl_obj);
        int locked;

        locked = !inode_trylock(inode);
        if (!locked)
                inode_unlock(inode);
        return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              int ioret)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct vvp_page *vpg = cl2vvp_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct vvp_object *clobj = cl2vvp(clp->cp_obj);

        vvp_page_fini_common(vpg);
        atomic_dec(&clobj->vob_transient_pages);
}

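/*
 * Method table for transient pages (direct IO).  Most hooks reduce to
 * vvp_transient_page_verify(), since ownership is implied by the caller
 * rather than tracked through the VM page lock.
 */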
static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own                = vvp_transient_page_own,
        .cpo_assume             = vvp_transient_page_assume,
        .cpo_unassume           = vvp_transient_page_unassume,
        .cpo_disown             = vvp_transient_page_disown,
        .cpo_discard            = vvp_transient_page_discard,
        .cpo_fini               = vvp_transient_page_fini,
        .cpo_is_vmlocked        = vvp_transient_page_is_vmlocked,
        .cpo_print              = vvp_page_print,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                },
                [CRT_WRITE] = {
                        .cpo_prep       = vvp_transient_page_prep,
                        .cpo_completion = vvp_transient_page_completion,
                }
        }
};

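/*
 * Implements cl_object_operations::coo_page_init() for the VVP layer;
 * called while the cl_page slice stack is being assembled.
 *
 * Attach the VVP slice to \a page and link it to its VM page: a cacheable
 * page stores the cl_page pointer in vmpage->private (taking an extra
 * cl_page reference that vvp_page_delete() drops), while a transient page
 * (direct IO) is only counted in vob_transient_pages.
 */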
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_page *page, pgoff_t index)
{
        struct vvp_page *vpg = cl_object_page_slice(obj, page);
        struct page     *vmpage = page->cp_vmpage;

        CLOBINVRNT(env, obj, vvp_object_invariant(obj));

        vpg->vpg_page = vmpage;
        get_page(vmpage);

        if (page->cp_type == CPT_CACHEABLE) {
                /* in cache, decref in vvp_page_delete */
                atomic_inc(&page->cp_ref);
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
                cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
                                  &vvp_page_ops);
        } else {
                struct vvp_object *clobj = cl2vvp(obj);

                cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
                                  &vvp_transient_page_ops);
                atomic_inc(&clobj->vob_transient_pages);
        }
        return 0;
}