lustre/llite/vvp_page.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#ifndef __KERNEL__
# error This file is kernel only.
#endif

#include <obd.h>
#include <lustre_lite.h>

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

static void vvp_page_fini_common(struct ccc_page *cp)
{
        cfs_page_t *vmpage = cp->cpg_page;

        LASSERT(vmpage != NULL);
        page_cache_release(vmpage);
        OBD_SLAB_FREE_PTR(cp, vvp_page_kmem);
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        cfs_page_t *vmpage  = cp->cpg_page;

        /*
         * vmpage->private was already cleared when page was moved into
         * VPG_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(cp);
}

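/**
 * Implements cl_page_operations::cpo_own(): take ownership of the page on
 * behalf of an IO by locking the VM page and waiting for any writeback in
 * flight to complete.
 */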
static void vvp_page_own(const struct lu_env *env,
                         const struct cl_page_slice *slice,
                         struct cl_io *unused)
{
        struct ccc_page *vpg    = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vpg->cpg_page;

        LASSERT(vmpage != NULL);
        lock_page(vmpage);
        wait_on_page_writeback(vmpage);
}

static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        unlock_page(vmpage);
}

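/**
 * Implements cl_page_operations::cpo_discard(): account a discarded
 * readahead page in the readahead stats and evict the page from the page
 * cache via truncate_complete_page().
 */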
static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        cfs_page_t           *vmpage  = cl2vm_page(slice);
        struct address_space *mapping;
        struct ccc_page      *cpg     = cl2ccc_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        /* Dereference vmpage only after the NULL assertion above. */
        mapping = vmpage->mapping;

        if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
                ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);

        /*
         * truncate_complete_page() calls
         * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
         */
        truncate_complete_page(mapping, vmpage);
}

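/**
 * Implements cl_page_operations::cpo_unmap(): tear down all user-space
 * mappings of the page range before the page leaves the page cache.
 */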
static int vvp_page_unmap(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        __u64       offset;

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        /* Widen the index to 64 bits before shifting to avoid overflow on
         * 32-bit kernels. */
        offset = (__u64)vmpage->index << CFS_PAGE_SHIFT;
        /*
         * XXX is it safe to call this with the page lock held?
         */
        ll_teardown_mmaps(vmpage->mapping, offset, offset + CFS_PAGE_SIZE);
        return 0;
}

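/**
 * Implements cl_page_operations::cpo_delete(): sever the link from the VM
 * page back to the cl_page as the page enters the CPS_FREEING state.
 */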
static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        cfs_page_t       *vmpage = cl2vm_page(slice);
        struct inode     *inode  = vmpage->mapping->host;
        struct cl_object *obj    = slice->cpl_obj;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
        LASSERT(inode == ccc_object_inode(obj));

        vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * Reference from vmpage to cl_page is removed, but the reference back
         * is still here. It is removed later in vvp_page_fini().
         */
}

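/**
 * Implements cl_page_operations::cpo_export(): mark the VM page up-to-date
 * once its contents are valid, making it visible to the VM/VFS layer.
 */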
static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        SetPageUptodate(vmpage);
}

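/**
 * Implements cl_page_operations::cpo_is_vmlocked(). Note the calling
 * convention: -EBUSY means the VM page is locked, -ENODATA means it is not;
 * 0 is never returned.
 */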
static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip the page already marked as PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

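/**
 * Implements cl_page_operations::cpo_prep() for write: transition the VM
 * page from dirty to writeback. -EALREADY tells the transfer engine that
 * the page no longer needs to be written out.
 */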
static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        int result;

        if (clear_page_dirty_for_io(vmpage)) {
                set_page_writeback(vmpage);
                result = 0;
        } else
                result = -EALREADY;
        return result;
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can be different from the \a vmpage inode in the
 * case of direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, cfs_page_t *vmpage, int ioret)
{
        if (ioret == 0)
                ClearPageError(vmpage);
        else if (ioret != -EINTR) {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);
        }
}

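/**
 * Common part of read/write completion: either wake up the waiter of a sync
 * IO through its cl_sync_io anchor, or, for cacheable pages, record any
 * transfer error on the VM page and unlock it.
 */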
static void vvp_page_completion_common(const struct lu_env *env,
                                       struct ccc_page *cp, int ioret)
{
        struct cl_page    *clp    = cp->cpg_cl.cpl_page;
        cfs_page_t        *vmpage = cp->cpg_page;
        struct inode      *inode  = ccc_object_inode(clp->cp_obj);
        struct cl_sync_io *anchor = cp->cpg_sync_io;

        LINVRNT(cl_page_is_vmlocked(env, clp));

        if (anchor != NULL) {
                cp->cpg_sync_io  = NULL;
                cl_sync_io_note(anchor, ioret);
        } else if (clp->cp_type == CPT_CACHEABLE) {
                /*
                 * A Lustre file may be used as a backing store for swap
                 * space. In that case the page writeback bit is set by the
                 * VM, and obviously we must not clear it. Fortunately such
                 * pages are always TRANSIENT, so a CPT_CACHEABLE page can
                 * safely be asserted to not be under writeback here.
                 */
                LASSERT(!PageWriteback(vmpage));

                /*
                 * Mark the page in error only when it is a cacheable page
                 * and NOT sync IO.
                 *
                 * For sync IO and direct IO (CPT_TRANSIENT), the error is
                 * visible to the application directly, so there is no need
                 * to mark the page in error at all.
                 */
                vvp_vmpage_error(inode, vmpage, ioret);
                unlock_page(vmpage);
        }
}

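/**
 * Completion handler for read: drop the readahead reference taken for a
 * deferred-uptodate page, and export the page to the VM on success.
 */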
static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct ccc_page *cp    = cl2ccc_page(slice);
        struct cl_page  *page  = cl_page_top(slice->cpl_page);
        struct inode    *inode = ccc_object_inode(page->cp_obj);
        ENTRY;

        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (cp->cpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                /* XXX: do we need this for transient pages? */
                if (!cp->cpg_defer_uptodate)
                        cl_page_export(env, page);
        } else
                cp->cpg_defer_uptodate = 0;
        vvp_page_completion_common(env, cp, ioret);

        EXIT;
}

static void vvp_page_completion_write_common(const struct lu_env *env,
                                             const struct cl_page_slice *slice,
                                             int ioret)
{
        struct ccc_page *cp = cl2ccc_page(slice);

        /*
         * TODO: If we hit a recoverable error (-ENOMEM, etc.), it would make
         * sense to re-add the page to the oap pending list, so that it need
         * not be taken off the SoM write pending list. To implement this,
         * the ->cpo_completion() method just needs to return a non-zero
         * value; the underlying transfer would be notified and would re-add
         * the page to the pending transfer queue.  -jay
         */
        cp->cpg_write_queued = 0;
        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);

        vvp_page_completion_common(env, cp, ioret);
}

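/**
 * Completion handler for write: clear the PG_writeback bit set by
 * vvp_page_prep_write() or vvp_page_make_ready(), then run the common
 * write-completion path.
 */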
static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        cfs_page_t      *vmpage = cp->cpg_page;

        ENTRY;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(PageWriteback(vmpage));

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        end_page_writeback(vmpage);
        LASSERT(!PageWriteback(vmpage));

        vvp_page_completion_write_common(env, slice, ioret);
        EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and send it out as
 * part of a transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped (this
 * is bad, but hopefully a rare situation, as it usually results in the
 * transfer being shorter than possible).
 *
 * \retval 0       success, page can be placed into transfer
 *
 * \retval -EAGAIN page is either in use by concurrent IO or has been
 * truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result;

        result = -EAGAIN;
        /* we're trying to write, but the page is locked; come back later */
        if (!TestSetPageLocked(vmpage)) {
                if (pg->cp_state == CPS_CACHED) {
                        /*
                         * We can cancel IO if the page wasn't dirty after
                         * all.
                         */
                        clear_page_dirty_for_io(vmpage);
                        /*
                         * This actually clears the dirty bit in the radix
                         * tree.
                         */
                        set_page_writeback(vmpage);

                        CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
                        result = 0;
                } else
                        /*
                         * Page was concurrently truncated.
                         */
                        LASSERT(pg->cp_state == CPS_FREEING);
        }
        RETURN(result);
}

static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct ccc_page *vp     = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vp->cpg_page;

        (*printer)(env, cookie,
                   LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) vm@%p ",
                   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
                   vp->cpg_write_queued, vmpage);
        if (vmpage != NULL) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }
        (*printer)(env, cookie, "\n");
        return 0;
}

static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_unmap         = vvp_page_unmap,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep        = vvp_page_prep_read,
                        .cpo_completion  = vvp_page_completion_read,
                        .cpo_make_ready  = ccc_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep        = vvp_page_prep_write,
                        .cpo_completion  = vvp_page_completion_write,
                        .cpo_make_ready  = vvp_page_make_ready,
                }
        }
};

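/**
 * Transient pages are used for direct IO (CPT_TRANSIENT); they are never
 * inserted into the page cache and are serialized by the inode mutex, so
 * most page operations below only need to verify that the mutex is held.
 */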
static void vvp_transient_page_verify(const struct cl_page *page)
{
        struct inode *inode = ccc_object_inode(page->cp_obj);

        LASSERT(!TRYLOCK_INODE_MUTEX(inode));
        /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
}

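/*
 * Ownership of a transient page already belongs to the IO that created it,
 * so own/assume/unassume/disown reduce to invariant checks.
 */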
static void vvp_transient_page_own(const struct lu_env *env,
                                   const struct cl_page_slice *slice,
                                   struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        vvp_transient_page_verify(page);

        /*
         * For transient pages, remove the page from the radix tree.
         */
        cl_page_delete(env, page);
}

static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode    *inode = ccc_object_inode(slice->cpl_obj);
        int              locked;

        locked = !TRYLOCK_INODE_MUTEX(inode);
        if (!locked)
                UNLOCK_INODE_MUTEX(inode);
        return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion_write(const struct lu_env *env,
                                    const struct cl_page_slice *slice,
                                    int ioret)
{
        vvp_transient_page_verify(slice->cpl_page);
        vvp_page_completion_write_common(env, slice, ioret);
}

static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct ccc_object *clobj = cl2ccc(clp->cp_obj);

        vvp_page_fini_common(cp);
        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
        clobj->cob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own           = vvp_transient_page_own,
        .cpo_assume        = vvp_transient_page_assume,
        .cpo_unassume      = vvp_transient_page_unassume,
        .cpo_disown        = vvp_transient_page_disown,
        .cpo_discard       = vvp_transient_page_discard,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_fini          = vvp_transient_page_fini,
        .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep        = ccc_transient_page_prep,
                        .cpo_completion  = vvp_page_completion_read,
                },
                [CRT_WRITE] = {
                        .cpo_prep        = ccc_transient_page_prep,
                        .cpo_completion  = vvp_transient_page_completion_write,
                }
        }
};

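/**
 * Allocates a ccc_page and attaches it to \a page as the VVP layer's slice.
 * Cacheable pages are linked to their VM page through vmpage->private;
 * transient (direct IO) pages are counted in cob_transient_pages instead.
 *
 * Note the return convention: ERR_PTR(-ENOMEM) on allocation failure and
 * ERR_PTR(0), i.e. NULL, on success.
 */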
struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct ccc_page *cpg;
        int result;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
        if (cpg != NULL) {
                cpg->cpg_page = vmpage;
                page_cache_get(vmpage);

                CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
                if (page->cp_type == CPT_CACHEABLE) {
                        SetPagePrivate(vmpage);
                        vmpage->private = (unsigned long)page;
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_page_ops);
                } else {
                        struct ccc_object *clobj = cl2ccc(obj);

                        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_transient_page_ops);
                        clobj->cob_transient_pages++;
                }
                result = 0;
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}
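
/*
 * A minimal sketch of how this layer is reached (illustrative only; the
 * helper below is hypothetical, not part of Lustre). The cl_page layer
 * calls vvp_page_init() indirectly through cl_page_find(), which looks up
 * or creates the cl_page covering a VM page and stacks the per-layer
 * slices, including the one wired up above:
 *
 *      static int ll_attach_page(const struct lu_env *env,
 *                                struct cl_object *obj, cfs_page_t *vmpage)
 *      {
 *              struct cl_page *clp;
 *
 *              clp = cl_page_find(env, obj, page_index(vmpage), vmpage,
 *                                 CPT_CACHEABLE);
 *              if (IS_ERR(clp))
 *                      return PTR_ERR(clp);
 *              // clp now carries a VVP slice created by vvp_page_init()
 *              return 0;
 *      }
 */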
576