/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#ifndef __KERNEL__
# error This file is kernel only.
#endif

#include <obd.h>
#include <lustre_lite.h>

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

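/*
 * Common part of page finalization: drop the page-cache reference taken in
 * vvp_page_init() and free the ccc_page back to the vvp_page_kmem slab.
 */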
static void vvp_page_fini_common(struct ccc_page *cp)
{
        cfs_page_t *vmpage = cp->cpg_page;

        LASSERT(vmpage != NULL);
        page_cache_release(vmpage);
        OBD_SLAB_FREE_PTR(cp, vvp_page_kmem);
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        cfs_page_t *vmpage  = cp->cpg_page;

        /*
         * vmpage->private was already cleared when page was moved into
         * VPG_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(cp);
}

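/*
 * Take exclusive ownership of the page on behalf of \a io: lock the vmpage
 * and wait for any in-flight writeback. In non-blocking mode, return
 * -EAGAIN instead of sleeping on either the page lock or writeback.
 */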
static int vvp_page_own(const struct lu_env *env,
                        const struct cl_page_slice *slice, struct cl_io *io,
                        int nonblock)
{
        struct ccc_page *vpg    = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vpg->cpg_page;

        LASSERT(vmpage != NULL);
        if (nonblock) {
                if (TestSetPageLocked(vmpage))
                        return -EAGAIN;

                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        return -EAGAIN;
                }

                return 0;
        }

        lock_page(vmpage);
        wait_on_page_writeback(vmpage);
        return 0;
}

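/*
 * Assume ownership of a page whose vmpage lock is already held by the
 * caller; only the wait for writeback completion remains to be done here.
 */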
static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        unlock_page(cl2vm_page(slice));
}

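/*
 * Discard a page that is being dropped from the cache without write-out.
 * Pages that were read ahead but never consumed are counted in the
 * read-ahead statistics before the page is truncated.
 */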
static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        cfs_page_t           *vmpage  = cl2vm_page(slice);
        struct address_space *mapping = vmpage->mapping;
        struct ccc_page      *cpg     = cl2ccc_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
                ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);

        /*
         * truncate_complete_page() calls
         * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
         */
        truncate_complete_page(mapping, vmpage);
}

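/*
 * Tear down any user-space mappings of the page range before the page is
 * truncated or discarded, so it can no longer be reached through mmap().
 */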
static int vvp_page_unmap(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        __u64       offset = vmpage->index << CFS_PAGE_SHIFT;

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        /*
         * XXX is it safe to call this with the page lock held?
         */
        ll_teardown_mmaps(vmpage->mapping, offset, offset + CFS_PAGE_SIZE);
        return 0;
}

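/*
 * Sever the vmpage -> cl_page link when the page enters the CPS_FREEING
 * state: notify the write-pending accounting via vvp_write_complete(), then
 * clear PG_private and vmpage->private.
 */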
static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        cfs_page_t       *vmpage = cl2vm_page(slice);
        struct inode     *inode  = vmpage->mapping->host;
        struct cl_object *obj    = slice->cpl_obj;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
        LASSERT(inode == ccc_object_inode(obj));

        vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * Reference from vmpage to cl_page is removed, but the reference back
         * is still here. It is removed later in vvp_page_fini().
         */
}

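/*
 * Propagate the cl_page uptodate state to the VM page flags, so that the
 * kernel page cache sees the page as valid (or not).
 */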
static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            int uptodate)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}

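/*
 * Report whether the underlying vmpage is locked: -EBUSY if it is, -ENODATA
 * if it is not.
 */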
static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

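/*
 * Prepare a page for read transfer; a page that is already uptodate needs
 * no I/O and is skipped with -EALREADY.
 */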
static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip the page already marked as PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

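/*
 * Prepare a page for write transfer: move it from dirty to writeback state
 * and record it as write-pending. If the page turns out not to be dirty,
 * there is nothing to send and -EALREADY is returned.
 */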
static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        int result;

        if (clear_page_dirty_for_io(vmpage)) {
                set_page_writeback(vmpage);
                vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
                result = 0;
        } else
                result = -EALREADY;
        return result;
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can differ from the \a vmpage inode in the case of
 * direct IO.
 */
static void vvp_vmpage_error(struct inode *inode, cfs_page_t *vmpage, int ioret)
{
        if (ioret == 0)
                ClearPageError(vmpage);
        else if (ioret != -EINTR) {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);
        }
}

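/*
 * Transfer completion handling shared by reads and writes: for cacheable,
 * asynchronous pages, record any I/O error on the vmpage and unlock it.
 */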
static void vvp_page_completion_common(const struct lu_env *env,
                                       struct ccc_page *cp, int ioret)
{
        struct cl_page    *clp    = cp->cpg_cl.cpl_page;
        cfs_page_t        *vmpage = cp->cpg_page;
        struct inode      *inode  = ccc_object_inode(clp->cp_obj);

        LINVRNT(cl_page_is_vmlocked(env, clp));

        if (!clp->cp_sync_io && clp->cp_type == CPT_CACHEABLE) {
                /*
                 * Only mark the page as in error when it is a cacheable page
                 * and NOT sync IO.
                 *
                 * For sync IO and direct IO (CPT_TRANSIENT), the error can be
                 * seen by the application, so there is no need to mark the
                 * page as in error at all.
                 */
                vvp_vmpage_error(inode, vmpage, ioret);
                unlock_page(vmpage);
        }
}

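/*
 * Read completion: return the read-ahead accounting taken for a
 * deferred-uptodate page, export the page as uptodate on success, then do
 * the common completion handling.
 */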
static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct ccc_page *cp    = cl2ccc_page(slice);
        struct cl_page  *page  = cl_page_top(slice->cpl_page);
        struct inode    *inode = ccc_object_inode(page->cp_obj);
        ENTRY;

        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (cp->cpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                /* XXX: do we need this for transient pages? */
                if (!cp->cpg_defer_uptodate)
                        cl_page_export(env, page, 1);
        } else
                cp->cpg_defer_uptodate = 0;
        vvp_page_completion_common(env, cp, ioret);

        EXIT;
}

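/*
 * Write completion work shared by cacheable and transient pages: take the
 * page off the write-pending queue and run the common completion handling.
 */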
static void vvp_page_completion_write_common(const struct lu_env *env,
                                             const struct cl_page_slice *slice,
                                             int ioret)
{
        struct ccc_page *cp = cl2ccc_page(slice);

        /*
         * TODO: It would actually make sense to add the page back to the oap
         * pending list on a recoverable error (-ENOMEM, etc.), so that it
         * does not have to be taken off the SoM write pending list at all.
         * To implement this, we just need to return a non-zero value from
         * the ->cpo_completion() method; the underlying transfer would be
         * notified and would then re-add the page to the pending transfer
         * queue.  -jay
         */
        cp->cpg_write_queued = 0;
        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);

        vvp_page_completion_common(env, cp, ioret);
}

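/*
 * Write completion for cacheable pages: in addition to the common handling,
 * clear the VM writeback state set in vvp_page_prep_write().
 */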
static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        cfs_page_t      *vmpage = cp->cpg_page;

        ENTRY;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(PageWriteback(vmpage));

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        vvp_page_completion_write_common(env, slice, ioret);
        end_page_writeback(vmpage);
        EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as a part of transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped (this
 * is bad, but hopefully a rare situation, as it usually results in the
 * transfer being shorter than possible).
 *
 * \retval 0       success, page can be placed into transfer
 *
 * \retval -EAGAIN page is either used by concurrent IO or has been
 * truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result;

        result = -EAGAIN;
        /* We are trying to write, but the page is locked; come back later. */
        if (!TestSetPageLocked(vmpage)) {
                if (pg->cp_state == CPS_CACHED) {
                        /*
                         * We can cancel IO if page wasn't dirty after all.
                         */
                        clear_page_dirty_for_io(vmpage);
                        /*
                         * This actually clears the dirty bit in the radix
                         * tree.
                         */
                        set_page_writeback(vmpage);
                        vvp_write_pending(cl2ccc(slice->cpl_obj),
                                          cl2ccc_page(slice));
                        CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
                        result = 0;
                } else
                        /*
                         * Page was concurrently truncated.
                         */
                        LASSERT(pg->cp_state == CPS_FREEING);
        }
        RETURN(result);
}

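/*
 * Debugging helper: print the vvp state of the page (deferred-uptodate,
 * read-ahead and write-queued flags) together with the vmpage state.
 */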
static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct ccc_page *vp = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vp->cpg_page;

        (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) vm@%p ",
                   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
                   vp->cpg_write_queued, vmpage);
        if (vmpage != NULL) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }
        (*printer)(env, cookie, "\n");
        return 0;
}

static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_unmap         = vvp_page_unmap,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep        = vvp_page_prep_read,
                        .cpo_completion  = vvp_page_completion_read,
                        .cpo_make_ready  = ccc_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep        = vvp_page_prep_write,
                        .cpo_completion  = vvp_page_completion_write,
                        .cpo_make_ready  = vvp_page_make_ready,
                }
        }
};

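/*
 * Transient (direct IO) pages are protected by the inode mutex held by the
 * caller rather than by the page lock; assert that this is indeed the case.
 */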
static void vvp_transient_page_verify(const struct cl_page *page)
{
        struct inode *inode = ccc_object_inode(page->cp_obj);

        LASSERT(!TRYLOCK_INODE_MUTEX(inode));
        /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
}

static int vvp_transient_page_own(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused, int nonblock)
{
        vvp_transient_page_verify(slice->cpl_page);
        return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        vvp_transient_page_verify(slice->cpl_page);

        /*
         * A transient page is removed from the radix tree here.
         */
        cl_page_delete(env, page);
}

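/*
 * Transient pages have no vmpage lock; ownership follows the inode mutex,
 * so probe the mutex with a trylock to decide whether the page is "locked".
 */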
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode    *inode = ccc_object_inode(slice->cpl_obj);
        int              locked;

        locked = !TRYLOCK_INODE_MUTEX(inode);
        if (!locked)
                UNLOCK_INODE_MUTEX(inode);
        return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion_write(const struct lu_env *env,
                                    const struct cl_page_slice *slice,
                                    int ioret)
{
        vvp_transient_page_verify(slice->cpl_page);
        vvp_page_completion_write_common(env, slice, ioret);
}

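/*
 * Finalize a transient page: free it through the common path, then
 * decrement the per-object count of outstanding transient pages, which is
 * protected by the inode mutex held by the caller.
 */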
static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct ccc_object *clobj = cl2ccc(clp->cp_obj);

        vvp_page_fini_common(cp);
        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
        clobj->cob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own           = vvp_transient_page_own,
        .cpo_assume        = vvp_transient_page_assume,
        .cpo_unassume      = vvp_transient_page_unassume,
        .cpo_disown        = vvp_transient_page_disown,
        .cpo_discard       = vvp_transient_page_discard,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_fini          = vvp_transient_page_fini,
        .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep        = ccc_transient_page_prep,
                        .cpo_completion  = vvp_page_completion_read,
                },
                [CRT_WRITE] = {
                        .cpo_prep        = ccc_transient_page_prep,
                        .cpo_completion  = vvp_transient_page_completion_write,
                }
        }
};

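/*
 * Allocate and initialize the vvp slice of \a page on top of \a vmpage:
 * take a page-cache reference on the vmpage and attach either the cacheable
 * or the transient operations depending on the page type. Cacheable pages
 * also store a back-pointer to the cl_page in vmpage->private.
 */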
struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct ccc_page *cpg;
        int result;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
        if (cpg != NULL) {
                cpg->cpg_page = vmpage;
                page_cache_get(vmpage);

                CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
                if (page->cp_type == CPT_CACHEABLE) {
                        SetPagePrivate(vmpage);
                        vmpage->private = (unsigned long)page;
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_page_ops);
                } else {
                        struct ccc_object *clobj = cl2ccc(obj);

                        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_transient_page_ops);
                        clobj->cob_transient_pages++;
                }
                result = 0;
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}