lustre/llite/vvp_page.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#ifndef __KERNEL__
# error This file is kernel only.
#endif

#include <obd.h>
#include <lustre_lite.h>

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

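/**
 * Common part of cl_page finalization for both page types: drops the
 * reference on the VM page taken in vvp_page_init() and frees the slice.
 */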
static void vvp_page_fini_common(struct ccc_page *cp)
{
        cfs_page_t *vmpage = cp->cpg_page;

        LASSERT(vmpage != NULL);
        page_cache_release(vmpage);
        OBD_SLAB_FREE_PTR(cp, vvp_page_kmem);
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        cfs_page_t *vmpage  = cp->cpg_page;

        /*
         * vmpage->private was already cleared when the page was moved into
         * VPG_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(cp);
}

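/**
 * Implements cl_page_operations::cpo_own(): locks the VM page and waits
 * for any writeback against it to finish.  The retry loop below is
 * temporary instrumentation for bug #18881: if the page lock cannot be
 * acquired for ~10 seconds, the page state is dumped before falling back
 * to a blocking lock_page().
 */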
static void vvp_page_own(const struct lu_env *env,
                         const struct cl_page_slice *slice,
                         struct cl_io *unused)
{
        struct ccc_page *vpg    = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vpg->cpg_page;
        int count = 0;

        LASSERT(vmpage != NULL);

        /* DEBUG CODE FOR #18881 */
        while (TestSetPageLocked(vmpage)) {
                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
                                     cfs_time_seconds(1)/10);
                if (++count > 100) {
                        CL_PAGE_DEBUG(D_ERROR, env,
                                      cl_page_top(slice->cpl_page),
                                      "XXX page %p blocked on acquiring the"
                                      " lock. process %s/%p, flags %lx\n",
                                      vmpage, current->comm, current,
                                      vmpage->flags);
                        libcfs_debug_dumpstack(NULL);
                        LCONSOLE_WARN("Reproduced bug #18881, please contact: "
                                      "jay <jinshan.xiong@sun.com>, thanks\n");

                        lock_page(vmpage);
                        break;
                }
        }
        /* DEBUG CODE END */

        /* lock_page(vmpage); */
        wait_on_page_writeback(vmpage);
}

static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        unlock_page(vmpage);
}

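/**
 * Implements cl_page_operations::cpo_discard(): removes the page from the
 * page cache, accounting a read-ahead page that was discarded before use.
 */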
static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        cfs_page_t           *vmpage  = cl2vm_page(slice);
        struct address_space *mapping;
        struct ccc_page      *cpg     = cl2ccc_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        mapping = vmpage->mapping;

        if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
                ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);

        /*
         * truncate_complete_page() calls
         * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
         */
        truncate_complete_page(mapping, vmpage);
}

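/**
 * Implements cl_page_operations::cpo_unmap(): tears down any user-space
 * memory mappings of the [offset, offset + CFS_PAGE_SIZE) range backed by
 * this page.
 */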
static int vvp_page_unmap(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        /* Widen the index before shifting, so a large index cannot overflow. */
        __u64       offset = (__u64)vmpage->index << CFS_PAGE_SHIFT;

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        /*
         * XXX is it safe to call this with the page lock held?
         */
        ll_teardown_mmaps(vmpage->mapping, offset, offset + CFS_PAGE_SIZE);
        return 0;
}

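/**
 * Implements cl_page_operations::cpo_delete(): severs the vmpage->cl_page
 * link (PG_private and vmpage->private) once the page enters VPG_FREEING;
 * the back-reference is dropped later, in vvp_page_fini().
 */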
static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        cfs_page_t       *vmpage = cl2vm_page(slice);
        struct inode     *inode  = vmpage->mapping->host;
        struct cl_object *obj    = slice->cpl_obj;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
        LASSERT(inode == ccc_object_inode(obj));

        vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * The reference from the vmpage to the cl_page is removed here, but
         * the reference back is still held. It is dropped later in
         * vvp_page_fini().
         */
}

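/**
 * Implements cl_page_operations::cpo_export(): marks the VM page up to
 * date so that the VM may hand its data out to user-space.
 */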
static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        SetPageUptodate(vmpage);
}

static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

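/**
 * Implements cl_page_operations::cpo_prep() for reads.  Returning
 * -EALREADY tells the transfer machinery that this page requires no I/O:
 * a page that is already PG_uptodate can be skipped.
 */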
static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip the page already marked as PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        int result;

        if (clear_page_dirty_for_io(vmpage)) {
                set_page_writeback(vmpage);
                vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
                result = 0;
        } else
                result = -EALREADY;
        return result;
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can be different from the \a vmpage inode in case
 * of direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, cfs_page_t *vmpage, int ioret)
{
        if (ioret == 0)
                ClearPageError(vmpage);
        else if (ioret != -EINTR) {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);
        }
}

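/**
 * Common transfer-completion handling shared by the read and write paths:
 * either notifies the cl_sync_io anchor of a synchronous I/O, or, for
 * cacheable pages, records any error on the VM page and unlocks it.
 */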
static void vvp_page_completion_common(const struct lu_env *env,
                                       struct ccc_page *cp, int ioret)
{
        struct cl_page    *clp    = cp->cpg_cl.cpl_page;
        cfs_page_t        *vmpage = cp->cpg_page;
        struct inode      *inode  = ccc_object_inode(clp->cp_obj);
        struct cl_sync_io *anchor = cp->cpg_sync_io;

        LINVRNT(cl_page_is_vmlocked(env, clp));

        if (anchor != NULL) {
                cp->cpg_sync_io  = NULL;
                cl_sync_io_note(anchor, ioret);
        } else if (clp->cp_type == CPT_CACHEABLE) {
                /*
                 * Only mark the page in error when it is a cacheable page
                 * and NOT sync IO.
                 *
                 * For sync IO and direct IO (CPT_TRANSIENT), the error is
                 * visible to the application, so there is no need to mark
                 * the page in error at all.
                 */
                vvp_vmpage_error(inode, vmpage, ioret);
                unlock_page(vmpage);
        }
}

static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct ccc_page *cp    = cl2ccc_page(slice);
        struct cl_page  *page  = cl_page_top(slice->cpl_page);
        struct inode    *inode = ccc_object_inode(page->cp_obj);
        ENTRY;

        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (cp->cpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                /* XXX: do we need this for transient pages? */
                if (!cp->cpg_defer_uptodate)
                        cl_page_export(env, page);
        } else
                cp->cpg_defer_uptodate = 0;
        vvp_page_completion_common(env, cp, ioret);

        EXIT;
}

static void vvp_page_completion_write_common(const struct lu_env *env,
                                             const struct cl_page_slice *slice,
                                             int ioret)
{
        struct ccc_page *cp = cl2ccc_page(slice);

        /*
         * TODO: On a recoverable error (-ENOMEM, etc.) it would actually
         * make sense to add the page back into the oap pending list, so
         * that it need not be taken off the SoM write pending list.
         * To implement this, we would just need to return a non-zero value
         * from the ->cpo_completion() method. The underlying transfer would
         * be notified and would then re-add the page to the pending transfer
         * queue.  -jay
         */
        cp->cpg_write_queued = 0;
        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);

        vvp_page_completion_common(env, cp, ioret);
}

static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        cfs_page_t      *vmpage = cp->cpg_page;

        ENTRY;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(PageWriteback(vmpage));

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        vvp_page_completion_write_common(env, slice, ioret);
        end_page_writeback(vmpage);
        EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as a part of transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped
 * (this is bad, but hopefully a rare situation, as it usually results in
 * the transfer being shorter than possible).
 *
 * \retval 0       success, page can be placed into transfer
 *
 * \retval -EAGAIN page is either used by concurrent IO or has been
 * truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result;

        result = -EAGAIN;
        /* we're trying to write, but the page is locked... come back later */
        if (!TestSetPageLocked(vmpage)) {
                if (pg->cp_state == CPS_CACHED) {
                        /*
                         * We can cancel IO if the page wasn't dirty after all.
                         */
                        clear_page_dirty_for_io(vmpage);
                        /*
                         * This actually clears the dirty bit in the radix
                         * tree.
                         */
                        set_page_writeback(vmpage);
                        vvp_write_pending(cl2ccc(slice->cpl_obj),
                                          cl2ccc_page(slice));
                        CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
                        result = 0;
                } else
                        /*
                         * Page was concurrently truncated.
                         */
                        LASSERT(pg->cp_state == CPS_FREEING);
        }
        RETURN(result);
}

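/**
 * Implements cl_page_operations::cpo_print(): dumps the state of the
 * ccc_page slice and of the underlying VM page for debugging.
 */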
static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct ccc_page *vp = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vp->cpg_page;

        (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
                   "vm@%p ",
                   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
                   vp->cpg_write_queued, vmpage);
        if (vmpage != NULL) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }
        (*printer)(env, cookie, "\n");
        return 0;
}

static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_unmap         = vvp_page_unmap,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep        = vvp_page_prep_read,
                        .cpo_completion  = vvp_page_completion_read,
                        .cpo_make_ready  = ccc_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep        = vvp_page_prep_write,
                        .cpo_completion  = vvp_page_completion_write,
                        .cpo_make_ready  = vvp_page_make_ready,
                }
        }
};

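/**
 * Transient pages (CPT_TRANSIENT) back direct IO.  All operations on them
 * run with the inode mutex held, which vvp_transient_page_verify() checks
 * by observing that a trylock on the inode mutex must fail.
 */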
static void vvp_transient_page_verify(const struct cl_page *page)
{
        struct inode *inode = ccc_object_inode(page->cp_obj);

        LASSERT(!TRYLOCK_INODE_MUTEX(inode));
        /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
}

static void vvp_transient_page_own(const struct lu_env *env,
                                   const struct cl_page_slice *slice,
                                   struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        vvp_transient_page_verify(slice->cpl_page);

        /*
         * For transient pages, remove the page from the radix tree.
         */
        cl_page_delete(env, page);
}

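/**
 * A transient page has no VM page to lock; instead, report the page as
 * "vmlocked" when the inode mutex is held, probing it with a trylock and
 * releasing the mutex again if the probe succeeded.
 */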
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode    *inode = ccc_object_inode(slice->cpl_obj);
        int              locked;

        locked = !TRYLOCK_INODE_MUTEX(inode);
        if (!locked)
                UNLOCK_INODE_MUTEX(inode);
        return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion_write(const struct lu_env *env,
                                    const struct cl_page_slice *slice,
                                    int ioret)
{
        vvp_transient_page_verify(slice->cpl_page);
        vvp_page_completion_write_common(env, slice, ioret);
}

static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct ccc_object *clobj = cl2ccc(clp->cp_obj);

        vvp_page_fini_common(cp);
        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
        clobj->cob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own           = vvp_transient_page_own,
        .cpo_assume        = vvp_transient_page_assume,
        .cpo_unassume      = vvp_transient_page_unassume,
        .cpo_disown        = vvp_transient_page_disown,
        .cpo_discard       = vvp_transient_page_discard,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_fini          = vvp_transient_page_fini,
        .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep        = ccc_transient_page_prep,
                        .cpo_completion  = vvp_page_completion_read,
                },
                [CRT_WRITE] = {
                        .cpo_prep        = ccc_transient_page_prep,
                        .cpo_completion  = vvp_transient_page_completion_write,
                }
        }
};

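/**
 * Allocates the VVP page slice for \a page, takes a reference on \a vmpage
 * and attaches either vvp_page_ops (cacheable pages, which are also linked
 * to the vmpage through vmpage->private) or vvp_transient_page_ops
 * (transient pages for direct IO).
 */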
struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct ccc_page *cpg;
        int result;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
        if (cpg != NULL) {
                cpg->cpg_page = vmpage;
                page_cache_get(vmpage);

                CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
                if (page->cp_type == CPT_CACHEABLE) {
                        SetPagePrivate(vmpage);
                        vmpage->private = (unsigned long)page;
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_page_ops);
                } else {
                        struct ccc_object *clobj = cl2ccc(obj);

                        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_transient_page_ops);
                        clobj->cob_transient_pages++;
                }
                result = 0;
        } else
                result = -ENOMEM;
        /* ERR_PTR(0) is NULL, which signals success to the caller. */
        return ERR_PTR(result);
}