/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#ifndef __KERNEL__
# error This file is kernel only.
#endif

#include <obd.h>
#include <lustre_lite.h>

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

static void vvp_page_fini_common(struct ccc_page *cp)
{
        cfs_page_t *vmpage = cp->cpg_page;

        LASSERT(vmpage != NULL);
        page_cache_release(vmpage);
        OBD_SLAB_FREE_PTR(cp, vvp_page_kmem);
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        cfs_page_t *vmpage  = cp->cpg_page;

        /*
         * vmpage->private was already cleared when the page was moved into
         * the VPG_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(cp);
}

static void vvp_page_own(const struct lu_env *env,
                         const struct cl_page_slice *slice, struct cl_io *io)
{
        struct ccc_page *vpg    = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vpg->cpg_page;
        int count = 0;

        LASSERT(vmpage != NULL);

        /* DEBUG CODE FOR #18881 */
        while (TestSetPageLocked(vmpage)) {
                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
                                     cfs_time_seconds(1)/10);
                if (++count > 600) {
                        CL_PAGE_DEBUG(D_ERROR, env,
                                      cl_page_top(slice->cpl_page),
                                      "XXX page %p blocked on acquiring the"
                                      " lock. process %s/%p, flags %lx, io %p\n",
                                      vmpage, current->comm, current,
                                      vmpage->flags, io);
                        libcfs_debug_dumpstack(NULL);
                        if (slice->cpl_page->cp_task) {
                                cfs_task_t *tsk = slice->cpl_page->cp_task;
                                LCONSOLE_WARN("The page was owned by %s\n",
                                              tsk->comm);
                                libcfs_debug_dumpstack(tsk);
                        }
                        LCONSOLE_WARN("Reproduced bug #18881, please contact: "
                                      "jay <jinshan.xiong@sun.com>, thanks\n");

                        lock_page(vmpage);
                        break;
                }
        }
        /* DEBUG CODE END */

        /* lock_page(vmpage); */
        wait_on_page_writeback(vmpage);
}
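
/*
 * Note: with the #18881 debug instrumentation above removed, the body of
 * vvp_page_own() reduces to the commented-out variant:
 *
 *      lock_page(vmpage);
 *      wait_on_page_writeback(vmpage);
 */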

static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        unlock_page(cl2vm_page(slice));
}

static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        cfs_page_t           *vmpage  = cl2vm_page(slice);
        struct address_space *mapping = vmpage->mapping;
        struct ccc_page      *cpg     = cl2ccc_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
                ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);

        /*
         * truncate_complete_page() calls
         * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
         */
        truncate_complete_page(mapping, vmpage);
}

static int vvp_page_unmap(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        __u64       offset = vmpage->index << CFS_PAGE_SHIFT;

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        /*
         * XXX is it safe to call this with the page lock held?
         */
        ll_teardown_mmaps(vmpage->mapping, offset, offset + CFS_PAGE_SIZE);
        return 0;
}

static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        cfs_page_t       *vmpage = cl2vm_page(slice);
        struct inode     *inode  = vmpage->mapping->host;
        struct cl_object *obj    = slice->cpl_obj;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
        LASSERT(inode == ccc_object_inode(obj));

        vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * The reference from vmpage to cl_page is removed, but the reference
         * back is still here; it is removed later, in vvp_page_fini().
         */
}

static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            int uptodate)
{
        cfs_page_t *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}

static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip the page already marked as PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        int result;

        if (clear_page_dirty_for_io(vmpage)) {
                set_page_writeback(vmpage);
                vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
                result = 0;
        } else
                result = -EALREADY;
        return result;
}
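
/*
 * Note on the protocol above (standard Linux writeback, not specific to
 * this file): clear_page_dirty_for_io() atomically transfers the page's
 * dirty bit to the caller, and set_page_writeback() marks the page as
 * being under IO. When the page turns out not to be dirty, -EALREADY
 * tells the generic cl_page code that there is nothing to transfer, the
 * same convention vvp_page_prep_read() uses for already-uptodate pages.
 */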

/**
 * Handles page transfer errors at the VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can differ from the \a vmpage inode in the case of
 * direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, cfs_page_t *vmpage, int ioret)
{
        if (ioret == 0)
                ClearPageError(vmpage);
        else if (ioret != -EINTR) {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);
        }
}
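
/*
 * The AS_EIO/AS_ENOSPC bits set above are the kernel's standard mechanism
 * for reporting asynchronous write errors (standard VM behaviour of this
 * kernel era, not specific to Lustre): a later filemap_fdatawait() or
 * filemap_write_and_wait() on the same mapping tests and clears them, and
 * converts them into -EIO/-ENOSPC for fsync(2) and similar callers.
 */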

static void vvp_page_completion_common(const struct lu_env *env,
                                       struct ccc_page *cp, int ioret)
{
        struct cl_page    *clp    = cp->cpg_cl.cpl_page;
        cfs_page_t        *vmpage = cp->cpg_page;
        struct inode      *inode  = ccc_object_inode(clp->cp_obj);
        struct cl_sync_io *anchor = cp->cpg_sync_io;

        LINVRNT(cl_page_is_vmlocked(env, clp));

        if (anchor != NULL) {
                cp->cpg_sync_io  = NULL;
                cl_sync_io_note(anchor, ioret);
        } else if (clp->cp_type == CPT_CACHEABLE) {
                /*
                 * Mark the page as erroneous only when it is a cacheable
                 * page and NOT part of a sync IO.
                 *
                 * For sync IO and direct IO (CPT_TRANSIENT), the error is
                 * seen by the application directly, so there is no need to
                 * mark the page as erroneous at all.
                 */
                vvp_vmpage_error(inode, vmpage, ioret);
                unlock_page(vmpage);
        }
}

static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct ccc_page *cp    = cl2ccc_page(slice);
        struct cl_page  *page  = cl_page_top(slice->cpl_page);
        struct inode    *inode = ccc_object_inode(page->cp_obj);
        ENTRY;

        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (cp->cpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                /* XXX: do we need this for transient pages? */
                if (!cp->cpg_defer_uptodate)
                        cl_page_export(env, page, 1);
        } else
                cp->cpg_defer_uptodate = 0;
        vvp_page_completion_common(env, cp, ioret);

        EXIT;
}

static void vvp_page_completion_write_common(const struct lu_env *env,
                                             const struct cl_page_slice *slice,
                                             int ioret)
{
        struct ccc_page *cp = cl2ccc_page(slice);

        /*
         * TODO: It actually makes sense to re-add the page to the oap
         * pending list when we merely hit a recoverable error (-ENOMEM,
         * etc.), so that it does not have to be taken off the SoM write
         * pending list at all. To implement this, the ->cpo_completion()
         * method just needs to return a non-zero value; the underlying
         * transfer layer would then be notified and re-add the page to the
         * pending transfer queue.  -jay
         */
        cp->cpg_write_queued = 0;
        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);

        vvp_page_completion_common(env, cp, ioret);
}
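
/*
 * A sketch of the TODO above, purely hypothetical (the current
 * ->cpo_completion() methods return void): a recoverable failure would
 * keep the page queued instead of completing it, roughly
 *
 *      if (ioret == -ENOMEM) {
 *              // leave cpg_write_queued set, keep the page on the
 *              // SoM write pending list
 *              return 1;       // ask the transfer layer to re-queue
 *      }
 */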

static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        cfs_page_t      *vmpage = cp->cpg_page;

        ENTRY;

        LINVRNT(cl_page_is_vmlocked(env, pg));
        LASSERT(PageWriteback(vmpage));

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        vvp_page_completion_write_common(env, slice, ioret);
        end_page_writeback(vmpage);
        EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as a part of transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped (this
 * is bad, but hopefully a rare situation, as it usually results in the
 * transfer being shorter than it could be).
 *
 * \retval 0       success, the page can be placed into transfer
 *
 * \retval -EAGAIN the page is either used by concurrent IO or has been
 * truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        cfs_page_t *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result;

        result = -EAGAIN;
        /* We are trying to write, but the page is locked; come back later. */
        if (!TestSetPageLocked(vmpage)) {
                if (pg->cp_state == CPS_CACHED) {
                        /*
                         * We can cancel IO if the page wasn't dirty after all.
                         */
                        clear_page_dirty_for_io(vmpage);
                        /*
                         * This actually clears the dirty bit in the radix
                         * tree.
                         */
                        set_page_writeback(vmpage);
                        vvp_write_pending(cl2ccc(slice->cpl_obj),
                                          cl2ccc_page(slice));
                        CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
                        result = 0;
                } else
                        /*
                         * Page was concurrently truncated.
                         */
                        LASSERT(pg->cp_state == CPS_FREEING);
        }
        RETURN(result);
}
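
/*
 * Caller-side view (a sketch, assuming the generic cl_page_make_ready()
 * entry point; not code from this file): the transfer engine simply skips
 * pages that report -EAGAIN and picks them up in a later pass:
 *
 *      rc = cl_page_make_ready(env, pg, CRT_WRITE);
 *      if (rc == -EAGAIN)
 *              continue;       // locked by concurrent IO or truncated
 */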

static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct ccc_page *vp = cl2ccc_page(slice);
        cfs_page_t      *vmpage = vp->cpg_page;

        (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
                   "vm@%p ",
                   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
                   vp->cpg_write_queued, vmpage);
        if (vmpage != NULL) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }
        (*printer)(env, cookie, "\n");
        return 0;
}

static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_unmap         = vvp_page_unmap,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep        = vvp_page_prep_read,
                        .cpo_completion  = vvp_page_completion_read,
                        .cpo_make_ready  = ccc_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep        = vvp_page_prep_write,
                        .cpo_completion  = vvp_page_completion_write,
                        .cpo_make_ready  = vvp_page_make_ready,
                }
        }
};
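
/*
 * How this table is consumed (a sketch of the generic dispatch in
 * cl_page.c, not code from this file): each cl_page method walks the
 * page's slice list and calls the matching hook on every layer that
 * provides one, roughly
 *
 *      list_for_each_entry(slice, &pg->cp_layers, cpl_linkage) {
 *              if (slice->cpl_ops->io[crt].cpo_prep != NULL)
 *                      result = slice->cpl_ops->io[crt].cpo_prep(env,
 *                                                                slice, io);
 *              if (result != 0)
 *                      break;
 *      }
 */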

static void vvp_transient_page_verify(const struct cl_page *page)
{
        struct inode *inode = ccc_object_inode(page->cp_obj);

        LASSERT(!TRYLOCK_INODE_MUTEX(inode));
        /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
}

static void vvp_transient_page_own(const struct lu_env *env,
                                   const struct cl_page_slice *slice,
                                   struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        vvp_transient_page_verify(slice->cpl_page);

        /*
         * For a transient page, remove it from the radix tree.
         */
        cl_page_delete(env, page);
}

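/*
 * A transient page has no vmpage lock to test; ownership is tied to the
 * inode mutex instead (see vvp_transient_page_verify() above), so
 * "vmlocked" is emulated by probing that mutex with a trylock.
 */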
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode    *inode = ccc_object_inode(slice->cpl_obj);
        int              locked;

        locked = !TRYLOCK_INODE_MUTEX(inode);
        if (!locked)
                UNLOCK_INODE_MUTEX(inode);
        return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion_write(const struct lu_env *env,
                                    const struct cl_page_slice *slice,
                                    int ioret)
{
        vvp_transient_page_verify(slice->cpl_page);
        vvp_page_completion_write_common(env, slice, ioret);
}

static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct ccc_object *clobj = cl2ccc(clp->cp_obj);

        vvp_page_fini_common(cp);
        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
        clobj->cob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own           = vvp_transient_page_own,
        .cpo_assume        = vvp_transient_page_assume,
        .cpo_unassume      = vvp_transient_page_unassume,
        .cpo_disown        = vvp_transient_page_disown,
        .cpo_discard       = vvp_transient_page_discard,
        .cpo_vmpage        = ccc_page_vmpage,
        .cpo_fini          = vvp_transient_page_fini,
        .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep        = ccc_transient_page_prep,
                        .cpo_completion  = vvp_page_completion_read,
                },
                [CRT_WRITE] = {
                        .cpo_prep        = ccc_transient_page_prep,
                        .cpo_completion  = vvp_transient_page_completion_write,
                }
        }
};

struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                              struct cl_page *page, cfs_page_t *vmpage)
{
        struct ccc_page *cpg;
        int result;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
        if (cpg != NULL) {
                cpg->cpg_page = vmpage;
                page_cache_get(vmpage);

                CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
                if (page->cp_type == CPT_CACHEABLE) {
                        SetPagePrivate(vmpage);
                        vmpage->private = (unsigned long)page;
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_page_ops);
                } else {
                        struct ccc_object *clobj = cl2ccc(obj);

                        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                          &vvp_transient_page_ops);
                        clobj->cob_transient_pages++;
                }
                result = 0;
        } else
                result = -ENOMEM;
        return ERR_PTR(result);
}
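
/*
 * Note on the return convention above: on success, result is 0, so
 * ERR_PTR(result) is NULL; callers are expected to distinguish success
 * from failure with IS_ERR(). A caller-side sketch (hypothetical, not
 * code from this file):
 *
 *      struct cl_page *err = vvp_page_init(env, obj, page, vmpage);
 *      if (IS_ERR(err))
 *              result = PTR_ERR(err);  // -ENOMEM
 */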
603