lustre/llite/vvp_page.c (fs/lustre-release.git @ 2b54a5e3eba77fa5e05409e9d53cb2aaa9f17117)

/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#ifndef __KERNEL__
# error This file is kernel only.
#endif

#include <obd.h>
#include <lustre_lite.h>

#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

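/*
 * Release the vmpage reference taken in vvp_page_init(). Common tail for
 * both cacheable and transient page finalization.
 */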
static void vvp_page_fini_common(struct ccc_page *cp)
{
        struct page *vmpage = cp->cpg_page;

        LASSERT(vmpage != NULL);
        page_cache_release(vmpage);
}

static void vvp_page_fini(const struct lu_env *env,
                          struct cl_page_slice *slice)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        struct page     *vmpage = cp->cpg_page;

        /*
         * vmpage->private was already cleared when the page was moved into
         * the VPG_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
        vvp_page_fini_common(cp);
}

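/*
 * Implements cl_page_operations::cpo_own(): take ownership of the page by
 * locking the vmpage. In nonblock mode the lock is only tried, and -EAGAIN
 * is returned if the page is already locked or under writeback.
 */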
static int vvp_page_own(const struct lu_env *env,
                        const struct cl_page_slice *slice, struct cl_io *io,
                        int nonblock)
{
        struct ccc_page *vpg    = cl2ccc_page(slice);
        struct page     *vmpage = vpg->cpg_page;

        LASSERT(vmpage != NULL);
        if (nonblock) {
                if (!trylock_page(vmpage))
                        return -EAGAIN;

                if (unlikely(PageWriteback(vmpage))) {
                        unlock_page(vmpage);
                        return -EAGAIN;
                }

                return 0;
        }

        lock_page(vmpage);
        wait_on_page_writeback(vmpage);
        return 0;
}

static void vvp_page_assume(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
                            const struct cl_page_slice *slice, struct cl_io *io)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        unlock_page(vmpage);
}

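/*
 * Implements cl_page_operations::cpo_discard(): count a discarded read-ahead
 * page that was never consumed, then drop the page from the page cache.
 */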
static void vvp_page_discard(const struct lu_env *env,
                             const struct cl_page_slice *slice,
                             struct cl_io *unused)
{
        struct page     *vmpage = cl2vm_page(slice);
        struct ccc_page *cpg    = cl2ccc_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));

        if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
                ll_ra_stats_inc(vmpage->mapping, RA_STAT_DISCARDED);

        ll_invalidate_page(vmpage);
}

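/*
 * Implements cl_page_operations::cpo_delete(): detach the cl_page from its
 * vmpage once the page enters the freeing state.
 */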
static void vvp_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct page      *vmpage = cl2vm_page(slice);
        struct inode     *inode  = vmpage->mapping->host;
        struct cl_object *obj    = slice->cpl_obj;
        struct cl_page   *page   = slice->cpl_page;
        int refc;

        LASSERT(PageLocked(vmpage));
        LASSERT((struct cl_page *)vmpage->private == page);
        LASSERT(inode == ccc_object_inode(obj));

        vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));

        /* Drop the reference count held in vvp_page_init(). */
        refc = atomic_dec_return(&page->cp_ref);
        LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

        ClearPageUptodate(vmpage);
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
         * The reference from the vmpage to the cl_page is removed here, but
         * the reference back is still held. It is removed later, in
         * vvp_page_fini().
         */
}

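/*
 * Implements cl_page_operations::cpo_export(): propagate the cl_page
 * up-to-date state to the vmpage's PG_uptodate flag.
 */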
static void vvp_page_export(const struct lu_env *env,
                            const struct cl_page_slice *slice,
                            int uptodate)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(vmpage != NULL);
        LASSERT(PageLocked(vmpage));
        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}

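/*
 * Implements cl_page_operations::cpo_is_vmlocked(): report -EBUSY if the
 * vmpage is currently locked, -ENODATA otherwise.
 */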
static int vvp_page_is_vmlocked(const struct lu_env *env,
                                const struct cl_page_slice *slice)
{
        return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              struct cl_io *unused)
{
        ENTRY;
        /* Skip the page already marked as PG_uptodate. */
        RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

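/*
 * Implements cl_page_operations::cpo_prep() for writes: move the vmpage into
 * writeback and record it as pending in the VVP write accounting.
 */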
static int vvp_page_prep_write(const struct lu_env *env,
                               const struct cl_page_slice *slice,
                               struct cl_io *unused)
{
        struct page *vmpage = cl2vm_page(slice);

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageDirty(vmpage));

        set_page_writeback(vmpage);
        vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));

        return 0;
}

/**
 * Handles page transfer errors at the VM level.
 *
 * The inode is passed as a separate argument because, in the direct-IO case,
 * the inode on which the error is to be set can differ from the inode of
 * \a vmpage.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage,
                             int ioret)
{
        struct ccc_object *obj = cl_inode2ccc(inode);

        if (ioret == 0) {
                ClearPageError(vmpage);
                obj->cob_discard_page_warned = 0;
        } else {
                SetPageError(vmpage);
                if (ioret == -ENOSPC)
                        set_bit(AS_ENOSPC, &inode->i_mapping->flags);
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);

                if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
                    obj->cob_discard_page_warned == 0) {
                        obj->cob_discard_page_warned = 1;
                        ll_dirty_page_discard_warn(vmpage, ioret);
                }
        }
}

static void vvp_page_completion_read(const struct lu_env *env,
                                     const struct cl_page_slice *slice,
                                     int ioret)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        struct page     *vmpage = cp->cpg_page;
        struct cl_page  *page   = slice->cpl_page;
        struct inode    *inode  = ccc_object_inode(page->cp_obj);
        ENTRY;

        LASSERT(PageLocked(vmpage));
        CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

        if (cp->cpg_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(inode), 1);

        if (ioret == 0) {
                if (!cp->cpg_defer_uptodate)
                        cl_page_export(env, page, 1);
        } else {
                cp->cpg_defer_uptodate = 0;
        }

        if (page->cp_sync_io == NULL)
                unlock_page(vmpage);

        EXIT;
}

static void vvp_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
{
        struct ccc_page *cp     = cl2ccc_page(slice);
        struct cl_page  *pg     = slice->cpl_page;
        struct page     *vmpage = cp->cpg_page;
        ENTRY;

        LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
        LASSERT(PageWriteback(vmpage));

        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

        /*
         * TODO: When we hit a recoverable error (-ENOMEM, etc.), it would
         * make sense to re-add the page to the oap pending list, so that we
         * don't need to take the page off the SoM write pending list. To
         * implement this, return a non-zero value from the ->cpo_completion()
         * method; the underlying transfer would then be notified and would
         * re-add the page to the pending transfer queue.  -jay
         */

        cp->cpg_write_queued = 0;
        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);

        /*
         * Only mark the page in error when this is an async write, because
         * applications won't wait for the IO to finish.
         */
        if (pg->cp_sync_io == NULL)
                vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);

        end_page_writeback(vmpage);
        EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as a part of a transfer. The page is locked, its dirty bit is cleared, and
 * it is moved into writeback. A page that some concurrent IO has already
 * made ready has to be skipped (this is bad, but hopefully a rare situation,
 * as it usually results in the transfer being shorter than possible).
 *
 * \retval 0         success, page can be placed into transfer
 *
 * \retval -EALREADY page is already under transfer (e.g., it was made ready
 *                   by concurrent IO). Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
                               const struct cl_page_slice *slice)
{
        struct page *vmpage = cl2vm_page(slice);
        struct cl_page *pg = slice->cpl_page;
        int result = 0;

        lock_page(vmpage);
        if (clear_page_dirty_for_io(vmpage)) {
                LASSERT(pg->cp_state == CPS_CACHED);
                /* This actually clears the dirty bit in the radix tree. */
                set_page_writeback(vmpage);
                vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
                CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
        } else if (pg->cp_state == CPS_PAGEOUT) {
                /* Is it possible for osc_flush_async_page() to have already
                 * made it ready? */
                result = -EALREADY;
        } else {
                CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
                              pg->cp_state);
                LBUG();
        }
        unlock_page(vmpage);
        RETURN(result);
}

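/*
 * Implements cl_page_operations::cpo_print(): dump the ccc_page state and,
 * when a vmpage is attached, its flags and counts, for debugging.
 */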
static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct ccc_page *vp     = cl2ccc_page(slice);
        struct page     *vmpage = vp->cpg_page;

        (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) vm@%p ",
                   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
                   vp->cpg_write_queued, vmpage);
        if (vmpage != NULL) {
                (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
                           (long)vmpage->flags, page_count(vmpage),
                           page_mapcount(vmpage), vmpage->private,
                           page_index(vmpage),
                           list_empty(&vmpage->lru) ? "not-" : "");
        }
        (*printer)(env, cookie, "\n");
        return 0;
}

static const struct cl_page_operations vvp_page_ops = {
        .cpo_own           = vvp_page_own,
        .cpo_assume        = vvp_page_assume,
        .cpo_unassume      = vvp_page_unassume,
        .cpo_disown        = vvp_page_disown,
        .cpo_discard       = vvp_page_discard,
        .cpo_delete        = vvp_page_delete,
        .cpo_export        = vvp_page_export,
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep        = vvp_page_prep_read,
                        .cpo_completion  = vvp_page_completion_read,
                        .cpo_make_ready  = ccc_fail,
                },
                [CRT_WRITE] = {
                        .cpo_prep        = vvp_page_prep_write,
                        .cpo_completion  = vvp_page_completion_write,
                        .cpo_make_ready  = vvp_page_make_ready,
                }
        }
};

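/*
 * Transient pages (CPT_TRANSIENT) are created and used with the inode's
 * i_mutex held; the helper below checks this by asserting that a trylock on
 * the mutex fails.
 */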
static void vvp_transient_page_verify(const struct cl_page *page)
{
        struct inode *inode = ccc_object_inode(page->cp_obj);

        LASSERT(!mutex_trylock(&inode->i_mutex));
}

static int vvp_transient_page_own(const struct lu_env *env,
                                  const struct cl_page_slice *slice,
                                  struct cl_io *unused, int nonblock)
{
        vvp_transient_page_verify(slice->cpl_page);
        return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
                                        struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      struct cl_io *unused)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
                                       struct cl_io *unused)
{
        struct cl_page *page = slice->cpl_page;

        vvp_transient_page_verify(slice->cpl_page);

        /*
         * A transient page is simply removed from the radix tree.
         */
        cl_page_delete(env, page);
}

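/*
 * A transient page has no vmpage lock to test; instead, report the page as
 * "locked" whenever the inode's i_mutex is held, which is detected by a
 * failed trylock on the mutex.
 */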
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
                                          const struct cl_page_slice *slice)
{
        struct inode *inode = ccc_object_inode(slice->cpl_obj);
        int locked;

        locked = !mutex_trylock(&inode->i_mutex);
        if (!locked)
                mutex_unlock(&inode->i_mutex);
        return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
                              const struct cl_page_slice *slice,
                              int ioret)
{
        vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_fini(const struct lu_env *env,
                                    struct cl_page_slice *slice)
{
        struct ccc_page *cp = cl2ccc_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct ccc_object *clobj = cl2ccc(clp->cp_obj);

        vvp_page_fini_common(cp);
        LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
        clobj->cob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_own           = vvp_transient_page_own,
        .cpo_assume        = vvp_transient_page_assume,
        .cpo_unassume      = vvp_transient_page_unassume,
        .cpo_disown        = vvp_transient_page_disown,
        .cpo_discard       = vvp_transient_page_discard,
        .cpo_fini          = vvp_transient_page_fini,
        .cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
        .cpo_print         = vvp_page_print,
        .cpo_is_under_lock = ccc_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep        = ccc_transient_page_prep,
                        .cpo_completion  = vvp_transient_page_completion,
                },
                [CRT_WRITE] = {
                        .cpo_prep        = ccc_transient_page_prep,
                        .cpo_completion  = vvp_transient_page_completion,
                }
        }
};

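/**
 * Initializes the VVP slice of a cl_page: a reference is taken on the
 * vmpage (released in vvp_page_fini_common()); for a cacheable page,
 * vmpage->private is additionally pointed back at the cl_page and an extra
 * cl_page reference is held until vvp_page_delete().
 */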
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_page *page, pgoff_t index)
{
        struct ccc_page *cpg    = cl_object_page_slice(obj, page);
        struct page     *vmpage = page->cp_vmpage;

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

        cpg->cpg_cl.cpl_index = index;
        cpg->cpg_page = vmpage;
        page_cache_get(vmpage);

        CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
        if (page->cp_type == CPT_CACHEABLE) {
                /* in cache, decref in vvp_page_delete */
                atomic_inc(&page->cp_ref);
                SetPagePrivate(vmpage);
                vmpage->private = (unsigned long)page;
                cl_page_slice_add(page, &cpg->cpg_cl, obj, &vvp_page_ops);
        } else {
                struct ccc_object *clobj = cl2ccc(obj);

                LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
                cl_page_slice_add(page, &cpg->cpg_cl, obj,
                                  &vvp_transient_page_ops);
                clobj->cob_transient_pages++;
        }
        return 0;
}
545