/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

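/*
 * Each VVP page slice pins its backing vmpage: vvp_page_init() takes a
 * page cache reference on the vmpage, and vvp_page_fini_common() below
 * drops it again when the slice is finalized.
 */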
static void vvp_page_fini_common(struct ccc_page *cp)
{
	struct page *vmpage = cp->cpg_page;

	LASSERT(vmpage != NULL);
	page_cache_release(vmpage);
}

static void vvp_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice)
{
	struct ccc_page *cp = cl2ccc_page(slice);
	struct page *vmpage = cp->cpg_page;

	/*
	 * vmpage->private was already cleared when the page was moved into
	 * CPS_FREEING state.
	 */
	LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
	vvp_page_fini_common(cp);
}

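/*
 * Page ownership at the VVP layer maps onto the kernel page lock: a
 * non-blocking own request try-locks the vmpage and backs off with
 * -EAGAIN if it is locked or still under writeback, while a blocking
 * request takes the page lock and waits for writeback to complete.
 */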
static int vvp_page_own(const struct lu_env *env,
			const struct cl_page_slice *slice, struct cl_io *io,
			int nonblock)
{
	struct ccc_page *vpg = cl2ccc_page(slice);
	struct page *vmpage = vpg->cpg_page;

	LASSERT(vmpage != NULL);
	if (nonblock) {
		if (!trylock_page(vmpage))
			return -EAGAIN;

		if (unlikely(PageWriteback(vmpage))) {
			unlock_page(vmpage);
			return -EAGAIN;
		}

		return 0;
	}

	lock_page(vmpage);
	wait_on_page_writeback(vmpage);

	return 0;
}

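/*
 * assume/unassume/disown piggy-back on the same page lock: assume expects
 * the vmpage to be locked already (and additionally waits for writeback),
 * and disown drops the lock.
 */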
static void vvp_page_assume(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
	wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
			    const struct cl_page_slice *slice, struct cl_io *io)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	unlock_page(vmpage);
}

static void vvp_page_discard(const struct lu_env *env,
			     const struct cl_page_slice *slice,
			     struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);
	struct ccc_page *cpg = cl2ccc_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
		ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);

	ll_invalidate_page(vmpage);
}

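/*
 * cpo_delete() severs the vmpage->private back-pointer to the cl_page and
 * drops the cl_page reference taken in vvp_page_init(); the reference on
 * the vmpage itself is only dropped later, in vvp_page_fini().
 */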
static void vvp_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct inode *inode = vmpage->mapping->host;
	struct cl_object *obj = slice->cpl_obj;
	struct cl_page *page = slice->cpl_page;
	int refc;

	LASSERT(PageLocked(vmpage));
	LASSERT((struct cl_page *)vmpage->private == page);
	LASSERT(inode == vvp_object_inode(obj));

	vvp_write_complete(cl2vvp(obj), cl2ccc_page(slice));

	/* Drop the reference count held in vvp_page_init */
	refc = atomic_dec_return(&page->cp_ref);
	LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

	ClearPageUptodate(vmpage);
	ClearPagePrivate(vmpage);
	vmpage->private = 0;
	/*
	 * The reference from the vmpage to the cl_page is removed, but the
	 * reference back is still here. It is removed later in
	 * vvp_page_fini().
	 */
}

static void vvp_page_export(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    int uptodate)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
	if (uptodate)
		SetPageUptodate(vmpage);
	else
		ClearPageUptodate(vmpage);
}

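/*
 * Note the convention: instead of a boolean, cpo_is_vmlocked() returns
 * -EBUSY when the vmpage is locked and -ENODATA when it is not.
 */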
static int vvp_page_is_vmlocked(const struct lu_env *env,
				const struct cl_page_slice *slice)
{
	return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	ENTRY;
	/* Skip the page already marked as PG_uptodate. */
	RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

static int vvp_page_prep_write(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(PageLocked(vmpage));
	LASSERT(!PageDirty(vmpage));

	set_page_writeback(vmpage);
	vvp_write_pending(cl2vvp(slice->cpl_obj), cl2ccc_page(slice));

	return 0;
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes inode as a separate argument, because the inode on which the
 * error is to be set can be different from \a vmpage's inode in the case of
 * direct I/O.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage,
			     int ioret)
{
	struct vvp_object *obj = cl_inode2vvp(inode);

	if (ioret == 0) {
		ClearPageError(vmpage);
		obj->vob_discard_page_warned = 0;
	} else {
		SetPageError(vmpage);
		if (ioret == -ENOSPC)
			set_bit(AS_ENOSPC, &inode->i_mapping->flags);
		else
			set_bit(AS_EIO, &inode->i_mapping->flags);

		if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
		    obj->vob_discard_page_warned == 0) {
			obj->vob_discard_page_warned = 1;
			ll_dirty_page_discard_warn(vmpage, ioret);
		}
	}
}

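/*
 * Read completion: a read-ahead page that was never consumed returns its
 * read-ahead credit; on success the page is exported (marked uptodate)
 * unless read-ahead already deferred that, and a page that is not part of
 * a sync IO is unlocked here.
 */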
static void vvp_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct ccc_page *cp = cl2ccc_page(slice);
	struct page *vmpage = cp->cpg_page;
	struct cl_page *page = slice->cpl_page;
	struct inode *inode = vvp_object_inode(page->cp_obj);
	ENTRY;

	LASSERT(PageLocked(vmpage));
	CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

	if (cp->cpg_defer_uptodate)
		ll_ra_count_put(ll_i2sbi(inode), 1);

	if (ioret == 0) {
		if (!cp->cpg_defer_uptodate)
			cl_page_export(env, page, 1);
	} else {
		cp->cpg_defer_uptodate = 0;
	}

	if (page->cp_sync_io == NULL)
		unlock_page(vmpage);

	EXIT;
}

static void vvp_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *pg = slice->cpl_page;
	struct page *vmpage = cp->cpg_page;
	ENTRY;

	LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
	LASSERT(PageWriteback(vmpage));

	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

	/*
	 * TODO: if we hit a recoverable error, it would make sense to add
	 * the page back onto the oap pending list, so that it does not have
	 * to be taken off the SoM write pending list. To implement this,
	 * the ->cpo_completion() method just needs to return a non-zero
	 * value; the underlying transfer would then be notified and would
	 * re-add the page to the pending transfer queue. -jay
	 */

	cp->cpg_write_queued = 0;
	vvp_write_complete(cl2vvp(slice->cpl_obj), cp);

	/*
	 * Only mark the page in error when it is an async write, because
	 * applications won't wait for the IO to finish.
	 */
	if (pg->cp_sync_io == NULL)
		vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);

	end_page_writeback(vmpage);
	EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as a part of transfer. This function try-locks the page. If the try-lock
 * fails, the page is owned by some concurrent IO and should be skipped
 * (this is a bad, but hopefully rare, situation, as it usually results in
 * the transfer being shorter than possible).
 *
 * \retval 0	   success, page can be placed into transfer
 * \retval -EAGAIN page is either used by concurrent IO or has been
 *		   truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
			       const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct cl_page *pg = slice->cpl_page;
	int result = 0;

	lock_page(vmpage);
	if (clear_page_dirty_for_io(vmpage)) {
		LASSERT(pg->cp_state == CPS_CACHED);
		/* This actually clears the dirty bit in the radix tree. */
		set_page_writeback(vmpage);
		vvp_write_pending(cl2vvp(slice->cpl_obj),
				  cl2ccc_page(slice));
		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
	} else if (pg->cp_state == CPS_PAGEOUT) {
		/* Is it possible for osc_flush_async_page() to have already
		 * made it ready? */
		result = -EALREADY;
	} else {
		CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
			      pg->cp_state);
		LBUG();
	}
	unlock_page(vmpage);
	RETURN(result);
}

static int vvp_page_is_under_lock(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *io, pgoff_t *max_index)
{
	ENTRY;

	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
	    io->ci_type == CIT_FAULT) {
		struct ccc_io *cio = ccc_env_io(env);

		if (unlikely(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
			*max_index = CL_PAGE_EOF;
	}
	RETURN(0);
}

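/*
 * A group lock covers the whole file, so when one is held the per-extent
 * check above can be short-circuited by reporting CL_PAGE_EOF as the
 * maximum index covered by the lock.
 */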
static int vvp_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
	struct ccc_page *vp = cl2ccc_page(slice);
	struct page *vmpage = vp->cpg_page;

	(*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
		   "vm@%p ",
		   vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
		   vp->cpg_write_queued, vmpage);
	if (vmpage != NULL) {
		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
			   (long)vmpage->flags, page_count(vmpage),
			   page_mapcount(vmpage), vmpage->private,
			   page_index(vmpage),
			   list_empty(&vmpage->lru) ? "not-" : "");
	}
	(*printer)(env, cookie, "\n");
	return 0;
}

static const struct cl_page_operations vvp_page_ops = {
	.cpo_own	   = vvp_page_own,
	.cpo_assume	   = vvp_page_assume,
	.cpo_unassume	   = vvp_page_unassume,
	.cpo_disown	   = vvp_page_disown,
	.cpo_discard	   = vvp_page_discard,
	.cpo_delete	   = vvp_page_delete,
	.cpo_export	   = vvp_page_export,
	.cpo_is_vmlocked   = vvp_page_is_vmlocked,
	.cpo_fini	   = vvp_page_fini,
	.cpo_print	   = vvp_page_print,
	.cpo_is_under_lock = vvp_page_is_under_lock,
	.io = {
		[CRT_READ] = {
			.cpo_prep	= vvp_page_prep_read,
			.cpo_completion	= vvp_page_completion_read,
			.cpo_make_ready	= ccc_fail,
		},
		[CRT_WRITE] = {
			.cpo_prep	= vvp_page_prep_write,
			.cpo_completion	= vvp_page_completion_write,
			.cpo_make_ready	= vvp_page_make_ready,
		},
	},
};

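/*
 * Transient pages (CPT_TRANSIENT, e.g. for direct I/O) are never inserted
 * into the page cache, so their ownership is serialized by the inode lock
 * rather than by the page lock; vvp_transient_page_verify() below is a
 * stub kept as a hook for such sanity checks.
 */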
static void vvp_transient_page_verify(const struct cl_page *page)
{
}

static int vvp_transient_page_own(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *unused, int nonblock)
{
	vvp_transient_page_verify(slice->cpl_page);
	return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
					const struct cl_page_slice *slice,
					struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
				       const struct cl_page_slice *slice,
				       struct cl_io *unused)
{
	struct cl_page *page = slice->cpl_page;

	vvp_transient_page_verify(slice->cpl_page);

	/*
	 * For a transient page, simply remove it from the radix tree.
	 */
	cl_page_delete(env, page);
}

static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
					  const struct cl_page_slice *slice)
{
	struct inode *inode = vvp_object_inode(slice->cpl_obj);
	int locked;

	locked = !mutex_trylock(&inode->i_mutex);
	if (!locked)
		mutex_unlock(&inode->i_mutex);
	return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      int ioret)
{
	vvp_transient_page_verify(slice->cpl_page);
}

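/*
 * Transient pages are accounted per object in vob_transient_pages: the
 * counter is incremented in vvp_page_init() and decremented here once the
 * slice is finalized.
 */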
static void vvp_transient_page_fini(const struct lu_env *env,
				    struct cl_page_slice *slice)
{
	struct ccc_page *cp = cl2ccc_page(slice);
	struct cl_page *clp = slice->cpl_page;
	struct vvp_object *clobj = cl2vvp(clp->cp_obj);

	vvp_page_fini_common(cp);
	atomic_dec(&clobj->vob_transient_pages);
}

static const struct cl_page_operations vvp_transient_page_ops = {
	.cpo_own	   = vvp_transient_page_own,
	.cpo_assume	   = vvp_transient_page_assume,
	.cpo_unassume	   = vvp_transient_page_unassume,
	.cpo_disown	   = vvp_transient_page_disown,
	.cpo_discard	   = vvp_transient_page_discard,
	.cpo_fini	   = vvp_transient_page_fini,
	.cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
	.cpo_print	   = vvp_page_print,
	.cpo_is_under_lock = vvp_page_is_under_lock,
	.io = {
		[CRT_READ] = {
			.cpo_prep	= ccc_transient_page_prep,
			.cpo_completion	= vvp_transient_page_completion,
		},
		[CRT_WRITE] = {
			.cpo_prep	= ccc_transient_page_prep,
			.cpo_completion	= vvp_transient_page_completion,
		},
	},
};

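/*
 * vvp_page_init() is the VVP side of cl_page initialization: it attaches
 * the ccc_page slice to the cl_page, pins the backing vmpage, and wires up
 * either the cacheable or the transient operation vector depending on the
 * page type.
 */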
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t index)
{
	struct ccc_page *cpg = cl_object_page_slice(obj, page);
	struct page *vmpage = page->cp_vmpage;

	CLOBINVRNT(env, obj, vvp_object_invariant(obj));

	cpg->cpg_page = vmpage;
	page_cache_get(vmpage);

	INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
	if (page->cp_type == CPT_CACHEABLE) {
		/* in cache, decref in vvp_page_delete */
		atomic_inc(&page->cp_ref);
		SetPagePrivate(vmpage);
		vmpage->private = (unsigned long)page;
		cl_page_slice_add(page, &cpg->cpg_cl, obj, index,
				  &vvp_page_ops);
	} else {
		struct vvp_object *clobj = cl2vvp(obj);

		cl_page_slice_add(page, &cpg->cpg_cl, obj, index,
				  &vvp_transient_page_ops);
		atomic_inc(&clobj->vob_transient_pages);
	}
	return 0;
}