/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

#include <libcfs/libcfs.h>
#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

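/*
 * Release the reference on the backing vmpage that vvp_page_init() took
 * with page_cache_get(). Shared by the cacheable and transient fini paths.
 */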
static void vvp_page_fini_common(struct vvp_page *vpg)
{
	struct page *vmpage = vpg->vpg_page;

	LASSERT(vmpage != NULL);
	page_cache_release(vmpage);
}

static void vvp_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice)
{
	struct vvp_page *vpg    = cl2vvp_page(slice);
	struct page     *vmpage = vpg->vpg_page;

	/*
	 * vmpage->private was already cleared when page was moved into
	 * CPS_FREEING state.
	 */
	LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
	vvp_page_fini_common(vpg);
}

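/*
 * Implements cl_page_operations::cpo_own(): acquire the VM page lock and
 * wait out any writeback so that this layer owns the page. In non-blocking
 * mode the lock is only tried, and a busy or writeback page is skipped
 * with -EAGAIN.
 */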
static int vvp_page_own(const struct lu_env *env,
			const struct cl_page_slice *slice, struct cl_io *io,
			int nonblock)
{
	struct vvp_page *vpg    = cl2vvp_page(slice);
	struct page     *vmpage = vpg->vpg_page;

	LASSERT(vmpage != NULL);
	if (nonblock) {
		if (!trylock_page(vmpage))
			return -EAGAIN;

		if (unlikely(PageWriteback(vmpage))) {
			unlock_page(vmpage);
			return -EAGAIN;
		}

		return 0;
	}

	lock_page(vmpage);
	wait_on_page_writeback(vmpage);

	return 0;
}

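/*
 * cpo_assume() is called when ownership is transferred to this layer with
 * the vmpage lock already held, so it only asserts the invariant and waits
 * out any writeback in progress; cpo_unassume() below is the mirror image.
 */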
static void vvp_page_assume(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
	wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
			    const struct cl_page_slice *slice, struct cl_io *io)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	unlock_page(cl2vm_page(slice));
}

static void vvp_page_discard(const struct lu_env *env,
			     const struct cl_page_slice *slice,
			     struct cl_io *unused)
{
	struct page     *vmpage = cl2vm_page(slice);
	struct vvp_page *vpg    = cl2vvp_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));

	if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
		ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);

	ll_invalidate_page(vmpage);
}

static void vvp_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct page      *vmpage = cl2vm_page(slice);
	struct inode     *inode  = vmpage->mapping->host;
	struct cl_object *obj    = slice->cpl_obj;
	struct cl_page   *page   = slice->cpl_page;
	int refc;

	LASSERT(PageLocked(vmpage));
	LASSERT((struct cl_page *)vmpage->private == page);
	LASSERT(inode == vvp_object_inode(obj));

	/* Drop the reference count held in vvp_page_init */
	refc = atomic_dec_return(&page->cp_ref);
	LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

	ClearPageUptodate(vmpage);
	ClearPagePrivate(vmpage);
	vmpage->private = 0;
	/*
	 * Reference from vmpage to cl_page is removed, but the reference back
	 * is still here. It is removed later in vvp_page_fini().
	 */
}

static void vvp_page_export(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    int uptodate)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage != NULL);
	LASSERT(PageLocked(vmpage));
	if (uptodate)
		SetPageUptodate(vmpage);
	else
		ClearPageUptodate(vmpage);
}

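/*
 * cpo_is_vmlocked() reports the VM lock state of the page: -EBUSY if the
 * vmpage is currently locked, -ENODATA otherwise. The generic cl_page
 * code maps this to a boolean by comparing the result against -EBUSY.
 */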
static int vvp_page_is_vmlocked(const struct lu_env *env,
				const struct cl_page_slice *slice)
{
	return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	ENTRY;
	/* Skip the page already marked as PG_uptodate. */
	RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
}

static int vvp_page_prep_write(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(PageLocked(vmpage));
	LASSERT(!PageDirty(vmpage));

	set_page_writeback(vmpage);

	return 0;
}

/**
 * Handles page transfer errors at VM level.
 *
 * This takes inode as a separate argument, because inode on which error is to
 * be set can be different from \a vmpage inode in case of direct-io.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
	struct vvp_object *obj = cl_inode2vvp(inode);

	if (ioret == 0) {
		ClearPageError(vmpage);
		obj->vob_discard_page_warned = 0;
	} else {
		SetPageError(vmpage);
		if (ioret == -ENOSPC)
			set_bit(AS_ENOSPC, &inode->i_mapping->flags);
		else
			set_bit(AS_EIO, &inode->i_mapping->flags);

		if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
		    obj->vob_discard_page_warned == 0) {
			obj->vob_discard_page_warned = 1;
			ll_dirty_page_discard_warn(vmpage, ioret);
		}
	}
}

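/*
 * Read completion handler. A page brought in by read-ahead carries
 * vpg_defer_uptodate and is not exported to the VM here; it is exported
 * later, when a read actually consumes it, so that unused read-ahead can
 * be detected (see the RA_STAT_DISCARDED accounting in vvp_page_discard()).
 */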
static void vvp_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct vvp_page *vpg    = cl2vvp_page(slice);
	struct page     *vmpage = vpg->vpg_page;
	struct cl_page  *page   = slice->cpl_page;
	struct inode    *inode  = vvp_object_inode(page->cp_obj);
	ENTRY;

	LASSERT(PageLocked(vmpage));
	CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

	if (vpg->vpg_defer_uptodate)
		ll_ra_count_put(ll_i2sbi(inode), 1);

	if (ioret == 0) {
		if (!vpg->vpg_defer_uptodate)
			cl_page_export(env, page, 1);
	} else {
		vpg->vpg_defer_uptodate = 0;
	}

	if (page->cp_sync_io == NULL)
		unlock_page(vmpage);

	EXIT;
}

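/*
 * Write completion handler. For a synchronous write the waiting thread
 * observes the result through the cl_sync_io completion, so the PG_error
 * machinery is only engaged for asynchronous (cached) writeback below.
 */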
static void vvp_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct vvp_page *vpg    = cl2vvp_page(slice);
	struct cl_page  *pg     = slice->cpl_page;
	struct page     *vmpage = vpg->vpg_page;
	ENTRY;

	LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
	LASSERT(PageWriteback(vmpage));

	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

	/*
	 * Only mark the page error when it's an async write because
	 * applications won't wait for IO to finish.
	 */
	if (pg->cp_sync_io == NULL)
		vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);

	end_page_writeback(vmpage);
	EXIT;
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out as
 * a part of transfer. This function try-locks the page. If try-lock failed,
 * page is owned by some concurrent IO, and should be skipped (this is bad,
 * but hopefully rare situation, as it usually results in transfer being
 * shorter than possible).
 *
 * \retval 0	    success, page can be placed into transfer
 *
 * \retval -EAGAIN  page is either used by concurrent IO or has been
 *		    truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
			       const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct cl_page *pg = slice->cpl_page;
	int result = 0;

	lock_page(vmpage);
	if (clear_page_dirty_for_io(vmpage)) {
		LASSERT(pg->cp_state == CPS_CACHED);
		/* This actually clears the dirty bit in the radix
		 * tree. */
		set_page_writeback(vmpage);
		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
	} else if (pg->cp_state == CPS_PAGEOUT) {
		/* is it possible for osc_flush_async_page() to already
		 * make it ready? */
		result = -EALREADY;
	} else {
		CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
			      pg->cp_state);
		LBUG();
	}
	unlock_page(vmpage);
	RETURN(result);
}

static int vvp_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
	struct vvp_page *vpg    = cl2vvp_page(slice);
	struct page     *vmpage = vpg->vpg_page;

	(*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d) "
		   "vm@%p ",
		   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);

	if (vmpage != NULL) {
		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
			   (long)vmpage->flags, page_count(vmpage),
			   page_mapcount(vmpage), vmpage->private,
			   page_index(vmpage),
			   list_empty(&vmpage->lru) ? "not-" : "");
	}

	(*printer)(env, cookie, "\n");

	return 0;
}

static int vvp_page_fail(const struct lu_env *env,
			 const struct cl_page_slice *slice)
{
	/*
	 * Cached read?
	 */
	LBUG();

	return 0;
}

static const struct cl_page_operations vvp_page_ops = {
	.cpo_own           = vvp_page_own,
	.cpo_assume        = vvp_page_assume,
	.cpo_unassume      = vvp_page_unassume,
	.cpo_disown        = vvp_page_disown,
	.cpo_discard       = vvp_page_discard,
	.cpo_delete        = vvp_page_delete,
	.cpo_export        = vvp_page_export,
	.cpo_is_vmlocked   = vvp_page_is_vmlocked,
	.cpo_fini          = vvp_page_fini,
	.cpo_print         = vvp_page_print,
	.io = {
		[CRT_READ] = {
			.cpo_prep       = vvp_page_prep_read,
			.cpo_completion = vvp_page_completion_read,
			.cpo_make_ready = vvp_page_fail,
		},
		[CRT_WRITE] = {
			.cpo_prep       = vvp_page_prep_write,
			.cpo_completion = vvp_page_completion_write,
			.cpo_make_ready = vvp_page_make_ready,
		},
	},
};

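/*
 * Note the asymmetry in the .io table above: only CRT_WRITE has a real
 * cpo_make_ready() method, since only dirty cached pages are pulled from
 * the cache for transfer. The CRT_READ slot deliberately points at
 * vvp_page_fail(), which must never be reached.
 */
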
static int vvp_transient_page_prep(const struct lu_env *env,
				   const struct cl_page_slice *slice,
				   struct cl_io *unused)
{
	ENTRY;
	/* transient page should always be sent. */
	RETURN(0);
}

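/*
 * Transient pages back direct I/O; they are never cached by the VM. The
 * verify hook below is an intentionally empty placeholder for ownership
 * sanity checks on such pages.
 */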
static void vvp_transient_page_verify(const struct cl_page *page)
{
}

static int vvp_transient_page_own(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *unused, int nonblock)
{
	vvp_transient_page_verify(slice->cpl_page);
	return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
					const struct cl_page_slice *slice,
					struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
				       const struct cl_page_slice *slice,
				       struct cl_io *unused)
{
	struct cl_page *page = slice->cpl_page;

	vvp_transient_page_verify(slice->cpl_page);

	/*
	 * For transient pages, remove them from the radix tree.
	 */
	cl_page_delete(env, page);
}

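/*
 * There is no vmpage lock to test for a transient page, so the inode
 * mutex is probed instead: a failed trylock means the inode is locked,
 * which stands in for page ownership here.
 */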
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
					  const struct cl_page_slice *slice)
{
	struct inode *inode = vvp_object_inode(slice->cpl_obj);
	int locked;

	locked = !mutex_trylock(&inode->i_mutex);
	if (!locked)
		mutex_unlock(&inode->i_mutex);
	return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      int ioret)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_fini(const struct lu_env *env,
				    struct cl_page_slice *slice)
{
	struct vvp_page *vpg = cl2vvp_page(slice);
	struct cl_page *clp = slice->cpl_page;
	struct vvp_object *clobj = cl2vvp(clp->cp_obj);

	vvp_page_fini_common(vpg);
	atomic_dec(&clobj->vob_transient_pages);
}

static const struct cl_page_operations vvp_transient_page_ops = {
	.cpo_own           = vvp_transient_page_own,
	.cpo_assume        = vvp_transient_page_assume,
	.cpo_unassume      = vvp_transient_page_unassume,
	.cpo_disown        = vvp_transient_page_disown,
	.cpo_discard       = vvp_transient_page_discard,
	.cpo_fini          = vvp_transient_page_fini,
	.cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
	.cpo_print         = vvp_page_print,
	.io = {
		[CRT_READ] = {
			.cpo_prep       = vvp_transient_page_prep,
			.cpo_completion = vvp_transient_page_completion,
		},
		[CRT_WRITE] = {
			.cpo_prep       = vvp_transient_page_prep,
			.cpo_completion = vvp_transient_page_completion,
		},
	},
};

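/*
 * Implements cl_object_operations::coo_page_init() for the VVP layer:
 * attaches the vvp_page slice to a freshly allocated cl_page. Every slice
 * pins its vmpage; a cacheable page additionally links vmpage->private
 * back to the cl_page and takes an extra cp_ref that vvp_page_delete()
 * drops when the page leaves the cache.
 */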
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t index)
{
	struct vvp_page *vpg    = cl_object_page_slice(obj, page);
	struct page     *vmpage = page->cp_vmpage;

	CLOBINVRNT(env, obj, vvp_object_invariant(obj));

	vpg->vpg_page = vmpage;
	page_cache_get(vmpage);

	if (page->cp_type == CPT_CACHEABLE) {
		/* in cache, decref in vvp_page_delete */
		atomic_inc(&page->cp_ref);
		SetPagePrivate(vmpage);
		vmpage->private = (unsigned long)page;
		cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
				  &vvp_page_ops);
	} else {
		struct vvp_object *clobj = cl2vvp(obj);

		cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
				  &vvp_transient_page_ops);
		atomic_inc(&clobj->vob_transient_pages);
	}

	return 0;
}