*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LLITE
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
-#include <obd.h>
+#include <libcfs/libcfs.h>
#include "llite_internal.h"
#include "vvp_internal.h"
*
*/
-static void vvp_page_fini_common(struct ccc_page *cp)
+static void vvp_page_fini_common(struct vvp_page *vpg)
{
- struct page *vmpage = cp->cpg_page;
+ struct page *vmpage = vpg->vpg_page;
LASSERT(vmpage != NULL);
- page_cache_release(vmpage);
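+ /* Drop the page reference taken in vvp_page_init(). */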
+ put_page(vmpage);
}
static void vvp_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct page *vmpage = cp->cpg_page;
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
/*
* vmpage->private was already cleared when page was moved into
* CPS_FREEING state.
*/
LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
- vvp_page_fini_common(cp);
+ vvp_page_fini_common(vpg);
}
static int vvp_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io,
- int nonblock)
+ const struct cl_page_slice *slice, struct cl_io *io,
+ int nonblock)
{
- struct ccc_page *vpg = cl2ccc_page(slice);
- struct page *vmpage = vpg->cpg_page;
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
LASSERT(vmpage != NULL);
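+ /* In the non-blocking case, back off with -EAGAIN instead of
+ * sleeping on the page lock or on writeback. */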
if (nonblock) {
if (!trylock_page(vmpage))
return -EAGAIN;
- if (unlikely(PageWriteback(vmpage))) {
- unlock_page(vmpage);
- return -EAGAIN;
- }
+ if (unlikely(PageWriteback(vmpage))) {
+ unlock_page(vmpage);
+ return -EAGAIN;
+ }
- return 0;
- }
+ return 0;
+ }
- lock_page(vmpage);
- wait_on_page_writeback(vmpage);
- return 0;
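+ /* Blocking case: take the page lock and wait for any writeback
+ * in flight to finish. */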
+ lock_page(vmpage);
+ wait_on_page_writeback(vmpage);
+
+ return 0;
}
static void vvp_page_assume(const struct lu_env *env,
}
static void vvp_page_discard(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
{
struct page *vmpage = cl2vm_page(slice);
- struct ccc_page *cpg = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
LASSERT(vmpage != NULL);
LASSERT(PageLocked(vmpage));
- if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
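+ /* Account a readahead page that is discarded before it was
+ * ever used. */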
+ if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
ll_invalidate_page(vmpage);
}
static void vvp_page_delete(const struct lu_env *env,
- const struct cl_page_slice *slice)
+ const struct cl_page_slice *slice)
{
struct page *vmpage = cl2vm_page(slice);
struct inode *inode = vmpage->mapping->host;
LASSERT((struct cl_page *)vmpage->private == page);
LASSERT(inode == vvp_object_inode(obj));
- vvp_write_complete(cl2vvp(obj), cl2ccc_page(slice));
-
/* Drop the reference count held in vvp_page_init */
refc = atomic_dec_return(&page->cp_ref);
LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
- ClearPageUptodate(vmpage);
ClearPagePrivate(vmpage);
vmpage->private = 0;
/*
struct cl_io *unused)
{
struct page *vmpage = cl2vm_page(slice);
+ struct cl_page *pg = slice->cpl_page;
LASSERT(PageLocked(vmpage));
LASSERT(!PageDirty(vmpage));
- set_page_writeback(vmpage);
- vvp_write_pending(cl2vvp(slice->cpl_obj), cl2ccc_page(slice));
+ /* The ll_writepage path is not a sync write, so we need to set the
+ * page writeback flag. */
+ if (pg->cp_sync_io == NULL)
+ set_page_writeback(vmpage);
return 0;
}
}
static void vvp_page_completion_read(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
+ const struct cl_page_slice *slice,
+ int ioret)
{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct page *vmpage = cp->cpg_page;
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
struct cl_page *page = slice->cpl_page;
struct inode *inode = vvp_object_inode(page->cp_obj);
- ENTRY;
+ ENTRY;
- LASSERT(PageLocked(vmpage));
- CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
+ LASSERT(PageLocked(vmpage));
+ CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
- if (cp->cpg_defer_uptodate)
- ll_ra_count_put(ll_i2sbi(inode), 1);
+ if (vpg->vpg_defer_uptodate)
+ ll_ra_count_put(ll_i2sbi(inode), 1);
- if (ioret == 0) {
- if (!cp->cpg_defer_uptodate)
- cl_page_export(env, page, 1);
- } else
- cp->cpg_defer_uptodate = 0;
+ if (ioret == 0) {
+ if (!vpg->vpg_defer_uptodate)
+ cl_page_export(env, page, 1);
+ } else {
+ vpg->vpg_defer_uptodate = 0;
+ }
- if (page->cp_sync_io == NULL)
- unlock_page(vmpage);
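+ /* Only async (readahead) pages are unlocked here; a page under
+ * sync I/O stays locked for the waiting caller. */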
+ if (page->cp_sync_io == NULL)
+ unlock_page(vmpage);
- EXIT;
+ EXIT;
}
static void vvp_page_completion_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
+ const struct cl_page_slice *slice,
+ int ioret)
{
- struct ccc_page *cp = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
struct cl_page *pg = slice->cpl_page;
- struct page *vmpage = cp->cpg_page;
+ struct page *vmpage = vpg->vpg_page;
ENTRY;
- LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
- LASSERT(PageWriteback(vmpage));
-
- CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
+ CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
- /*
- * TODO: Actually it makes sense to add the page into oap pending
- * list again and so that we don't need to take the page out from
- * SoM write pending list, if we just meet a recoverable error,
- * -ENOMEM, etc.
- * To implement this, we just need to return a non zero value in
- * ->cpo_completion method. The underlying transfer should be notified
- * and then re-add the page into pending transfer queue. -jay
- */
-
- cp->cpg_write_queued = 0;
- vvp_write_complete(cl2vvp(slice->cpl_obj), cp);
-
- /*
- * Only mark the page error only when it's an async write because
- * applications won't wait for IO to finish.
- */
- if (pg->cp_sync_io == NULL)
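+ /* Sync writes never set PageWriteback (see vvp_page_prep_write()),
+ * so the page flags expected here differ between the two paths. */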
+ if (pg->cp_sync_io != NULL) {
+ LASSERT(PageLocked(vmpage));
+ LASSERT(!PageWriteback(vmpage));
+ } else {
+ LASSERT(PageWriteback(vmpage));
+ /*
+ * Only mark the page error when it's an async write,
+ * because applications won't wait for the IO to finish.
+ */
vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);
- end_page_writeback(vmpage);
- EXIT;
+ end_page_writeback(vmpage);
+ }
+ EXIT;
}
/**
/* This actually clears the dirty bit in the radix
* tree. */
set_page_writeback(vmpage);
- vvp_write_pending(cl2vvp(slice->cpl_obj),
- cl2ccc_page(slice));
CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
} else if (pg->cp_state == CPS_PAGEOUT) {
/* is it possible for osc_flush_async_page() to already
RETURN(result);
}
-static int vvp_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io, pgoff_t *max_index)
+static int vvp_page_print(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ void *cookie, lu_printer_t printer)
{
- ENTRY;
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
+
+ (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d) vm@%p ",
+ vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);
+
+ if (vmpage != NULL) {
+ (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
+ (long)vmpage->flags, page_count(vmpage),
+ page_mapcount(vmpage), vmpage->private,
+ page_index(vmpage),
+ list_empty(&vmpage->lru) ? "not-" : "");
+ }
- if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
- io->ci_type == CIT_FAULT) {
- struct ccc_io *cio = ccc_env_io(env);
+ (*printer)(env, cookie, "\n");
- if (unlikely(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
- *max_index = CL_PAGE_EOF;
- }
- RETURN(0);
+ return 0;
}
-
-static int vvp_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
+static int vvp_page_fail(const struct lu_env *env,
+ const struct cl_page_slice *slice)
{
- struct ccc_page *vp = cl2ccc_page(slice);
- struct page *vmpage = vp->cpg_page;
-
- (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
- "vm@%p ",
- vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
- vp->cpg_write_queued, vmpage);
- if (vmpage != NULL) {
- (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
- (long)vmpage->flags, page_count(vmpage),
- page_mapcount(vmpage), vmpage->private,
- page_index(vmpage),
- list_empty(&vmpage->lru) ? "not-" : "");
- }
- (*printer)(env, cookie, "\n");
- return 0;
+ /*
+ * make_ready is never valid for a read page (see the CRT_READ
+ * ops table below); reaching this handler is a bug.
+ */
+ LBUG();
+
+ return 0;
}
static const struct cl_page_operations vvp_page_ops = {
- .cpo_own = vvp_page_own,
- .cpo_assume = vvp_page_assume,
- .cpo_unassume = vvp_page_unassume,
- .cpo_disown = vvp_page_disown,
- .cpo_discard = vvp_page_discard,
- .cpo_delete = vvp_page_delete,
- .cpo_export = vvp_page_export,
- .cpo_is_vmlocked = vvp_page_is_vmlocked,
- .cpo_fini = vvp_page_fini,
- .cpo_print = vvp_page_print,
- .cpo_is_under_lock = vvp_page_is_under_lock,
- .io = {
- [CRT_READ] = {
- .cpo_prep = vvp_page_prep_read,
- .cpo_completion = vvp_page_completion_read,
- .cpo_make_ready = ccc_fail,
- },
- [CRT_WRITE] = {
- .cpo_prep = vvp_page_prep_write,
- .cpo_completion = vvp_page_completion_write,
- .cpo_make_ready = vvp_page_make_ready,
- }
- }
+ .cpo_own = vvp_page_own,
+ .cpo_assume = vvp_page_assume,
+ .cpo_unassume = vvp_page_unassume,
+ .cpo_disown = vvp_page_disown,
+ .cpo_discard = vvp_page_discard,
+ .cpo_delete = vvp_page_delete,
+ .cpo_export = vvp_page_export,
+ .cpo_is_vmlocked = vvp_page_is_vmlocked,
+ .cpo_fini = vvp_page_fini,
+ .cpo_print = vvp_page_print,
+ .io = {
+ [CRT_READ] = {
+ .cpo_prep = vvp_page_prep_read,
+ .cpo_completion = vvp_page_completion_read,
+ .cpo_make_ready = vvp_page_fail,
+ },
+ [CRT_WRITE] = {
+ .cpo_prep = vvp_page_prep_write,
+ .cpo_completion = vvp_page_completion_write,
+ .cpo_make_ready = vvp_page_make_ready,
+ },
+ },
};
+static int vvp_transient_page_prep(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ ENTRY;
+ /* A transient page should always be sent. */
+ RETURN(0);
+}
+
static void vvp_transient_page_verify(const struct cl_page *page)
{
}
struct inode *inode = vvp_object_inode(slice->cpl_obj);
int locked;
- locked = !mutex_trylock(&inode->i_mutex);
+ locked = !inode_trylock(inode);
if (!locked)
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return locked ? -EBUSY : -ENODATA;
}
static void vvp_transient_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct ccc_page *cp = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
struct cl_page *clp = slice->cpl_page;
struct vvp_object *clobj = cl2vvp(clp->cp_obj);
- vvp_page_fini_common(cp);
+ vvp_page_fini_common(vpg);
atomic_dec(&clobj->vob_transient_pages);
}
.cpo_fini = vvp_transient_page_fini,
.cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
.cpo_print = vvp_page_print,
- .cpo_is_under_lock = vvp_page_is_under_lock,
.io = {
[CRT_READ] = {
- .cpo_prep = ccc_transient_page_prep,
+ .cpo_prep = vvp_transient_page_prep,
.cpo_completion = vvp_transient_page_completion,
},
[CRT_WRITE] = {
- .cpo_prep = ccc_transient_page_prep,
+ .cpo_prep = vvp_transient_page_prep,
.cpo_completion = vvp_transient_page_completion,
}
}
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t index)
{
- struct ccc_page *cpg = cl_object_page_slice(obj, page);
+ struct vvp_page *vpg = cl_object_page_slice(obj, page);
struct page *vmpage = page->cp_vmpage;
CLOBINVRNT(env, obj, vvp_object_invariant(obj));
- cpg->cpg_page = vmpage;
- page_cache_get(vmpage);
+ vpg->vpg_page = vmpage;
+ get_page(vmpage);
- INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
if (page->cp_type == CPT_CACHEABLE) {
/* in cache, decref in vvp_page_delete */
atomic_inc(&page->cp_ref);
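+ /* Link the VM page back to its cl_page through vmpage->private;
+ * vvp_page_delete() clears the link again. */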
SetPagePrivate(vmpage);
vmpage->private = (unsigned long)page;
- cl_page_slice_add(page, &cpg->cpg_cl, obj, index,
+ cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
&vvp_page_ops);
} else {
struct vvp_object *clobj = cl2vvp(obj);
- cl_page_slice_add(page, &cpg->cpg_cl, obj, index,
+ cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
&vvp_transient_page_ops);
atomic_inc(&clobj->vob_transient_pages);
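+ /* Balanced by the atomic_dec in vvp_transient_page_fini(). */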
}
return 0;
}
-