X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Fvvp_dev.c;h=7db2a47185234d2709e63b06486062aa06a2f406;hp=dca7a3926ad50833da5f43f7f6ea569a1ca19428;hb=1e4d10af3909452b0eee1f99010d80aeb01d42a7;hpb=08aa217ce49aba1ded52e0f7adb8a607035123fd diff --git a/lustre/llite/vvp_dev.c b/lustre/llite/vvp_dev.c index dca7a39..7db2a47 100644 --- a/lustre/llite/vvp_dev.c +++ b/lustre/llite/vvp_dev.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,27 +23,23 @@ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2012, Intel Corporation. + * Copyright (c) 2012, 2016, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. * * cl_device and cl_device_type implementation for VVP layer. * * Author: Nikita Danilov + * Author: Jinshan Xiong */ #define DEBUG_SUBSYSTEM S_LLITE -#ifndef __KERNEL__ -# error This file is kernel only. -#endif - #include -#include - +#include "llite_internal.h" #include "vvp_internal.h" +#include /***************************************************************************** * @@ -60,57 +52,72 @@ * "llite_" (var. "ll_") prefix. */ -cfs_mem_cache_t *vvp_page_kmem; -cfs_mem_cache_t *vvp_thread_kmem; -static cfs_mem_cache_t *vvp_session_kmem; +static struct kmem_cache *ll_thread_kmem; +struct kmem_cache *vvp_object_kmem; +static struct kmem_cache *vvp_session_kmem; +static struct kmem_cache *vvp_thread_kmem; + static struct lu_kmem_descr vvp_caches[] = { - { - .ckd_cache = &vvp_page_kmem, - .ckd_name = "vvp_page_kmem", - .ckd_size = sizeof (struct ccc_page) - }, - { - .ckd_cache = &vvp_thread_kmem, - .ckd_name = "vvp_thread_kmem", - .ckd_size = sizeof (struct vvp_thread_info), - }, + { + .ckd_cache = &ll_thread_kmem, + .ckd_name = "ll_thread_kmem", + .ckd_size = sizeof(struct ll_thread_info), + }, + { + .ckd_cache = &vvp_object_kmem, + .ckd_name = "vvp_object_kmem", + .ckd_size = sizeof(struct vvp_object), + }, { .ckd_cache = &vvp_session_kmem, .ckd_name = "vvp_session_kmem", .ckd_size = sizeof (struct vvp_session) }, + { + .ckd_cache = &vvp_thread_kmem, + .ckd_name = "vvp_thread_kmem", + .ckd_size = sizeof(struct vvp_thread_info), + }, { .ckd_cache = NULL } }; -static void *vvp_key_init(const struct lu_context *ctx, - struct lu_context_key *key) +static void *ll_thread_key_init(const struct lu_context *ctx, + struct lu_context_key *key) { - struct vvp_thread_info *info; + struct ll_thread_info *lti; + + OBD_SLAB_ALLOC_PTR_GFP(lti, ll_thread_kmem, GFP_NOFS); + if (lti == NULL) + lti = ERR_PTR(-ENOMEM); - OBD_SLAB_ALLOC_PTR_GFP(info, vvp_thread_kmem, CFS_ALLOC_IO); - if (info == NULL) - info = ERR_PTR(-ENOMEM); - return info; + return lti; } -static void vvp_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) +static void ll_thread_key_fini(const struct lu_context *ctx, + struct lu_context_key *key, void *data) { - struct vvp_thread_info *info = data; - OBD_SLAB_FREE_PTR(info, vvp_thread_kmem); + struct ll_thread_info *lti = data; + + OBD_SLAB_FREE_PTR(lti, 
ll_thread_kmem); } +struct lu_context_key ll_thread_key = { + .lct_tags = LCT_CL_THREAD, + .lct_init = ll_thread_key_init, + .lct_fini = ll_thread_key_fini, +}; + static void *vvp_session_key_init(const struct lu_context *ctx, - struct lu_context_key *key) + struct lu_context_key *key) { - struct vvp_session *session; + struct vvp_session *session; - OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, CFS_ALLOC_IO); - if (session == NULL) - session = ERR_PTR(-ENOMEM); - return session; + OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, GFP_NOFS); + if (session == NULL) + session = ERR_PTR(-ENOMEM); + return session; } static void vvp_session_key_fini(const struct lu_context *ctx, @@ -120,35 +127,122 @@ static void vvp_session_key_fini(const struct lu_context *ctx, OBD_SLAB_FREE_PTR(session, vvp_session_kmem); } - -struct lu_context_key vvp_key = { - .lct_tags = LCT_CL_THREAD, - .lct_init = vvp_key_init, - .lct_fini = vvp_key_fini -}; - struct lu_context_key vvp_session_key = { .lct_tags = LCT_SESSION, .lct_init = vvp_session_key_init, .lct_fini = vvp_session_key_fini }; +static void *vvp_thread_key_init(const struct lu_context *ctx, + struct lu_context_key *key) +{ + struct vvp_thread_info *vti; + + OBD_SLAB_ALLOC_PTR_GFP(vti, vvp_thread_kmem, GFP_NOFS); + if (vti == NULL) + vti = ERR_PTR(-ENOMEM); + return vti; +} + +static void vvp_thread_key_fini(const struct lu_context *ctx, + struct lu_context_key *key, void *data) +{ + struct vvp_thread_info *vti = data; + OBD_SLAB_FREE_PTR(vti, vvp_thread_kmem); +} + +struct lu_context_key vvp_thread_key = { + .lct_tags = LCT_CL_THREAD, + .lct_init = vvp_thread_key_init, + .lct_fini = vvp_thread_key_fini, +}; + /* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */ -LU_TYPE_INIT_FINI(vvp, &ccc_key, &ccc_session_key, &vvp_key, &vvp_session_key); +LU_TYPE_INIT_FINI(vvp, &ll_thread_key, &vvp_session_key, &vvp_thread_key); static const struct lu_device_operations vvp_lu_ops = { .ldo_object_alloc = vvp_object_alloc }; -static const struct cl_device_operations vvp_cl_ops = { - .cdo_req_init = ccc_req_init -}; +static struct lu_device *vvp_device_free(const struct lu_env *env, + struct lu_device *d) +{ + struct vvp_device *vdv = lu2vvp_dev(d); + struct cl_site *site = lu2cl_site(d->ld_site); + struct lu_device *next = cl2lu_dev(vdv->vdv_next); + + if (d->ld_site != NULL) { + cl_site_fini(site); + OBD_FREE_PTR(site); + } + + cl_device_fini(lu2cl_dev(d)); + OBD_FREE_PTR(vdv); + return next; +} static struct lu_device *vvp_device_alloc(const struct lu_env *env, - struct lu_device_type *t, - struct lustre_cfg *cfg) + struct lu_device_type *t, + struct lustre_cfg *cfg) { - return ccc_device_alloc(env, t, cfg, &vvp_lu_ops, &vvp_cl_ops); + struct vvp_device *vdv; + struct lu_device *lud; + struct cl_site *site; + int rc; + ENTRY; + + OBD_ALLOC_PTR(vdv); + if (vdv == NULL) + RETURN(ERR_PTR(-ENOMEM)); + + lud = &vdv->vdv_cl.cd_lu_dev; + cl_device_init(&vdv->vdv_cl, t); + vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops; + + OBD_ALLOC_PTR(site); + if (site != NULL) { + rc = cl_site_init(site, &vdv->vdv_cl); + if (rc == 0) + rc = lu_site_init_finish(&site->cs_lu); + else { + LASSERT(lud->ld_site == NULL); + CERROR("Cannot init lu_site, rc %d.\n", rc); + OBD_FREE_PTR(site); + } + } else + rc = -ENOMEM; + if (rc != 0) { + vvp_device_free(env, lud); + lud = ERR_PTR(rc); + } + RETURN(lud); +} + +static int vvp_device_init(const struct lu_env *env, struct lu_device *d, + const char *name, struct lu_device *next) +{ + struct vvp_device *vdv; + int rc; + ENTRY; + + vdv = 
lu2vvp_dev(d); + vdv->vdv_next = lu2cl_dev(next); + + LASSERT(d->ld_site != NULL && next->ld_type != NULL); + next->ld_site = d->ld_site; + rc = next->ld_type->ldt_ops->ldto_device_init( + env, next, next->ld_type->ldt_name, NULL); + if (rc == 0) { + lu_device_get(next); + lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init); + } + RETURN(rc); +} + +static struct lu_device *vvp_device_fini(const struct lu_env *env, + struct lu_device *d) +{ + return cl2lu_dev(lu2vvp_dev(d)->vdv_next); } static const struct lu_device_type_operations vvp_device_type_ops = { @@ -158,10 +252,10 @@ static const struct lu_device_type_operations vvp_device_type_ops = { .ldto_start = vvp_type_start, .ldto_stop = vvp_type_stop, - .ldto_device_alloc = vvp_device_alloc, - .ldto_device_free = ccc_device_free, - .ldto_device_init = ccc_device_init, - .ldto_device_fini = ccc_device_fini + .ldto_device_alloc = vvp_device_alloc, + .ldto_device_free = vvp_device_free, + .ldto_device_init = vvp_device_init, + .ldto_device_fini = vvp_device_fini, }; struct lu_device_type vvp_device_type = { @@ -171,30 +265,63 @@ struct lu_device_type vvp_device_type = { .ldt_ctx_tags = LCT_CL_THREAD }; +#ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT +unsigned int (*vvp_account_page_dirtied)(struct page *page, + struct address_space *mapping); + +unsigned int ll_account_page_dirtied(struct page *page, + struct address_space *mapping) +{ + /* must use __set_page_dirty, which means unlocking and + * relocking, which hurts performance. + */ + ll_xa_unlock(&mapping->i_pages); + __set_page_dirty(page, mapping, 0); + ll_xa_lock(&mapping->i_pages); + return 0; +} +#endif + /** * A mutex serializing calls to vvp_inode_fini() under extreme memory * pressure, when environments cannot be allocated. */ int vvp_global_init(void) { - int result; + int rc; + + rc = lu_kmem_init(vvp_caches); + if (rc != 0) + return rc; + + rc = lu_device_type_init(&vvp_device_type); + if (rc != 0) + goto out_kmem; + +#ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT + /* + * Kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied + */ + vvp_account_page_dirtied = (void *) + cfs_kallsyms_lookup_name("account_page_dirtied"); + if (!vvp_account_page_dirtied) + vvp_account_page_dirtied = ll_account_page_dirtied; +#endif - result = lu_kmem_init(vvp_caches); - if (result == 0) { - result = ccc_global_init(&vvp_device_type); - if (result != 0) - lu_kmem_fini(vvp_caches); - } - return result; + return 0; + +out_kmem: + lu_kmem_fini(vvp_caches); + + return rc; } void vvp_global_fini(void) { - ccc_global_fini(&vvp_device_type); - lu_kmem_fini(vvp_caches); + lu_device_type_fini(&vvp_device_type); + lu_kmem_fini(vvp_caches); } - /***************************************************************************** * * mirror obd-devices into cl devices. 
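
For reference, the cache and context-key hunks earlier in this patch (ll_thread_kmem, vvp_object_kmem, vvp_session_kmem, vvp_thread_kmem, and the ll_thread_key/vvp_thread_key pairs) all follow one pattern: a slab cache declared in the vvp_caches table backs a per-thread or per-session structure that ->lct_init allocates and ->lct_fini frees, while the caches themselves are created and destroyed in vvp_global_init()/vvp_global_fini() via lu_kmem_init()/lu_kmem_fini(). The sketch below is illustrative only and not part of this patch; the "foo" names are hypothetical, and it reuses only helpers that already appear in this diff (OBD_SLAB_ALLOC_PTR_GFP, OBD_SLAB_FREE_PTR, struct lu_kmem_descr, struct lu_context_key).

/* Illustrative sketch, not part of the patch: per-thread context data for a
 * hypothetical "foo" layer, mirroring the ll_thread_key/vvp_thread_key
 * pattern above.  Assumes the same Lustre-internal headers as vvp_dev.c. */
static struct kmem_cache *foo_thread_kmem;

struct foo_thread_info {
	int fti_scratch;	/* hypothetical per-thread scratch state */
};

static struct lu_kmem_descr foo_caches[] = {
	{
		.ckd_cache = &foo_thread_kmem,
		.ckd_name  = "foo_thread_kmem",
		.ckd_size  = sizeof(struct foo_thread_info),
	},
	{
		.ckd_cache = NULL
	}
};

static void *foo_thread_key_init(const struct lu_context *ctx,
				 struct lu_context_key *key)
{
	struct foo_thread_info *fti;

	OBD_SLAB_ALLOC_PTR_GFP(fti, foo_thread_kmem, GFP_NOFS);
	if (fti == NULL)
		fti = ERR_PTR(-ENOMEM);
	return fti;
}

static void foo_thread_key_fini(const struct lu_context *ctx,
				struct lu_context_key *key, void *data)
{
	struct foo_thread_info *fti = data;

	OBD_SLAB_FREE_PTR(fti, foo_thread_kmem);
}

struct lu_context_key foo_thread_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = foo_thread_key_init,
	.lct_fini = foo_thread_key_fini,
};

/* The caches come and go with lu_kmem_init(foo_caches) and
 * lu_kmem_fini(foo_caches), exactly as vvp_global_init() and
 * vvp_global_fini() do above for vvp_caches. */
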
@@ -207,7 +334,7 @@ int cl_sb_init(struct super_block *sb) struct cl_device *cl; struct lu_env *env; int rc = 0; - int refcheck; + __u16 refcheck; sbi = ll_s2sbi(sb); env = cl_env_get(&refcheck); @@ -215,7 +342,6 @@ int cl_sb_init(struct super_block *sb) cl = cl_type_setup(env, NULL, &vvp_device_type, sbi->ll_dt_exp->exp_obd->obd_lu_dev); if (!IS_ERR(cl)) { - cl2ccc_dev(cl)->cdv_sb = sb; sbi->ll_cl = cl; sbi->ll_site = cl2lu_dev(cl)->ld_site; } @@ -230,7 +356,7 @@ int cl_sb_fini(struct super_block *sb) struct ll_sb_info *sbi; struct lu_env *env; struct cl_device *cld; - int refcheck; + __u16 refcheck; int result; ENTRY; @@ -250,153 +376,67 @@ int cl_sb_fini(struct super_block *sb) CERROR("Cannot cleanup cl-stack due to memory shortage.\n"); result = PTR_ERR(env); } - /* - * If mount failed (sbi->ll_cl == NULL), and this there are no other - * mounts, stop device types manually (this usually happens - * automatically when last device is destroyed). - */ - lu_types_stop(); - RETURN(result); + + RETURN(result); } /**************************************************************************** * - * /proc/fs/lustre/llite/$MNT/dump_page_cache + * debugfs/lustre/llite/$MNT/dump_page_cache * ****************************************************************************/ -/* - * To represent contents of a page cache as a byte stream, following - * information if encoded in 64bit offset: - * - * - file hash bucket in lu_site::ls_hash[] 28bits - * - * - how far file is from bucket head 4bits - * - * - page index 32bits - * - * First two data identify a file in the cache uniquely. - */ - -#define PGC_OBJ_SHIFT (32 + 4) -#define PGC_DEPTH_SHIFT (32) - -struct vvp_pgcache_id { - unsigned vpi_bucket; - unsigned vpi_depth; - uint32_t vpi_index; - - unsigned vpi_curdep; - struct lu_object_header *vpi_obj; +struct vvp_seq_private { + struct ll_sb_info *vsp_sbi; + struct lu_env *vsp_env; + u16 vsp_refcheck; + struct cl_object *vsp_clob; + struct rhashtable_iter vsp_iter; + u32 vsp_page_index; + /* + * prev_pos is the 'pos' of the last object returned + * by ->start of ->next. 
+ */ + loff_t vvp_prev_pos; }; -static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id) +static struct page *vvp_pgcache_current(struct vvp_seq_private *priv) { - CLASSERT(sizeof(pos) == sizeof(__u64)); - - id->vpi_index = pos & 0xffffffff; - id->vpi_depth = (pos >> PGC_DEPTH_SHIFT) & 0xf; - id->vpi_bucket = ((unsigned long long)pos >> PGC_OBJ_SHIFT); -} - -static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id) -{ - return - ((__u64)id->vpi_index) | - ((__u64)id->vpi_depth << PGC_DEPTH_SHIFT) | - ((__u64)id->vpi_bucket << PGC_OBJ_SHIFT); -} - -static int vvp_pgcache_obj_get(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode, void *data) -{ - struct vvp_pgcache_id *id = data; - struct lu_object_header *hdr = cfs_hash_object(hs, hnode); - - if (id->vpi_curdep-- > 0) - return 0; /* continue */ - - if (lu_object_is_dying(hdr)) - return 1; - - cfs_hash_get(hs, hnode); - id->vpi_obj = hdr; - return 1; -} - -static struct cl_object *vvp_pgcache_obj(const struct lu_env *env, - struct lu_device *dev, - struct vvp_pgcache_id *id) -{ - LASSERT(lu_device_is_cl(dev)); - - id->vpi_depth &= 0xf; - id->vpi_obj = NULL; - id->vpi_curdep = id->vpi_depth; - - cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket, - vvp_pgcache_obj_get, id); - if (id->vpi_obj != NULL) { - struct lu_object *lu_obj; - - lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type); - if (lu_obj != NULL) { - lu_object_ref_add(lu_obj, "dump", cfs_current()); - return lu2cl(lu_obj); - } - lu_object_put(env, lu_object_top(id->vpi_obj)); - - } else if (id->vpi_curdep > 0) { - id->vpi_depth = 0xf; - } - return NULL; -} - -static loff_t vvp_pgcache_find(const struct lu_env *env, - struct lu_device *dev, loff_t pos) -{ - struct cl_object *clob; - struct lu_site *site; - struct vvp_pgcache_id id; - - site = dev->ld_site; - vvp_pgcache_id_unpack(pos, &id); - - while (1) { - if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash)) - return ~0ULL; - clob = vvp_pgcache_obj(env, dev, &id); - if (clob != NULL) { - struct cl_object_header *hdr; - int nr; - struct cl_page *pg; - - /* got an object. Find next page. */ - hdr = cl_object_header(clob); - - spin_lock(&hdr->coh_page_guard); - nr = radix_tree_gang_lookup(&hdr->coh_tree, - (void **)&pg, - id.vpi_index, 1); - if (nr > 0) { - id.vpi_index = pg->cp_index; - /* Cant support over 16T file */ - nr = !(pg->cp_index > 0xffffffff); - } - spin_unlock(&hdr->coh_page_guard); - - lu_object_ref_del(&clob->co_lu, "dump", cfs_current()); - cl_object_put(env, clob); - if (nr > 0) - return vvp_pgcache_id_pack(&id); - } - /* to the next object. 
*/ - ++id.vpi_depth; - id.vpi_depth &= 0xf; - if (id.vpi_depth == 0 && ++id.vpi_bucket == 0) - return ~0ULL; - id.vpi_index = 0; - } + struct lu_device *dev = &priv->vsp_sbi->ll_cl->cd_lu_dev; + struct lu_object_header *h; + struct page *vmpage = NULL; + + rhashtable_walk_start(&priv->vsp_iter); + while ((h = rhashtable_walk_next(&priv->vsp_iter)) != NULL) { + struct inode *inode; + int nr; + + if (!priv->vsp_clob) { + struct lu_object *lu_obj; + + lu_obj = lu_object_get_first(h, dev); + if (!lu_obj) + continue; + + priv->vsp_clob = lu2cl(lu_obj); + lu_object_ref_add(lu_obj, "dump", current); + priv->vsp_page_index = 0; + } + + inode = vvp_object_inode(priv->vsp_clob); + nr = find_get_pages_contig(inode->i_mapping, + priv->vsp_page_index, 1, &vmpage); + if (nr > 0) { + priv->vsp_page_index = vmpage->index; + break; + } + lu_object_ref_del(&priv->vsp_clob->co_lu, "dump", current); + cl_object_put(priv->vsp_env, priv->vsp_clob); + priv->vsp_clob = NULL; + priv->vsp_page_index = 0; + } + rhashtable_walk_stop(&priv->vsp_iter); + return vmpage; } #define seq_page_flag(seq, page, flag, has_flags) do { \ @@ -407,117 +447,105 @@ static loff_t vvp_pgcache_find(const struct lu_env *env, } while(0) static void vvp_pgcache_page_show(const struct lu_env *env, - struct seq_file *seq, struct cl_page *page) + struct seq_file *seq, struct cl_page *page) { - struct ccc_page *cpg; - cfs_page_t *vmpage; - int has_flags; - - cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type)); - vmpage = cpg->cpg_page; - seq_printf(seq," %5i | %p %p %s %s %s %s | %p %lu/%u(%p) %lu %u [", - 0 /* gen */, - cpg, page, - "none", - cpg->cpg_write_queued ? "wq" : "- ", - cpg->cpg_defer_uptodate ? "du" : "- ", - PageWriteback(vmpage) ? "wb" : "-", - vmpage, vmpage->mapping->host->i_ino, - vmpage->mapping->host->i_generation, - vmpage->mapping->host, vmpage->index, - page_count(vmpage)); - has_flags = 0; - seq_page_flag(seq, vmpage, locked, has_flags); - seq_page_flag(seq, vmpage, error, has_flags); - seq_page_flag(seq, vmpage, referenced, has_flags); - seq_page_flag(seq, vmpage, uptodate, has_flags); - seq_page_flag(seq, vmpage, dirty, has_flags); - seq_page_flag(seq, vmpage, writeback, has_flags); - seq_printf(seq, "%s]\n", has_flags ? "" : "-"); + struct vvp_page *vpg; + struct page *vmpage; + int has_flags; + + vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type)); + vmpage = vpg->vpg_page; + seq_printf(seq, " %5i | %p %p %s %s %s | %p "DFID"(%p) %lu %u [", + 0 /* gen */, + vpg, page, + "none", + vpg->vpg_defer_uptodate ? "du" : "- ", + PageWriteback(vmpage) ? "wb" : "-", + vmpage, + PFID(ll_inode2fid(vmpage->mapping->host)), + vmpage->mapping->host, vmpage->index, + page_count(vmpage)); + has_flags = 0; + seq_page_flag(seq, vmpage, locked, has_flags); + seq_page_flag(seq, vmpage, error, has_flags); + seq_page_flag(seq, vmpage, referenced, has_flags); + seq_page_flag(seq, vmpage, uptodate, has_flags); + seq_page_flag(seq, vmpage, dirty, has_flags); + seq_page_flag(seq, vmpage, writeback, has_flags); + seq_printf(seq, "%s]\n", has_flags ? 
"" : "-"); } static int vvp_pgcache_show(struct seq_file *f, void *v) { - loff_t pos; - struct ll_sb_info *sbi; - struct cl_object *clob; - struct lu_env *env; - struct cl_page *page; - struct cl_object_header *hdr; - struct vvp_pgcache_id id; - int refcheck; - int result; - - env = cl_env_get(&refcheck); - if (!IS_ERR(env)) { - pos = *(loff_t *) v; - vvp_pgcache_id_unpack(pos, &id); - sbi = f->private; - clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id); - if (clob != NULL) { - hdr = cl_object_header(clob); - - spin_lock(&hdr->coh_page_guard); - page = cl_page_lookup(hdr, id.vpi_index); - spin_unlock(&hdr->coh_page_guard); - - seq_printf(f, "%8x@"DFID": ", - id.vpi_index, PFID(&hdr->coh_lu.loh_fid)); - if (page != NULL) { - vvp_pgcache_page_show(env, f, page); - cl_page_put(env, page); - } else - seq_puts(f, "missing\n"); - lu_object_ref_del(&clob->co_lu, "dump", cfs_current()); - cl_object_put(env, clob); - } else - seq_printf(f, "%llx missing\n", pos); - cl_env_put(env, &refcheck); - result = 0; - } else - result = PTR_ERR(env); - return result; + struct vvp_seq_private *priv = f->private; + struct page *vmpage = v; + struct cl_page *page; + + seq_printf(f, "%8lx@" DFID ": ", vmpage->index, + PFID(lu_object_fid(&priv->vsp_clob->co_lu))); + lock_page(vmpage); + page = cl_vmpage_page(vmpage, priv->vsp_clob); + unlock_page(vmpage); + put_page(vmpage); + + if (page) { + vvp_pgcache_page_show(priv->vsp_env, f, page); + cl_page_put(priv->vsp_env, page); + } else { + seq_puts(f, "missing\n"); + } + + return 0; } -static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos) +static void vvp_pgcache_rewind(struct vvp_seq_private *priv) { - struct ll_sb_info *sbi; - struct lu_env *env; - int refcheck; + if (priv->vvp_prev_pos) { + struct lu_site *s = priv->vsp_sbi->ll_cl->cd_lu_dev.ld_site; + + rhashtable_walk_exit(&priv->vsp_iter); + rhashtable_walk_enter(&s->ls_obj_hash, &priv->vsp_iter); + priv->vvp_prev_pos = 0; + if (priv->vsp_clob) { + lu_object_ref_del(&priv->vsp_clob->co_lu, "dump", + current); + cl_object_put(priv->vsp_env, priv->vsp_clob); + } + priv->vsp_clob = NULL; + } +} - sbi = f->private; +static struct page *vvp_pgcache_next_page(struct vvp_seq_private *priv) +{ + priv->vsp_page_index += 1; + return vvp_pgcache_current(priv); +} - env = cl_env_get(&refcheck); - if (!IS_ERR(env)) { - sbi = f->private; - if (sbi->ll_site->ls_obj_hash->hs_cur_bits > 64 - PGC_OBJ_SHIFT) - pos = ERR_PTR(-EFBIG); - else { - *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev, - *pos); - if (*pos == ~0ULL) - pos = NULL; - } - cl_env_put(env, &refcheck); - } - return pos; +static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos) +{ + struct vvp_seq_private *priv = f->private; + + if (*pos == 0) { + vvp_pgcache_rewind(priv); + } else if (*pos == priv->vvp_prev_pos) { + /* Return the current item */; + } else { + WARN_ON(*pos != priv->vvp_prev_pos + 1); + priv->vsp_page_index += 1; + } + + priv->vvp_prev_pos = *pos; + return vvp_pgcache_current(priv); } static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos) { - struct ll_sb_info *sbi; - struct lu_env *env; - int refcheck; + struct vvp_seq_private *priv = f->private; - env = cl_env_get(&refcheck); - if (!IS_ERR(env)) { - sbi = f->private; - *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev, *pos + 1); - if (*pos == ~0ULL) - pos = NULL; - cl_env_put(env, &refcheck); - } - return pos; + WARN_ON(*pos != priv->vvp_prev_pos); + *pos += 1; + priv->vvp_prev_pos = *pos; + return vvp_pgcache_next_page(priv); } static void 
vvp_pgcache_stop(struct seq_file *f, void *v) @@ -525,32 +553,56 @@ static void vvp_pgcache_stop(struct seq_file *f, void *v) /* Nothing to do */ } -static struct seq_operations vvp_pgcache_ops = { - .start = vvp_pgcache_start, - .next = vvp_pgcache_next, - .stop = vvp_pgcache_stop, - .show = vvp_pgcache_show +static const struct seq_operations vvp_pgcache_ops = { + .start = vvp_pgcache_start, + .next = vvp_pgcache_next, + .stop = vvp_pgcache_stop, + .show = vvp_pgcache_show }; static int vvp_dump_pgcache_seq_open(struct inode *inode, struct file *filp) { - struct proc_dir_entry *dp = PDE(inode); - struct ll_sb_info *sbi = dp->data; - struct seq_file *seq; - int result; - - result = seq_open(filp, &vvp_pgcache_ops); - if (result == 0) { - seq = filp->private_data; - seq->private = sbi; - } - return result; + struct vvp_seq_private *priv; + struct lu_site *s; + + priv = __seq_open_private(filp, &vvp_pgcache_ops, sizeof(*priv)); + if (!priv) + return -ENOMEM; + + priv->vsp_sbi = inode->i_private; + priv->vsp_env = cl_env_get(&priv->vsp_refcheck); + priv->vsp_clob = NULL; + if (IS_ERR(priv->vsp_env)) { + int err = PTR_ERR(priv->vsp_env); + + seq_release_private(inode, filp); + return err; + } + + s = priv->vsp_sbi->ll_cl->cd_lu_dev.ld_site; + rhashtable_walk_enter(&s->ls_obj_hash, &priv->vsp_iter); + + return 0; +} + +static int vvp_dump_pgcache_seq_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct vvp_seq_private *priv = seq->private; + + if (priv->vsp_clob) { + lu_object_ref_del(&priv->vsp_clob->co_lu, "dump", current); + cl_object_put(priv->vsp_env, priv->vsp_clob); + } + cl_env_put(priv->vsp_env, &priv->vsp_refcheck); + rhashtable_walk_exit(&priv->vsp_iter); + return seq_release_private(inode, file); } -struct file_operations vvp_dump_pgcache_file_ops = { - .owner = THIS_MODULE, - .open = vvp_dump_pgcache_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, +const struct file_operations vvp_dump_pgcache_file_ops = { + .owner = THIS_MODULE, + .open = vvp_dump_pgcache_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = vvp_dump_pgcache_seq_release, };
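
The dump_page_cache rework above drops the hand-rolled 64-bit position encoding in favour of a stateful iterator kept in struct vvp_seq_private, allocated per open file by __seq_open_private() and torn down in vvp_dump_pgcache_seq_release(). Below is a minimal, self-contained sketch (not part of this patch) of that same seq_file start/next/stop/show contract and the __seq_open_private()/seq_release_private() pairing, using a hypothetical in-memory "foo" table in place of the page-cache walk; the foo_* names are illustrative.

/*
 * Illustrative sketch only: the seq_file iterator contract that
 * dump_page_cache now follows, for a hypothetical read-only "foo" file.
 */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static char *foo_items[] = { "alpha", "beta", "gamma" };

struct foo_seq_private {
	loff_t fsp_prev_pos;	/* last *pos handed out by ->start/->next,
				 * analogous to vvp_prev_pos bookkeeping above */
};

static void *foo_seq_start(struct seq_file *m, loff_t *pos)
{
	struct foo_seq_private *priv = m->private;

	/* ->start may run again after ->stop; resume at *pos, do not rewind */
	priv->fsp_prev_pos = *pos;
	if (*pos >= ARRAY_SIZE(foo_items))
		return NULL;			/* NULL terminates the walk */
	return foo_items[*pos];
}

static void *foo_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct foo_seq_private *priv = m->private;

	++*pos;
	priv->fsp_prev_pos = *pos;
	if (*pos >= ARRAY_SIZE(foo_items))
		return NULL;
	return foo_items[*pos];
}

static void foo_seq_stop(struct seq_file *m, void *v)
{
	/* nothing to unlock or release in this sketch */
}

static int foo_seq_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", (char *)v);
	return 0;
}

static const struct seq_operations foo_seq_ops = {
	.start	= foo_seq_start,
	.next	= foo_seq_next,
	.stop	= foo_seq_stop,
	.show	= foo_seq_show,
};

static int foo_seq_open(struct inode *inode, struct file *file)
{
	struct foo_seq_private *priv;

	priv = __seq_open_private(file, &foo_seq_ops, sizeof(*priv));
	return priv ? 0 : -ENOMEM;
}

/* seq_release_private() frees what __seq_open_private() allocated;
 * vvp_dump_pgcache_seq_release() above wraps it to also drop the env
 * reference and exit the rhashtable walker first. */
static const struct file_operations foo_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= foo_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
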