X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fllite%2Fvvp_internal.h;h=c9240dfa173be548945981b5d1cdda7492faa11c;hb=8a13ffb4a1eafff18cb891fdc812afaf09136f29;hp=5b8c74717787e0d5953d79e371d290343090b6ec;hpb=e2d2fbc07bf8f45e19d8f3127c3a7088351126d6;p=fs%2Flustre-release.git

diff --git a/lustre/llite/vvp_internal.h b/lustre/llite/vvp_internal.h
index 5b8c747..c9240df 100644
--- a/lustre/llite/vvp_internal.h
+++ b/lustre/llite/vvp_internal.h
@@ -15,11 +15,7 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2013, 2014, Intel Corporation.
+ * Copyright (c) 2013, 2016, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -46,165 +42,163 @@ enum obd_notify_event;
 struct inode;
 
-struct lov_stripe_md;
 struct lustre_md;
-struct obd_capa;
 struct obd_device;
 struct obd_export;
 struct page;
 
-blkcnt_t dirty_cnt(struct inode *inode);
-
-int cl_glimpse_size0(struct inode *inode, int agl);
-int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
-		    struct inode *inode, struct cl_object *clob, int agl);
-
-static inline int cl_glimpse_size(struct inode *inode)
-{
-	return cl_glimpse_size0(inode, 0);
-}
-
-static inline int cl_agl(struct inode *inode)
-{
-	return cl_glimpse_size0(inode, 1);
-}
-
-/**
- * Locking policy for setattr.
- */
-enum ccc_setattr_lock_type {
-	/** Locking is done by server */
-	SETATTR_NOLOCK,
-	/** Extent lock is enqueued */
-	SETATTR_EXTENT_LOCK,
-	/** Existing local extent lock is used */
-	SETATTR_MATCH_LOCK
+enum vvp_io_subtype {
+	/** normal IO */
+	IO_NORMAL,
+	/** io started from splice_{read|write} */
+	IO_SPLICE,
 };
 
-
 /**
- * IO state private to vvp or slp layers.
+ * IO state private to VVP layer.
  */
-struct ccc_io {
+struct vvp_io {
 	/** super class */
-	struct cl_io_slice cui_cl;
-	struct cl_io_lock_link cui_link;
+	struct cl_io_slice vui_cl;
+	struct cl_io_lock_link vui_link;
 	/**
 	 * I/O vector information to or from which read/write is going.
 	 */
-	struct iovec *cui_iov;
-	unsigned long cui_nrsegs;
-	/**
-	 * Total iov count for left IO.
-	 */
-	unsigned long cui_tot_nrsegs;
-	/**
-	 * Old length for iov that was truncated partially.
-	 */
-	size_t cui_iov_olen;
+	struct iov_iter *vui_iter;
 	/**
	 * Total size for the left IO.
	 */
-	size_t cui_tot_count;
+	size_t vui_tot_count;
 
 	union {
+		struct vvp_fault_io {
+			/**
+			 * Inode modification time that is checked across DLM
+			 * lock request.
+			 */
+			time64_t ft_mtime;
+			struct vm_area_struct *ft_vma;
+			/**
+			 * locked page returned from vvp_io
+			 */
+			struct page *ft_vmpage;
+			/**
+			 * kernel fault info
+			 */
+			struct vm_fault *ft_vmf;
+			/**
+			 * fault API used bitflags for return code.
+			 */
+			unsigned int ft_flags;
+			/**
+			 * check that flags are from filemap_fault
+			 */
+			bool ft_flags_valid;
+		} fault;
 		struct {
-			enum ccc_setattr_lock_type cui_local_lock;
-		} setattr;
+			struct pipe_inode_info *vui_pipe;
+			unsigned int vui_flags;
+		} splice;
 		struct {
-			struct cl_page_list cui_queue;
-			unsigned long cui_written;
-			int cui_from;
-			int cui_to;
+			struct cl_page_list vui_queue;
+			unsigned long vui_written;
+			int vui_from;
+			int vui_to;
 		} write;
 	} u;
+
+	enum vvp_io_subtype vui_io_subtype;
+
 	/**
	 * Layout version when this IO is initialized
	 */
-	__u32 cui_layout_gen;
+	__u32 vui_layout_gen;
 	/**
	 * File descriptor against which IO is done.
	 */
-	struct ll_file_data *cui_fd;
-	struct kiocb *cui_iocb;
+	struct ll_file_data *vui_fd;
+	struct kiocb *vui_iocb;
+
+	/* Readahead state. */
+	pgoff_t vui_ra_start;
+	pgoff_t vui_ra_count;
+	/* Set when vui_ra_{start,count} have been initialized. */
+	bool vui_ra_valid;
 };
 
-/**
- * True, if \a io is a normal io, False for other splice_{read,write}.
- * must be impementated in arch specific code.
- */
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
+extern struct lu_device_type vvp_device_type;
 
-extern struct lu_context_key ccc_key;
-extern struct lu_context_key ccc_session_key;
+extern struct lu_context_key vvp_session_key;
+extern struct lu_context_key vvp_thread_key;
 
+extern struct kmem_cache *vvp_lock_kmem;
 extern struct kmem_cache *vvp_object_kmem;
 
-struct ccc_thread_info {
-	struct cl_lock cti_lock;
-	struct cl_lock_descr cti_descr;
-	struct cl_io cti_io;
-	struct cl_attr cti_attr;
+struct vvp_thread_info {
+	struct cl_lock vti_lock;
+	struct cl_lock_descr vti_descr;
+	struct cl_io vti_io;
+	struct cl_attr vti_attr;
 };
 
-static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
+static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
 {
-	struct ccc_thread_info *info;
+	struct vvp_thread_info *vti;
 
-	info = lu_context_key_get(&env->le_ctx, &ccc_key);
-	LASSERT(info != NULL);
+	vti = lu_context_key_get(&env->le_ctx, &vvp_thread_key);
+	LASSERT(vti != NULL);
 
-	return info;
+	return vti;
 }
 
-static inline struct cl_lock *ccc_env_lock(const struct lu_env *env)
+static inline struct cl_lock *vvp_env_lock(const struct lu_env *env)
 {
-	struct cl_lock *lock = &ccc_env_info(env)->cti_lock;
+	struct cl_lock *lock = &vvp_env_info(env)->vti_lock;
 
 	memset(lock, 0, sizeof(*lock));
 
 	return lock;
 }
 
-static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
+static inline struct cl_attr *vvp_env_thread_attr(const struct lu_env *env)
 {
-	struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
+	struct cl_attr *attr = &vvp_env_info(env)->vti_attr;
 
 	memset(attr, 0, sizeof(*attr));
 
 	return attr;
 }
 
-static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
+static inline struct cl_io *vvp_env_thread_io(const struct lu_env *env)
 {
-	struct cl_io *io = &ccc_env_info(env)->cti_io;
+	struct cl_io *io = &vvp_env_info(env)->vti_io;
 
 	memset(io, 0, sizeof(*io));
 
 	return io;
 }
 
-struct ccc_session {
-	struct ccc_io cs_ios;
+struct vvp_session {
+	struct vvp_io vs_ios;
 };
 
-static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
+static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
 {
-	struct ccc_session *ses;
+	struct vvp_session *ses;
 
-	ses = lu_context_key_get(env->le_ses, &ccc_session_key);
+	ses = lu_context_key_get(env->le_ses, &vvp_session_key);
 	LASSERT(ses != NULL);
 
 	return ses;
 }
 
-static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
+static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
 {
-	return &ccc_env_session(env)->cs_ios;
+	return &vvp_env_session(env)->vs_ios;
 }
 
 /**
- * ccc-private object state.
+ * VPP-private object state.
  */
 struct vvp_object {
 	struct cl_object_header vob_header;
@@ -212,14 +206,6 @@ struct vvp_object {
 	struct inode *vob_inode;
 
 	/**
-	 * A list of dirty pages pending IO in the cache. Used by
-	 * SOM. Protected by ll_inode_info::lli_lock.
-	 *
-	 * \see ccc_page::cpg_pending_linkage
-	 */
-	struct list_head vob_pending_list;
-
-	/**
	 * Number of transient pages. This is no longer protected by i_sem,
	 * and needs to be atomic. This is not actually used for anything,
	 * and can probably be removed.
@@ -246,113 +232,36 @@ struct vvp_object {
 };
 
 /**
- * ccc-private page state.
+ * VVP-private page state.
  */
-struct ccc_page {
-	struct cl_page_slice cpg_cl;
-	unsigned cpg_defer_uptodate:1,
-		 cpg_ra_used:1,
-		 cpg_write_queued:1;
-	/**
-	 * Non-empty iff this page is already counted in
-	 * vvp_object::vob_pending_list. This list is only used as a flag,
-	 * that is, never iterated through, only checked for list_empty(), but
-	 * having a list is useful for debugging.
-	 */
-	struct list_head cpg_pending_linkage;
+struct vvp_page {
+	struct cl_page_slice vpg_cl;
+	unsigned vpg_defer_uptodate:1,
+		 vpg_ra_updated:1,
+		 vpg_ra_used:1;
 	/** VM page */
-	struct page *cpg_page;
+	struct page *vpg_page;
 };
 
-static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
+static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
 {
-	return container_of(slice, struct ccc_page, cpg_cl);
+	return container_of(slice, struct vvp_page, vpg_cl);
 }
 
-static inline pgoff_t ccc_index(struct ccc_page *ccc)
+static inline pgoff_t vvp_index(struct vvp_page *vpg)
 {
-	return ccc->cpg_cl.cpl_index;
+	return vpg->vpg_cl.cpl_index;
 }
 
-struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
-
 struct vvp_device {
 	struct cl_device vdv_cl;
-	struct super_block *vdv_sb;
 	struct cl_device *vdv_next;
 };
 
-struct ccc_lock {
-	struct cl_lock_slice clk_cl;
-};
-
-struct ccc_req {
-	struct cl_req_slice crq_cl;
+struct vvp_lock {
+	struct cl_lock_slice vlk_cl;
 };
 
-void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key);
-void ccc_key_fini(const struct lu_context *ctx, struct lu_context_key *key,
-		  void *data);
-void *ccc_session_key_init(const struct lu_context *ctx,
-			   struct lu_context_key *key);
-void ccc_session_key_fini(const struct lu_context *ctx,
-			  struct lu_context_key *key, void *data);
-
-int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
-		 struct cl_req *req);
-void ccc_umount(const struct lu_env *env, struct cl_device *dev);
-int ccc_global_init(struct lu_device_type *device_type);
-void ccc_global_fini(struct lu_device_type *device_type);
-int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
-		  struct cl_lock *lock, const struct cl_io *io,
-		  const struct cl_lock_operations *lkops);
-int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
-void ccc_transient_page_verify(const struct cl_page *page);
-int ccc_transient_page_own(const struct lu_env *env,
-			   const struct cl_page_slice *slice,
-			   struct cl_io *io, int nonblock);
-void ccc_transient_page_assume(const struct lu_env *env,
-			       const struct cl_page_slice *slice,
-			       struct cl_io *io);
-void ccc_transient_page_unassume(const struct lu_env *env,
-				 const struct cl_page_slice *slice,
-				 struct cl_io *io);
-void ccc_transient_page_disown(const struct lu_env *env,
-			       const struct cl_page_slice *slice,
-			       struct cl_io *io);
-void ccc_transient_page_discard(const struct lu_env *env,
-				const struct cl_page_slice *slice,
-				struct cl_io *io);
-int ccc_transient_page_prep(const struct lu_env *env,
-			    const struct cl_page_slice *slice,
-			    struct cl_io *io);
-void ccc_lock_delete(const struct lu_env *env,
-		     const struct cl_lock_slice *slice);
-void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
-int ccc_lock_enqueue(const struct lu_env *env,
-		     const struct cl_lock_slice *slice,
-		     struct cl_io *io, struct cl_sync_io *anchor);
-void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios);
-int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
-			  __u32 enqflags, enum cl_lock_mode mode,
-			  pgoff_t start, pgoff_t end);
-int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
-		    __u32 enqflags, enum cl_lock_mode mode,
-		    loff_t start, loff_t end);
-void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
-void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
-		    size_t nob);
-void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
-		       struct cl_io *io);
-int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
-		  struct cl_io *io, loff_t start, size_t count, int *exceed);
-void ccc_req_completion(const struct lu_env *env,
-			const struct cl_req_slice *slice, int ioret);
-void ccc_req_attr_set(const struct lu_env *env,
-		      const struct cl_req_slice *slice,
-		      const struct cl_object *obj,
-		      struct cl_req_attr *oa, obd_valid flags);
-
 static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv)
 {
 	return &vdv->vdv_cl.cd_lu_dev;
@@ -386,30 +295,22 @@ static inline struct inode *vvp_object_inode(const struct cl_object *obj)
 int vvp_object_invariant(const struct cl_object *obj);
 struct vvp_object *cl_inode2vvp(struct inode *inode);
 
-struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice);
-struct ccc_io *cl2ccc_io(const struct lu_env *env,
-			 const struct cl_io_slice *slice);
-struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice);
-struct page *cl2vm_page(const struct cl_page_slice *slice);
-
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
-		   struct obd_capa *capa);
-
-struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
-int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
-void cl_inode_fini(struct inode *inode);
-int cl_local_size(struct inode *inode);
+static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
+{
+	return cl2vvp_page(slice)->vpg_page;
+}
 
-__u16 ll_dirent_type_get(struct lu_dirent *ent);
-__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
-__u32 cl_fid_build_gen(const struct lu_fid *fid);
+static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
+{
+	return container_of(slice, struct vvp_lock, vlk_cl);
+}
 
 #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
 # define CLOBINVRNT(env, clob, expr)					\
	do {								\
		if (unlikely(!(expr))) {				\
			LU_OBJECT_DEBUG(D_ERROR, (env), &(clob)->co_lu,	\
-					#expr "\n");			\
+					#expr);				\
			LINVRNT(0);					\
		}							\
	} while (0)
@@ -418,46 +319,11 @@ __u32 cl_fid_build_gen(const struct lu_fid *fid);
	((void)sizeof(env), (void)sizeof(clob), (void)sizeof !!(expr))
 #endif /* CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
 
-int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
-int cl_ocd_update(struct obd_device *host,
-		  struct obd_device *watched,
-		  enum obd_notify_event ev, void *owner, void *data);
-
-struct ccc_grouplock {
-	struct lu_env *cg_env;
-	struct cl_io *cg_io;
-	struct cl_lock *cg_lock;
-	unsigned long cg_gid;
-};
-
-int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
-		     struct ccc_grouplock *cg);
-void cl_put_grouplock(struct ccc_grouplock *cg);
-
-/**
- * New interfaces to get and put lov_stripe_md from lov layer. This violates
- * layering because lov_stripe_md is supposed to be a private data in lov.
- *
- * NB: If you find you have to use these interfaces for your new code, please
- * think about it again. These interfaces may be removed in the future for
- * better layering. */
-struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
-void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
 int lov_read_and_clear_async_rc(struct cl_object *clob);
 
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
-void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
-
-enum {
-	LUSTRE_OPC_MKDIR = 0,
-	LUSTRE_OPC_SYMLINK = 1,
-	LUSTRE_OPC_MKNOD = 2,
-	LUSTRE_OPC_CREATE = 3,
-	LUSTRE_OPC_ANY = 5,
-};
-
 int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io);
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
 int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *io);
 int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
@@ -466,6 +332,9 @@ struct lu_object *vvp_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *dev);
 
+int vvp_global_init(void);
+void vvp_global_fini(void);
+
 extern const struct file_operations vvp_dump_pgcache_file_ops;
 
 #endif /* VVP_INTERNAL_H */
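
For context on how the renamed helpers above are typically used, here is a minimal sketch of the calling pattern: a caller obtains the per-thread scratch cl_io with vvp_env_thread_io(), the per-session VVP IO state with vvp_env_io(), fills in the vui_* fields that used to be spelled cui_*, and then drives the generic cl_io state machine. The function example_write(), its arguments, and the surrounding env/clob setup are hypothetical stand-ins for what the real llite callers (e.g. ll_file_io_generic()) do; this code is not part of the change above.

/*
 * Sketch only, under the assumptions stated above.  "env" is assumed to come
 * from cl_env_get() and "clob" to be the file's cl_object.
 */
static ssize_t example_write(const struct lu_env *env, struct cl_object *clob,
			     struct ll_file_data *fd, loff_t pos, size_t count)
{
	struct cl_io  *io  = vvp_env_thread_io(env);	/* zeroed per-thread cl_io */
	struct vvp_io *vio = vvp_env_io(env);		/* per-session VVP IO state */
	ssize_t result;

	io->ci_obj = clob;
	if (cl_io_rw_init(env, io, CIT_WRITE, pos, count) == 0) {
		vio->vui_fd = fd;			/* file the IO runs against */
		vio->vui_io_subtype = IO_NORMAL;	/* not a splice IO */
		/* real callers also set vio->vui_iter and vio->vui_iocb here
		 * so the VVP layer can copy the user data */
		result = cl_io_loop(env, io);		/* lock/start/end across layers */
		if (result == 0)
			result = io->ci_nob;		/* bytes actually written */
	} else {
		/* cl_io_rw_init() already handled or failed the IO */
		result = io->ci_result;
	}
	cl_io_fini(env, io);

	return result;
}

The only point the sketch relies on is visible in the diff itself: the rename is mechanical (cui_ becomes vui_, ccc_env_* become vvp_env_*), so existing callers change spellings rather than behaviour.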