X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flclient.h;h=afa462c9e985178e1a9c8f6f67454874be495cb4;hp=96fa5fbab3c9e54ad1ff67988fbb0309a4886262;hb=8701e7e4b5ec1b34700c95b9b6588f4745730b72;hpb=9fb46705ae86aa2c0ac29427f0ff24f923560eb7

diff --git a/lustre/include/lclient.h b/lustre/include/lclient.h
index 96fa5fb..afa462c 100644
--- a/lustre/include/lclient.h
+++ b/lustre/include/lclient.h
@@ -27,7 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -43,6 +43,18 @@
 #ifndef LCLIENT_H
 #define LCLIENT_H
 
+#include 
+#include 
+
+enum obd_notify_event;
+struct inode;
+struct lov_stripe_md;
+struct lustre_md;
+struct obd_capa;
+struct obd_device;
+struct obd_export;
+struct page;
+
 blkcnt_t dirty_cnt(struct inode *inode);
 
 int cl_glimpse_size0(struct inode *inode, int agl);
@@ -99,14 +111,19 @@ struct ccc_io {
 
         union {
                 struct {
-                        int cui_locks_released;
                         enum ccc_setattr_lock_type cui_local_lock;
                 } setattr;
-        } u;
-        /**
-         * True iff io is processing glimpse right now.
-         */
-        int cui_glimpse;
+                struct {
+                        struct cl_page_list cui_queue;
+                        unsigned long cui_written;
+                        int cui_from;
+                        int cui_to;
+                } write;
+        } u;
+        /**
+         * True iff io is processing glimpse right now.
+         */
+        int cui_glimpse;
         /**
          * Layout version when this IO is initialized
          */
@@ -115,13 +132,11 @@
          * File descriptor against which IO is done.
          */
         struct ll_file_data *cui_fd;
-#ifndef HAVE_FILE_WRITEV
         struct kiocb *cui_iocb;
-#endif
 };
 
 /**
- * True, if \a io is a normal io, False for other (sendfile, splice*).
+ * True, if \a io is a normal io, False for others (splice_{read,write}).
  * must be implemented in arch-specific code.
  */
 int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
@@ -190,7 +205,7 @@ struct ccc_object {
          *
          * \see ccc_page::cpg_pending_linkage
          */
-        cfs_list_t cob_pending_list;
+        struct list_head cob_pending_list;
 
         /**
         * Access to this counter is protected by inode->i_sem. Now that
@@ -198,12 +213,12 @@
          * we don't need to hold any lock.
          */
         int cob_transient_pages;
-        /**
-         * Number of outstanding mmaps on this file.
-         *
-         * \see ll_vm_open(), ll_vm_close().
-         */
-        cfs_atomic_t cob_mmap_cnt;
+        /**
+         * Number of outstanding mmaps on this file.
+         *
+         * \see ll_vm_open(), ll_vm_close().
+         */
+        atomic_t cob_mmap_cnt;
 
         /**
          * various flags
@@ -215,27 +230,27 @@
          *
          * \see ll_dirty_page_discard_warn.
          */
-        int cob_discard_page_warned:1;
+        unsigned int cob_discard_page_warned:1;
 };
 
 /**
  * ccc-private page state.
  */
 struct ccc_page {
-        struct cl_page_slice cpg_cl;
-        int cpg_defer_uptodate;
-        int cpg_ra_used;
-        int cpg_write_queued;
-        /**
-         * Non-empty iff this page is already counted in
-         * ccc_object::cob_pending_list. Protected by
-         * ccc_object::cob_pending_guard. This list is only used as a flag,
-         * that is, never iterated through, only checked for list_empty(), but
-         * having a list is useful for debugging.
-         */
-        cfs_list_t cpg_pending_linkage;
-        /** VM page */
-        cfs_page_t *cpg_page;
+        struct cl_page_slice cpg_cl;
+        unsigned cpg_defer_uptodate:1,
+                 cpg_ra_used:1,
+                 cpg_write_queued:1;
+        /**
+         * Non-empty iff this page is already counted in
+         * ccc_object::cob_pending_list. Protected by
+         * ccc_object::cob_pending_guard. This list is only used as a flag,
+         * that is, never iterated through, only checked for list_empty(), but
+         * having a list is useful for debugging.
+         */
+        struct list_head cpg_pending_linkage;
+        /** VM page */
+        struct page *cpg_page;
 };
@@ -243,7 +258,12 @@ static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
         return container_of(slice, struct ccc_page, cpg_cl);
 }
 
-struct cl_page *ccc_vmpage_page_transient(cfs_page_t *vmpage);
+static inline pgoff_t ccc_index(struct ccc_page *ccc)
+{
+        return ccc->cpg_cl.cpl_index;
+}
+
+struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
 
 struct ccc_device {
         struct cl_device cdv_cl;
@@ -305,10 +325,6 @@ int ccc_object_glimpse(const struct lu_env *env,
                        const struct cl_object *obj, struct ost_lvb *lvb);
 int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
                  const struct cl_object_conf *conf);
-cfs_page_t *ccc_page_vmpage(const struct lu_env *env,
-                            const struct cl_page_slice *slice);
-int ccc_page_is_under_lock(const struct lu_env *env,
-                           const struct cl_page_slice *slice, struct cl_io *io);
 int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
 void ccc_transient_page_verify(const struct cl_page *page);
 int ccc_transient_page_own(const struct lu_env *env,
@@ -334,6 +350,7 @@ void ccc_lock_delete(const struct lu_env *env,
 void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
 int ccc_lock_enqueue(const struct lu_env *env, const struct cl_lock_slice *slice,
                      struct cl_io *io, __u32 enqflags);
+int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
 int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
 int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
 int ccc_lock_fits_into(const struct lu_env *env,
@@ -374,14 +391,14 @@ struct ccc_lock *cl2ccc_lock (const struct cl_lock_slice *slice);
 struct ccc_io *cl2ccc_io (const struct lu_env *env,
                           const struct cl_io_slice *slice);
 struct ccc_req *cl2ccc_req (const struct cl_req_slice *slice);
-cfs_page_t *cl2vm_page (const struct cl_page_slice *slice);
+struct page *cl2vm_page (const struct cl_page_slice *slice);
 struct inode *ccc_object_inode(const struct cl_object *obj);
 struct ccc_object *cl_inode2ccc (struct inode *inode);
 
 int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
                    struct obd_capa *capa);
 
-struct cl_page *ccc_vmpage_page_transient(cfs_page_t *vmpage);
+struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
 int ccc_object_invariant(const struct cl_object *obj);
 int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
 void cl_inode_fini(struct inode *inode);
@@ -391,7 +408,7 @@ __u16 ll_dirent_type_get(struct lu_dirent *ent);
 __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
 __u32 cl_fid_build_gen(const struct lu_fid *fid);
 
-#ifdef INVARIANT_CHECK
+#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
 # define CLOBINVRNT(env, clob, expr)                                    \
   do {                                                                  \
           if (unlikely(!(expr))) {                                      \
@@ -399,10 +416,10 @@ __u32 cl_fid_build_gen(const struct lu_fid *fid);
                   LINVRNT(0);                                           \
           }                                                             \
   } while (0)
-#else /* !INVARIANT_CHECK */
+#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
 # define CLOBINVRNT(env, clob, expr)                                    \
-        ((void)sizeof(env), (void)sizeof(clob), (void)sizeof !!(expr))
-#endif /* !INVARIANT_CHECK */
+        ((void)sizeof(env), (void)sizeof(clob), (void)sizeof !!(expr))
+#endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
 
 int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
 int cl_ocd_update(struct obd_device *host,
@@ -435,16 +452,65 @@ struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
 void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
 
 /**
- * Data structure managing a client's cached clean pages. An LRU of
- * pages is maintained, along with other statistics.
+ * Data structure managing a client's cached pages. A count of
+ * "unstable" pages is maintained along with an LRU of clean pages;
+ * "unstable" pages are pages pinned by the ptlrpc layer for recovery
+ * purposes.
  */
 struct cl_client_cache {
-        cfs_atomic_t ccc_users; /* # of users (OSCs) of this data */
-        cfs_list_t ccc_lru; /* LRU list of cached clean pages */
-        spinlock_t ccc_lru_lock; /* lock for list */
-        cfs_atomic_t ccc_lru_left; /* # of LRU entries available */
-        unsigned long ccc_lru_max; /* Max # of LRU entries possible */
-        unsigned int ccc_lru_shrinkers; /* # of threads reclaiming */
+        /**
+         * # of users (OSCs)
+         */
+        atomic_t ccc_users;
+        /**
+         * # of LRU entries available
+         */
+        atomic_t ccc_lru_left;
+        /**
+         * List of entities (OSCs) for this LRU cache
+         */
+        struct list_head ccc_lru;
+        /**
+         * Max # of LRU entries
+         */
+        unsigned long ccc_lru_max;
+        /**
+         * Lock to protect the ccc_lru list
+         */
+        spinlock_t ccc_lru_lock;
+        /**
+         * # of threads doing shrinking
+         */
+        unsigned int ccc_lru_shrinkers;
+        /**
+         * Set if the unstable check is enabled
+         */
+        unsigned int ccc_unstable_check:1;
+        /**
+         * Waitq for waiting until the unstable page count reaches zero.
+         * Used at umount time; signaled on BRW commit.
+         */
+        wait_queue_head_t ccc_unstable_waitq;
+        /**
+         * # of unstable pages for this mount point
+         */
+        atomic_t ccc_unstable_nr;
+};
+
+enum {
+        LUSTRE_OPC_MKDIR        = 0,
+        LUSTRE_OPC_SYMLINK      = 1,
+        LUSTRE_OPC_MKNOD        = 2,
+        LUSTRE_OPC_CREATE       = 3,
+        LUSTRE_OPC_ANY          = 5
+};
+
+enum op_cli_flags {
+        CLI_SET_MEA     = 1 << 0,
+        CLI_RM_ENTRY    = 1 << 1,
+        CLI_HASH64      = 1 << 2,
+        CLI_API32       = 1 << 3,
+        CLI_MIGRATE     = 1 << 4,
 };
 
 #endif /* LCLIENT_H */
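
The unstable-page fields added to struct cl_client_cache above are only declared
by this patch; the code that drives them lives elsewhere in the client. For
orientation, the following is a minimal sketch of how the three new fields
(ccc_unstable_nr, ccc_unstable_waitq, ccc_unstable_check) are meant to interact.
The helper names ccc_unstable_add(), ccc_unstable_del() and ccc_unstable_wait()
are hypothetical, chosen purely for illustration; this patch introduces no such
functions.

/*
 * Illustrative sketch only -- these helpers are NOT part of the patch.
 * They show the intended lifecycle of an "unstable" page: pinned while
 * its bulk write (BRW) RPC is uncommitted, released on commit, and
 * drained at umount time.
 */
#include <linux/atomic.h>
#include <linux/wait.h>

/* Pin one page as unstable while its bulk write RPC is in flight. */
static void ccc_unstable_add(struct cl_client_cache *cache)
{
        atomic_inc(&cache->ccc_unstable_nr);
}

/* On BRW commit: unpin, and wake any waiter once the count hits zero. */
static void ccc_unstable_del(struct cl_client_cache *cache)
{
        if (atomic_dec_and_test(&cache->ccc_unstable_nr))
                wake_up_all(&cache->ccc_unstable_waitq);
}

/* At umount time: block until every unstable page has committed. */
static void ccc_unstable_wait(struct cl_client_cache *cache)
{
        if (cache->ccc_unstable_check)
                wait_event(cache->ccc_unstable_waitq,
                           atomic_read(&cache->ccc_unstable_nr) == 0);
}

Waking only when the counter reaches zero keeps the commit path cheap, while
still letting umount block precisely until the last bulk RPC is stable on the
server.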