X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=inline;f=lustre%2Finclude%2Fcl_object.h;h=087e2511a6529c294abaaaa2a2e0fb0e8902f023;hb=64f99d8993d8f4087de2adf59e9a834823f92898;hp=d8f6f1756346d7551a234a832ed9e79ce1963083;hpb=b14160d67ef3fc5b0d294c8d76c20317a05cda8c;p=fs%2Flustre-release.git

diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index d8f6f17..087e251 100644
--- a/lustre/include/cl_object.h
+++ b/lustre/include/cl_object.h
@@ -27,7 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -251,6 +251,8 @@ struct cl_object {
         struct lu_object                   co_lu;
         /** per-object-layer operations */
         const struct cl_object_operations *co_ops;
+        /** offset of page slice in cl_page buffer */
+        int                                co_slice_off;
 };
 
 /**
@@ -277,15 +279,25 @@ struct cl_object_conf {
          */
         struct inode     *coc_inode;
         /**
-         * Validate object conf. If object is using an invalid conf,
-         * then invalidate it and set the new layout.
+         * Layout lock handle.
          */
-        bool              coc_validate_only;
+        struct ldlm_lock *coc_lock;
         /**
-         * Invalidate the current stripe configuration due to losing
-         * layout lock.
+         * Operation to handle layout, OBJECT_CONF_XYZ.
          */
-        bool              coc_invalidate;
+        int               coc_opc;
+};
+
+enum {
+        /** configure layout, set up a new stripe, must be called while
+         * holding layout lock. */
+        OBJECT_CONF_SET = 0,
+        /** invalidate the current stripe configuration due to losing
+         * layout lock. */
+        OBJECT_CONF_INVALIDATE = 1,
+        /** wait for old layout to go away so that new layout can be
+         * set up. */
+        OBJECT_CONF_WAIT = 2
 };
 
 /**
@@ -309,10 +321,8 @@ struct cl_object_operations {
          * \retval valid-pointer pointer to already existing referenced page
          *                       to be used instead of newly created.
          */
-        struct cl_page *(*coo_page_init)(const struct lu_env *env,
-                                         struct cl_object *obj,
-                                         struct cl_page *page,
-                                         cfs_page_t *vmpage);
+        int  (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
+                              struct cl_page *page, cfs_page_t *vmpage);
         /**
          * Initialize lock slice for this layer. Called top-to-bottom through
          * every object layer when a new cl_lock is instantiated. Layer
@@ -393,9 +403,9 @@ struct cl_object_header {
          */
         /** @{ */
         /** Lock protecting page tree. */
-        cfs_spinlock_t           coh_page_guard;
-        /** Lock protecting lock list. */
-        cfs_spinlock_t           coh_lock_guard;
+        spinlock_t               coh_page_guard;
+        /** Lock protecting lock list. */
+        spinlock_t               coh_lock_guard;
         /** @} locks */
         /** Radix tree of cl_page's, cached for this object. */
         struct radix_tree_root   coh_tree;
@@ -419,12 +429,16 @@ struct cl_object_header {
          *
          * \todo XXX this can be read/write lock if needed.
          */
-        cfs_spinlock_t           coh_attr_guard;
-        /**
-         * Number of objects above this one: 0 for a top-object, 1 for its
-         * sub-object, etc.
-         */
-        unsigned                 coh_nesting;
+        spinlock_t               coh_attr_guard;
+        /**
+         * Size of cl_page + page slices
+         */
+        unsigned short           coh_page_bufsize;
+        /**
+         * Number of objects above this one: 0 for a top-object, 1 for its
+         * sub-object, etc.
+         */
+        unsigned char            coh_nesting;
 };
 
 /**
@@ -723,12 +737,10 @@ struct cl_page {
          * modified only internally within cl_page.c. Protected by a VM lock.
          */
         const enum cl_page_state cp_state;
-        /**
-         * Linkage of pages within some group. Protected by
-         * cl_page::cp_mutex. */
-        cfs_list_t               cp_batch;
-        /** Mutex serializing membership of a page in a batch. */
-        cfs_mutex_t              cp_mutex;
+        /** Linkage of pages within group. Protected by cl_page::cp_mutex. */
+        cfs_list_t               cp_batch;
+        /** Mutex serializing membership of a page in a batch. */
+        struct mutex             cp_mutex;
         /** Linkage of pages within cl_req. */
         cfs_list_t               cp_flight;
         /** Transfer error. */
@@ -1102,6 +1114,16 @@ do { \
         }                                                               \
 } while (0)
 
+static inline int __page_in_use(const struct cl_page *page, int refc)
+{
+        if (page->cp_type == CPT_CACHEABLE)
+                ++refc;
+        LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
+        return (cfs_atomic_read(&page->cp_ref) > refc);
+}
+#define cl_page_in_use(pg)       __page_in_use(pg, 1)
+#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
+
 /** @} cl_page */
 
 /** \addtogroup cl_lock cl_lock
@@ -1555,7 +1577,7 @@ struct cl_lock {
          *
          * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
          */
-        cfs_mutex_t           cll_guard;
+        struct mutex          cll_guard;
         cfs_task_t           *cll_guarder;
         int                   cll_depth;
 
@@ -2358,18 +2380,19 @@ struct cl_io {
          */
                              ci_need_restart:1,
         /**
-         * Ignore layout change.
-         * Most of the CIT_MISC operations can ignore layout change, because
-         * the purpose to create this kind of cl_io is to give an environment
-         * to run clio methods, for example:
-         * 1. request group lock;
-         * 2. flush caching pages by osc;
-         * 3. writepage
-         * 4. echo client
-         * So far, only direct IO and glimpse clio need restart if layout
-         * change during IO time.
+         * Do not refresh the layout - the IO issuer knows that the layout
+         * won't change (page operations; a layout change causes all pages
+         * to be discarded), or it doesn't matter if it changes (sync).
+         */
+                             ci_ignore_layout:1,
+        /**
+         * Check if layout changed after the IO finishes. Mainly for HSM
+         * requirement. If IO occurs on open files, it doesn't need to
+         * verify layout because HSM won't release open files.
+         * Right now, only two operations need to verify layout: glimpse
+         * and setattr.
          */
-                             ci_ignore_layout:1;
+                             ci_verify_layout:1;
         /**
          * Number of pages owned by this IO. For invariant checking.
          */
@@ -2552,22 +2575,29 @@
 
 /* @} cl_req */
 
+enum cache_stats_item {
+        /** how many cache lookups were performed */
+        CS_lookup = 0,
+        /** how many times cache lookup resulted in a hit */
+        CS_hit,
+        /** how many entities are in the cache right now */
+        CS_total,
+        /** how many entities in the cache are actively used (and cannot be
+         * evicted) right now */
+        CS_busy,
+        /** how many entities were created at all */
+        CS_create,
+        CS_NR
+};
+
+#define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
+
 /**
  * Stats for a generic cache (similar to inode, lu_object, etc. caches).
  */
 struct cache_stats {
         const char    *cs_name;
-        /** how many entities were created at all */
-        cfs_atomic_t   cs_created;
-        /** how many cache lookups were performed */
-        cfs_atomic_t   cs_lookup;
-        /** how many times cache lookup resulted in a hit */
-        cfs_atomic_t   cs_hit;
-        /** how many entities are in the cache right now */
-        cfs_atomic_t   cs_total;
-        /** how many entities in the cache are actively used (and cannot be
-         * evicted) right now */
-        cfs_atomic_t   cs_busy;
+        cfs_atomic_t   cs_stats[CS_NR];
 };
 
 /** These are not exported so far */
@@ -2730,6 +2760,18 @@ static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
         return cl_object_header(o0) == cl_object_header(o1);
 }
 
+static inline void cl_object_page_init(struct cl_object *clob, int size)
+{
+        clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
+        cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
+}
+
+static inline void *cl_object_page_slice(struct cl_object *clob,
+                                         struct cl_page *page)
+{
+        return (void *)((char *)page + clob->co_slice_off);
+}
+
 /** @} cl_object */
 
 /** \defgroup cl_page cl_page
@@ -2873,6 +2915,7 @@ static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
                                               struct cl_lock *except,
                                               int pending, int canceld)
 {
+        LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj));
         return cl_lock_at_pgoff(env, obj, page->cp_index,
                                 except, pending, canceld);
 }
@@ -2885,6 +2928,8 @@ void cl_lock_get_trust (struct cl_lock *lock);
 void cl_lock_put         (const struct lu_env *env, struct cl_lock *lock);
 void cl_lock_hold_add    (const struct lu_env *env, struct cl_lock *lock,
                           const char *scope, const void *source);
+void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
+                          const char *scope, const void *source);
 void cl_lock_unhold      (const struct lu_env *env, struct cl_lock *lock,
                           const char *scope, const void *source);
 void cl_lock_release     (const struct lu_env *env, struct cl_lock *lock,
@@ -3148,12 +3193,14 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
  * anchor and wakes up waiting thread when transfer is complete.
  */
 struct cl_sync_io {
-        /** number of pages yet to be transferred. */
-        cfs_atomic_t          csi_sync_nr;
-        /** completion to be signaled when transfer is complete. */
-        cfs_waitq_t           csi_waitq;
-        /** error code. */
-        int                   csi_sync_rc;
+        /** number of pages yet to be transferred. */
+        cfs_atomic_t          csi_sync_nr;
+        /** error code. */
+        int                   csi_sync_rc;
+        /** barrier used when destroying this structure */
+        cfs_atomic_t          csi_barrier;
+        /** completion to be signaled when transfer is complete. */
+        cfs_waitq_t           csi_waitq;
 };
 
 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
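
The page-slice helpers added above change how layers keep per-page state: each layer reserves room in the single cl_page buffer at object-setup time (cl_object_page_init() records the layer's offset in co_slice_off and grows coh_page_bufsize), and later finds its slice by offset with cl_object_page_slice(); coo_page_init() now only returns a result code. A minimal sketch of the intended usage follows; the foo_* names are hypothetical and only the cl_* helpers and the coo_page_init() signature come from this patch.

struct foo_page {
        struct cl_page_slice fp_cl;     /* generic slice embedded first */
        /* layer-private per-page fields would follow */
};

/* Run once per cl_object while the layer stack is being set up: record
 * this layer's offset and grow the shared cl_page buffer. */
static void foo_object_reserve_page_slice(struct cl_object *obj)
{
        cl_object_page_init(obj, sizeof(struct foo_page));
}

/* coo_page_init() now returns an error code; the slice lives inside the
 * preallocated cl_page buffer rather than in a separate allocation. */
static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
                         struct cl_page *page, cfs_page_t *vmpage)
{
        struct foo_page *fp = cl_object_page_slice(obj, page);

        /* ...initialize fp and attach fp->fp_cl to the page here... */
        (void)fp;
        return 0;
}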
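
The cache_stats rework folds the individual cs_created/cs_lookup/cs_hit/cs_total/cs_busy atomics into one cs_stats[] array indexed by enum cache_stats_item, with CS_NAMES supplying matching labels. A sketch of how updates and reporting can now loop over the items; my_stats and both helpers are made up for illustration, only the enum, CS_NAMES and struct cache_stats come from this diff.

static struct cache_stats my_stats = { .cs_name = "example-cache" };

static void my_cache_record_lookup(int hit)
{
        cfs_atomic_inc(&my_stats.cs_stats[CS_lookup]);
        if (hit)
                cfs_atomic_inc(&my_stats.cs_stats[CS_hit]);
}

static void my_cache_stats_print(void)
{
        static const char *names[CS_NR] = CS_NAMES;
        int i;

        /* one counter per cache_stats_item, printed with its CS_NAMES label */
        for (i = 0; i < CS_NR; i++)
                CDEBUG(D_INFO, "%s.%s = %d\n", my_stats.cs_name, names[i],
                       cfs_atomic_read(&my_stats.cs_stats[i]));
}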
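
The new coc_lock/coc_opc fields and OBJECT_CONF_* opcodes replace the old coc_validate_only/coc_invalidate booleans with an explicit layout operation code. The sketch below assumes cl_conf_set() (declared elsewhere in cl_object.h, not in this hunk) is the entry point that passes the conf down the layer stack; the example_* wrapper and its arguments are hypothetical.

static int example_install_layout(const struct lu_env *env,
                                  struct cl_object *obj,
                                  struct inode *inode,
                                  struct ldlm_lock *layout_lock)
{
        struct cl_object_conf conf = {
                .coc_inode = inode,
                /* the layout lock must be held for OBJECT_CONF_SET */
                .coc_lock  = layout_lock,
                .coc_opc   = OBJECT_CONF_SET,
        };

        /* OBJECT_CONF_INVALIDATE would be used after losing the layout
         * lock, and OBJECT_CONF_WAIT to wait for the old layout to drain. */
        return cl_conf_set(env, obj, &conf);
}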