X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Fcl_object.h;h=517ce27efe2403ef5f49bcc9d1f39f8a51296dd0;hp=b4f791750fe668721a24394393e9ee4743451929;hb=a067251099b6b225f2409f680d9e4423253d0730;hpb=ecaba99677b28536f9c376b2b835b554a7792668 diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h index b4f7917..517ce27 100644 --- a/lustre/include/cl_object.h +++ b/lustre/include/cl_object.h @@ -27,7 +27,7 @@ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2012, Whamcloud, Inc. + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -251,6 +251,8 @@ struct cl_object { struct lu_object co_lu; /** per-object-layer operations */ const struct cl_object_operations *co_ops; + /** offset of page slice in cl_page buffer */ + int co_slice_off; }; /** @@ -277,10 +279,25 @@ struct cl_object_conf { */ struct inode *coc_inode; /** - * Invalidate the current stripe configuration due to losing - * layout lock. + * Layout lock handle. */ - bool coc_invalidate; + struct ldlm_lock *coc_lock; + /** + * Operation to handle layout, OBJECT_CONF_XYZ. + */ + int coc_opc; +}; + +enum { + /** configure layout, set up a new stripe, must be called while + * holding layout lock. */ + OBJECT_CONF_SET = 0, + /** invalidate the current stripe configuration due to losing + * layout lock. */ + OBJECT_CONF_INVALIDATE = 1, + /** wait for old layout to go away so that new layout can be + * set up. */ + OBJECT_CONF_WAIT = 2 }; /** @@ -304,10 +321,8 @@ struct cl_object_operations { * \retval valid-pointer pointer to already existing referenced page * to be used instead of newly created. */ - struct cl_page *(*coo_page_init)(const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, - cfs_page_t *vmpage); + int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj, + struct cl_page *page, struct page *vmpage); /** * Initialize lock slice for this layer. Called top-to-bottom through * every object layer when a new cl_lock is instantiated. Layer @@ -388,9 +403,9 @@ struct cl_object_header { */ /** @{ */ /** Lock protecting page tree. */ - cfs_spinlock_t coh_page_guard; - /** Lock protecting lock list. */ - cfs_spinlock_t coh_lock_guard; + spinlock_t coh_page_guard; + /** Lock protecting lock list. */ + spinlock_t coh_lock_guard; /** @} locks */ /** Radix tree of cl_page's, cached for this object. */ struct radix_tree_root coh_tree; @@ -414,12 +429,16 @@ struct cl_object_header { * * \todo XXX this can be read/write lock if needed. */ - cfs_spinlock_t coh_attr_guard; - /** - * Number of objects above this one: 0 for a top-object, 1 for its - * sub-object, etc. - */ - unsigned coh_nesting; + spinlock_t coh_attr_guard; + /** + * Size of cl_page + page slices + */ + unsigned short coh_page_bufsize; + /** + * Number of objects above this one: 0 for a top-object, 1 for its + * sub-object, etc. + */ + unsigned char coh_nesting; }; /** @@ -462,7 +481,7 @@ struct cl_object_header { * corresponding radix tree at the corresponding logical offset. * * cl_page is associated with VM page of the hosting environment (struct - * page in Linux kernel, for example), cfs_page_t. It is assumed, that this + * page in Linux kernel, for example), struct page. 
It is assumed, that this * association is implemented by one of cl_page layers (top layer in the * current design) that * @@ -472,7 +491,7 @@ struct cl_object_header { * - translates state (page flag bits) and locking between lustre and * environment. * - * The association between cl_page and cfs_page_t is immutable and + * The association between cl_page and struct page is immutable and * established when cl_page is created. * * cl_page can be "owned" by a particular cl_io (see below), guaranteeing @@ -481,7 +500,7 @@ struct cl_object_header { * eviction of the page from the memory). Note, that in general cl_io * cannot be identified with a particular thread, and page ownership is not * exactly equal to the current thread holding a lock on the page. Layer - * implementing association between cl_page and cfs_page_t has to implement + * implementing association between cl_page and struct page has to implement * ownership on top of available synchronization mechanisms. * * While lustre client maintains the notion of an page ownership by io, @@ -515,7 +534,7 @@ struct cl_object_header { * - by doing a lookup in the cl_object radix tree, protected by the * spin-lock; * - * - by starting from VM-locked cfs_page_t and following some + * - by starting from VM-locked struct page and following some * hosting environment method (e.g., following ->private pointer in * the case of Linux kernel), see cl_vmpage_page(); * @@ -542,7 +561,7 @@ struct cl_object_header { * * Linux Kernel implementation. * - * Binding between cl_page and cfs_page_t (which is a typedef for + * Binding between cl_page and struct page (which is a typedef for * struct page) is implemented in the vvp layer. cl_page is attached to the * ->private pointer of the struct page, together with the setting of * PG_private bit in page->flags, and acquiring additional reference on the @@ -691,7 +710,7 @@ enum cl_page_flags { }; /** - * Fields are protected by the lock on cfs_page_t, except for atomics and + * Fields are protected by the lock on struct page, except for atomics and * immutables. * * \invariant Data type invariants are in cl_page_invariant(). Basically: @@ -718,14 +737,10 @@ struct cl_page { * modified only internally within cl_page.c. Protected by a VM lock. */ const enum cl_page_state cp_state; - /** Protect to get and put page, see cl_page_put and cl_vmpage_page */ - cfs_spinlock_t cp_lock; - /** - * Linkage of pages within some group. Protected by - * cl_page::cp_mutex. */ - cfs_list_t cp_batch; - /** Mutex serializing membership of a page in a batch. */ - cfs_mutex_t cp_mutex; + /** Linkage of pages within group. Protected by cl_page::cp_mutex. */ + cfs_list_t cp_batch; + /** Mutex serializing membership of a page in a batch. */ + struct mutex cp_mutex; /** Linkage of pages within cl_req. */ cfs_list_t cp_flight; /** Transfer error. */ @@ -754,14 +769,14 @@ struct cl_page { struct cl_req *cp_req; /** List of references to this page, for debugging. */ struct lu_ref cp_reference; - /** Link to an object, for debugging. */ - struct lu_ref_link *cp_obj_ref; - /** Link to a queue, for debugging. */ - struct lu_ref_link *cp_queue_ref; - /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */ - unsigned cp_flags; - /** Assigned if doing a sync_io */ - struct cl_sync_io *cp_sync_io; + /** Link to an object, for debugging. */ + struct lu_ref_link cp_obj_ref; + /** Link to a queue, for debugging. */ + struct lu_ref_link cp_queue_ref; + /** Per-page flags from enum cl_page_flags. Protected by a VM lock. 
*/ + unsigned cp_flags; + /** Assigned if doing a sync_io */ + struct cl_sync_io *cp_sync_io; }; /** @@ -820,7 +835,7 @@ enum cl_req_type { */ struct cl_page_operations { /** - * cl_page<->cfs_page_t methods. Only one layer in the stack has to + * cl_page<->struct page methods. Only one layer in the stack has to * implement these. Current code assumes that this functionality is * provided by the topmost layer, see cl_page_disown0() as an example. */ @@ -828,7 +843,7 @@ struct cl_page_operations { /** * \return the underlying VM page. Optional. */ - cfs_page_t *(*cpo_vmpage)(const struct lu_env *env, + struct page *(*cpo_vmpage)(const struct lu_env *env, const struct cl_page_slice *slice); /** * Called when \a io acquires this page into the exclusive @@ -1099,6 +1114,16 @@ do { \ } \ } while (0) +static inline int __page_in_use(const struct cl_page *page, int refc) +{ + if (page->cp_type == CPT_CACHEABLE) + ++refc; + LASSERT(cfs_atomic_read(&page->cp_ref) > 0); + return (cfs_atomic_read(&page->cp_ref) > refc); +} +#define cl_page_in_use(pg) __page_in_use(pg, 1) +#define cl_page_in_use_noref(pg) __page_in_use(pg, 0) + /** @} cl_page */ /** \addtogroup cl_lock cl_lock @@ -1552,7 +1577,7 @@ struct cl_lock { * * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait(). */ - cfs_mutex_t cll_guard; + struct mutex cll_guard; cfs_task_t *cll_guarder; int cll_depth; @@ -1599,13 +1624,13 @@ struct cl_lock { * A list of holds on this lock, for debugging. */ struct lu_ref cll_holders; - /** - * A reference for cl_lock::cll_descr::cld_obj. For debugging. - */ - struct lu_ref_link *cll_obj_ref; + /** + * A reference for cl_lock::cll_descr::cld_obj. For debugging. + */ + struct lu_ref_link cll_obj_ref; #ifdef CONFIG_LOCKDEP - /* "dep_map" name is assumed by lockdep.h macros. */ - struct lockdep_map dep_map; + /* "dep_map" name is assumed by lockdep.h macros. */ + struct lockdep_map dep_map; #endif }; @@ -2250,9 +2275,7 @@ enum cl_io_lock_dmd { /** Layers are free to decide between local and global locking. */ CILR_MAYBE, /** Never lock: there is no cache (e.g., liblustre). */ - CILR_NEVER, - /** Peek lock: use existing locks, don't queue new ones */ - CILR_PEEK + CILR_NEVER }; enum cl_fsync_mode { @@ -2493,11 +2516,11 @@ struct cl_req_operations { * A per-object state that (potentially multi-object) transfer request keeps. */ struct cl_req_obj { - /** object itself */ - struct cl_object *ro_obj; - /** reference to cl_req_obj::ro_obj. For debugging. */ - struct lu_ref_link *ro_obj_ref; - /* something else? Number of pages for a given object? */ + /** object itself */ + struct cl_object *ro_obj; + /** reference to cl_req_obj::ro_obj. For debugging. */ + struct lu_ref_link ro_obj_ref; + /* something else? Number of pages for a given object? */ }; /** @@ -2550,22 +2573,29 @@ struct cl_req_slice { /* @} cl_req */ +enum cache_stats_item { + /** how many cache lookups were performed */ + CS_lookup = 0, + /** how many times cache lookup resulted in a hit */ + CS_hit, + /** how many entities are in the cache right now */ + CS_total, + /** how many entities in the cache are actively used (and cannot be + * evicted) right now */ + CS_busy, + /** how many entities were created at all */ + CS_create, + CS_NR +}; + +#define CS_NAMES { "lookup", "hit", "total", "busy", "create" } + /** * Stats for a generic cache (similar to inode, lu_object, etc. caches). 
*/ struct cache_stats { const char *cs_name; - /** how many entities were created at all */ - cfs_atomic_t cs_created; - /** how many cache lookups were performed */ - cfs_atomic_t cs_lookup; - /** how many times cache lookup resulted in a hit */ - cfs_atomic_t cs_hit; - /** how many entities are in the cache right now */ - cfs_atomic_t cs_total; - /** how many entities in the cache are actively used (and cannot be - * evicted) right now */ - cfs_atomic_t cs_busy; + cfs_atomic_t cs_stats[CS_NR]; }; /** These are not exported so far */ @@ -2728,6 +2758,18 @@ static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1) return cl_object_header(o0) == cl_object_header(o1); } +static inline void cl_object_page_init(struct cl_object *clob, int size) +{ + clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize; + cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8); +} + +static inline void *cl_object_page_slice(struct cl_object *clob, + struct cl_page *page) +{ + return (void *)((char *)page + clob->co_slice_off); +} + /** @} cl_object */ /** \defgroup cl_page cl_page @@ -2766,9 +2808,9 @@ void cl_page_print (const struct lu_env *env, void *cookie, void cl_page_header_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_page *pg); -cfs_page_t *cl_page_vmpage (const struct lu_env *env, +struct page *cl_page_vmpage (const struct lu_env *env, struct cl_page *page); -struct cl_page *cl_vmpage_page (cfs_page_t *vmpage, struct cl_object *obj); +struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj); struct cl_page *cl_page_top (struct cl_page *page); const struct cl_page_slice *cl_page_at(const struct cl_page *page, @@ -2871,6 +2913,7 @@ static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_lock *except, int pending, int canceld) { + LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj)); return cl_lock_at_pgoff(env, obj, page->cp_index, except, pending, canceld); } @@ -3148,12 +3191,14 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret); * anchor and wakes up waiting thread when transfer is complete. */ struct cl_sync_io { - /** number of pages yet to be transferred. */ - cfs_atomic_t csi_sync_nr; - /** completion to be signaled when transfer is complete. */ - cfs_waitq_t csi_waitq; - /** error code. */ - int csi_sync_rc; + /** number of pages yet to be transferred. */ + cfs_atomic_t csi_sync_nr; + /** error code. */ + int csi_sync_rc; + /** barrier of destroy this structure */ + cfs_atomic_t csi_barrier; + /** completion to be signaled when transfer is complete. */ + cfs_waitq_t csi_waitq; }; void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages); @@ -3218,7 +3263,6 @@ void *cl_env_reenter (void); void cl_env_reexit (void *cookie); void cl_env_implant (struct lu_env *env, int *refcheck); void cl_env_unplant (struct lu_env *env, int *refcheck); -unsigned cl_env_cache_purge(unsigned nr); /** @} cl_env */ @@ -3233,4 +3277,7 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site, struct lu_device *next); /** @} clio */ +int cl_global_init(void); +void cl_global_fini(void); + #endif /* _LINUX_CL_OBJECT_H */
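
The hunks above replace the old per-layer cl_page slice allocation with a single buffer: each layer reserves space at object-setup time via cl_object_page_init(), the offset is remembered in cl_object::co_slice_off, and coo_page_init() now locates its pre-reserved slice with cl_object_page_slice() and returns 0 or a negative errno instead of a cl_page pointer. Below is a minimal sketch of how a layer could use the new helpers; the foo_* names are hypothetical (not part of this patch), and cl_page_slice_add() and container_of() are assumed to be available with their usual signatures.

/*
 * Sketch only: a hypothetical "foo" layer wired up to the embedded
 * page-slice scheme introduced above.  The foo_* names are illustrative;
 * cl_page_slice_add() and container_of() are assumed to keep their
 * pre-existing signatures.
 */
#include <cl_object.h>

struct foo_page {
	struct cl_page_slice	 fp_cl;		/* linkage into cl_page */
	struct page		*fp_vmpage;	/* backing VM page */
};

static struct page *foo_page_vmpage(const struct lu_env *env,
				    const struct cl_page_slice *slice)
{
	struct foo_page *fp = container_of(slice, struct foo_page, fp_cl);

	return fp->fp_vmpage;
}

static const struct cl_page_operations foo_page_ops = {
	.cpo_vmpage	= foo_page_vmpage,
	/* other methods omitted */
};

/*
 * Called once while setting up the object stack (e.g. from the layer's
 * lu_object_operations::loo_object_init): reserve room for this layer's
 * page slice inside the single cl_page buffer and record the offset in
 * cl_object::co_slice_off.  After every layer has done this,
 * coh_page_bufsize covers sizeof(struct cl_page) plus the sum of all
 * 8-byte-aligned slice sizes.
 */
static void foo_object_reserve_page_slice(struct cl_object *obj)
{
	cl_object_page_init(obj, sizeof(struct foo_page));
}

/*
 * cl_object_operations::coo_page_init under the new convention: locate
 * the pre-reserved slice inside the cl_page buffer, fill it in, attach
 * it, and return 0 or a negative errno -- no cl_page pointer is
 * returned any more.
 */
static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
			 struct cl_page *page, struct page *vmpage)
{
	struct foo_page *fp = cl_object_page_slice(obj, page);

	fp->fp_vmpage = vmpage;
	cl_page_slice_add(page, &fp->fp_cl, obj, &foo_page_ops);
	return 0;
}

The point of the change is allocation cost: creating a cl_page now takes one allocation of coh_page_bufsize bytes that already contains every layer's slice, instead of one allocation per layer, which matters for a structure created and destroyed at page-cache rates.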