X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fosc%2Fosc_cl_internal.h;h=08082da1f64081cf5729dd1a88108128d2160aa7;hb=dff46e780827cf723c90bd349bc22951fb46e0ae;hp=f23f927a24c575f74ad4720d85305f796711717c;hpb=4fcbd1af9ec3b1e5f6424d925f43f0cb2910c3ec;p=fs%2Flustre-release.git

diff --git a/lustre/osc/osc_cl_internal.h b/lustre/osc/osc_cl_internal.h
index f23f927..08082da 100644
--- a/lustre/osc/osc_cl_internal.h
+++ b/lustre/osc/osc_cl_internal.h
@@ -170,8 +170,8 @@ struct osc_object {
 	cfs_list_t		oo_reading_exts;
 
-	cfs_atomic_t		oo_nr_reads;
-	cfs_atomic_t		oo_nr_writes;
+	atomic_t		oo_nr_reads;
+	atomic_t		oo_nr_writes;
 
 	/** Protect extent tree. Will be used to protect
 	 * oo_{read|write}_pages soon. */
@@ -276,16 +276,6 @@ struct osc_lock {
 	enum osc_lock_state	 ols_state;
 
 	/**
-	 * How many pages are using this lock for io, currently only used by
-	 * read-ahead. If non-zero, the underlying dlm lock won't be cancelled
-	 * during recovery to avoid deadlock. see bz16774.
-	 *
-	 * \see osc_page::ops_lock
-	 * \see osc_page_addref_lock(), osc_page_putref_lock()
-	 */
-	cfs_atomic_t		 ols_pageref;
-
-	/**
 	 * true, if ldlm_lock_addref() was called against
 	 * osc_lock::ols_lock. This is used for sanity checking.
 	 *
@@ -402,16 +392,6 @@ struct osc_page {
 	 * Submit time - the time when the page is starting RPC. For debugging.
 	 */
 	cfs_time_t		ops_submit_time;
-
-	/**
-	 * A lock of which we hold a reference covers this page. Only used by
-	 * read-ahead: for a readahead page, we hold it's covering lock to
-	 * prevent it from being canceled during recovery.
-	 *
-	 * \see osc_lock::ols_pageref
-	 * \see osc_page_addref_lock(), osc_page_putref_lock().
-	 */
-	struct cl_lock		*ops_lock;
 };
 
 extern struct kmem_cache *osc_lock_kmem;
@@ -438,7 +418,7 @@ struct lu_object *osc_object_alloc(const struct lu_env *env,
 				   const struct lu_object_header *hdr,
 				   struct lu_device *dev);
 int osc_page_init(const struct lu_env *env, struct cl_object *obj,
-		  struct cl_page *page, struct page *vmpage);
+		  struct cl_page *page, pgoff_t ind);
 void osc_index2policy  (ldlm_policy_data_t *policy, const struct cl_object *obj,
 			pgoff_t start, pgoff_t end);
@@ -575,6 +555,11 @@ static inline struct osc_page *oap2osc(struct osc_async_page *oap)
 	return container_of0(oap, struct osc_page, ops_oap);
 }
 
+static inline pgoff_t osc_index(struct osc_page *opg)
+{
+	return opg->ops_cl.cpl_index;
+}
+
 static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
 {
 	return oap2osc(oap)->ops_cl.cpl_page;
@@ -585,11 +570,6 @@ static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
 	return (struct osc_page *)container_of(oap, struct osc_page, ops_oap);
 }
 
-static inline pgoff_t osc_index(struct osc_page *opg)
-{
-	return opg->ops_cl.cpl_page->cp_index;
-}
-
 static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
 {
 	LINVRNT(osc_is_object(&slice->cls_obj->co_lu));
@@ -634,70 +614,70 @@ enum osc_extent_state {
  */
 struct osc_extent {
 	/** red-black tree node */
-	struct rb_node     oe_node;
+	struct rb_node		oe_node;
 	/** osc_object of this extent */
-	struct osc_object *oe_obj;
+	struct osc_object	*oe_obj;
 	/** refcount, removed from red-black tree if reaches zero. */
-	cfs_atomic_t       oe_refc;
+	atomic_t		oe_refc;
 	/** busy if non-zero */
-	cfs_atomic_t       oe_users;
+	atomic_t		oe_users;
 	/** link list of osc_object's oo_{hp|urgent|locking}_exts. */
-	cfs_list_t         oe_link;
+	cfs_list_t		oe_link;
 	/** state of this extent */
-	unsigned int       oe_state;
+	unsigned int		oe_state;
 	/** flags for this extent. */
-	unsigned int       oe_intree:1,
+	unsigned int		oe_intree:1,
 	/** 0 is write, 1 is read */
-			   oe_rw:1,
-			   oe_srvlock:1,
-			   oe_memalloc:1,
+				oe_rw:1,
+				oe_srvlock:1,
+				oe_memalloc:1,
 	/** an ACTIVE extent is going to be truncated, so when this extent
 	 * is released, it will turn into TRUNC state instead of CACHE. */
-			   oe_trunc_pending:1,
+				oe_trunc_pending:1,
 	/** this extent should be written asap and someone may wait for the
 	 * write to finish. This bit is usually set along with urgent if
 	 * the extent was CACHE state.
 	 * fsync_wait extent can't be merged because new extent region may
 	 * exceed fsync range. */
-			   oe_fsync_wait:1,
+				oe_fsync_wait:1,
 	/** covering lock is being canceled */
-			   oe_hp:1,
+				oe_hp:1,
 	/** this extent should be written back asap. set if one of pages is
 	 * called by page WB daemon, or sync write or reading requests. */
-			   oe_urgent:1;
+				oe_urgent:1;
 	/** how many grants allocated for this extent.
 	 *  Grant allocated for this extent. There is no grant allocated
 	 *  for reading extents and sync write extents. */
-	unsigned int       oe_grants;
+	unsigned int		oe_grants;
 	/** # of dirty pages in this extent */
-	unsigned int       oe_nr_pages;
+	unsigned int		oe_nr_pages;
 	/** list of pending oap pages. Pages in this list are NOT sorted. */
-	cfs_list_t         oe_pages;
+	cfs_list_t		oe_pages;
 	/** Since an extent has to be written out in atomic, this is used to
 	 * remember the next page need to be locked to write this extent out.
 	 * Not used right now. */
-	struct osc_page   *oe_next_page;
+	struct osc_page		*oe_next_page;
 	/** start and end index of this extent, include start and end
 	 * themselves. Page offset here is the page index of osc_pages.
 	 * oe_start is used as keyword for red-black tree. */
-	pgoff_t            oe_start;
-	pgoff_t            oe_end;
+	pgoff_t			oe_start;
+	pgoff_t			oe_end;
 	/** maximum ending index of this extent, this is limited by
 	 * max_pages_per_rpc, lock extent and chunk size. */
-	pgoff_t            oe_max_end;
+	pgoff_t			oe_max_end;
 	/** waitqueue - for those who want to be notified if this extent's
 	 * state has changed. */
-	wait_queue_head_t  oe_waitq;
+	wait_queue_head_t	oe_waitq;
 	/** lock covering this extent */
-	struct cl_lock    *oe_osclock;
+	struct cl_lock		*oe_osclock;
 	/** terminator of this extent. Must be true if this extent is in IO. */
-	struct task_struct *oe_owner;
+	struct task_struct	*oe_owner;
 	/** return value of writeback. If somebody is waiting for this extent,
 	 * this value can be known by outside world. */
-	int                oe_rc;
+	int			oe_rc;
 	/** max pages per rpc when this extent was created */
-	unsigned int       oe_mppr;
+	unsigned int		oe_mppr;
 };
 
 int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
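
Note for context: the osc_extent comments above say that oe_start is the key of the
per-object red-black tree and that an extent covers the inclusive page range
[oe_start, oe_end]. As a minimal sketch only, assuming the Linux rbtree API, the
struct osc_extent definition from the hunk above, and a caller that already holds
the lock protecting the tree, a lookup by page index could walk the tree as below.
The helper name osc_extent_lookup_sketch and its rb_root parameter are
hypothetical; the real lookup code lives in lustre/osc/osc_cache.c, not in this
header.

#include <linux/rbtree.h>
#include <linux/types.h>

/* Hypothetical sketch, not part of the patch: walk the per-object
 * red-black tree of osc_extent, keyed by oe_start, and return the
 * extent whose [oe_start, oe_end] range covers @index, or NULL. */
static struct osc_extent *osc_extent_lookup_sketch(struct rb_root *root,
						   pgoff_t index)
{
	struct rb_node *n = root->rb_node;

	while (n != NULL) {
		/* oe_node embeds each extent in the tree */
		struct osc_extent *ext = rb_entry(n, struct osc_extent,
						  oe_node);

		if (index < ext->oe_start)
			n = n->rb_left;
		else if (index > ext->oe_end)
			n = n->rb_right;
		else
			return ext;	/* oe_start <= index <= oe_end */
	}
	return NULL;
}

Because extents in the tree never overlap, comparing against oe_start on the left
and oe_end on the right is enough to reach the unique extent covering the index,
which is why oe_start alone can serve as the tree key, as the struct comment notes.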