X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Fcl_object.h;h=cfaa8fc271156d65121cb4d3431c6e13d2001563;hp=06cb4d9ee43a55181d864cb1a3afc6988d0c87ab;hb=93fe562c5dd3829939e3bc0533918f66b19776a4;hpb=33257361eef3aeb09eee0d10026be17b6f3f5bcb diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h index 06cb4d9..cfaa8fc 100644 --- a/lustre/include/cl_object.h +++ b/lustre/include/cl_object.h @@ -27,7 +27,7 @@ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2012, Whamcloud, Inc. + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -98,7 +98,6 @@ * super-class definitions. */ #include -#include #ifdef __KERNEL__ # include # include @@ -251,6 +250,8 @@ struct cl_object { struct lu_object co_lu; /** per-object-layer operations */ const struct cl_object_operations *co_ops; + /** offset of page slice in cl_page buffer */ + int co_slice_off; }; /** @@ -277,10 +278,25 @@ struct cl_object_conf { */ struct inode *coc_inode; /** - * Invalidate the current stripe configuration due to losing - * layout lock. + * Layout lock handle. */ - bool coc_invalidate; + struct ldlm_lock *coc_lock; + /** + * Operation to handle layout, OBJECT_CONF_XYZ. + */ + int coc_opc; +}; + +enum { + /** configure layout, set up a new stripe, must be called while + * holding layout lock. */ + OBJECT_CONF_SET = 0, + /** invalidate the current stripe configuration due to losing + * layout lock. */ + OBJECT_CONF_INVALIDATE = 1, + /** wait for old layout to go away so that new layout can be + * set up. */ + OBJECT_CONF_WAIT = 2 }; /** @@ -304,10 +320,8 @@ struct cl_object_operations { * \retval valid-pointer pointer to already existing referenced page * to be used instead of newly created. */ - struct cl_page *(*coo_page_init)(const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, - cfs_page_t *vmpage); + int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj, + struct cl_page *page, struct page *vmpage); /** * Initialize lock slice for this layer. Called top-to-bottom through * every object layer when a new cl_lock is instantiated. Layer @@ -416,10 +430,14 @@ struct cl_object_header { */ spinlock_t coh_attr_guard; /** + * Size of cl_page + page slices + */ + unsigned short coh_page_bufsize; + /** * Number of objects above this one: 0 for a top-object, 1 for its * sub-object, etc. */ - unsigned coh_nesting; + unsigned char coh_nesting; }; /** @@ -462,7 +480,7 @@ struct cl_object_header { * corresponding radix tree at the corresponding logical offset. * * cl_page is associated with VM page of the hosting environment (struct - * page in Linux kernel, for example), cfs_page_t. It is assumed, that this + * page in Linux kernel, for example), struct page. It is assumed, that this * association is implemented by one of cl_page layers (top layer in the * current design) that * @@ -472,7 +490,7 @@ struct cl_object_header { * - translates state (page flag bits) and locking between lustre and * environment. * - * The association between cl_page and cfs_page_t is immutable and + * The association between cl_page and struct page is immutable and * established when cl_page is created. * * cl_page can be "owned" by a particular cl_io (see below), guaranteeing @@ -481,7 +499,7 @@ struct cl_object_header { * eviction of the page from the memory). 
Note, that in general cl_io * cannot be identified with a particular thread, and page ownership is not * exactly equal to the current thread holding a lock on the page. Layer - * implementing association between cl_page and cfs_page_t has to implement + * implementing association between cl_page and struct page has to implement * ownership on top of available synchronization mechanisms. * * While lustre client maintains the notion of an page ownership by io, @@ -515,7 +533,7 @@ struct cl_object_header { * - by doing a lookup in the cl_object radix tree, protected by the * spin-lock; * - * - by starting from VM-locked cfs_page_t and following some + * - by starting from VM-locked struct page and following some * hosting environment method (e.g., following ->private pointer in * the case of Linux kernel), see cl_vmpage_page(); * @@ -542,7 +560,7 @@ struct cl_object_header { * * Linux Kernel implementation. * - * Binding between cl_page and cfs_page_t (which is a typedef for + * Binding between cl_page and struct page (which is a typedef for * struct page) is implemented in the vvp layer. cl_page is attached to the * ->private pointer of the struct page, together with the setting of * PG_private bit in page->flags, and acquiring additional reference on the @@ -691,7 +709,7 @@ enum cl_page_flags { }; /** - * Fields are protected by the lock on cfs_page_t, except for atomics and + * Fields are protected by the lock on struct page, except for atomics and * immutables. * * \invariant Data type invariants are in cl_page_invariant(). Basically: @@ -741,7 +759,7 @@ struct cl_page { /** * Debug information, the task is owning the page. */ - cfs_task_t *cp_task; + struct task_struct *cp_task; /** * Owning IO request in cl_page_state::CPS_PAGEOUT and * cl_page_state::CPS_PAGEIN states. This field is maintained only in @@ -750,14 +768,14 @@ struct cl_page { struct cl_req *cp_req; /** List of references to this page, for debugging. */ struct lu_ref cp_reference; - /** Link to an object, for debugging. */ - struct lu_ref_link *cp_obj_ref; - /** Link to a queue, for debugging. */ - struct lu_ref_link *cp_queue_ref; - /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */ - unsigned cp_flags; - /** Assigned if doing a sync_io */ - struct cl_sync_io *cp_sync_io; + /** Link to an object, for debugging. */ + struct lu_ref_link cp_obj_ref; + /** Link to a queue, for debugging. */ + struct lu_ref_link cp_queue_ref; + /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */ + unsigned cp_flags; + /** Assigned if doing a sync_io */ + struct cl_sync_io *cp_sync_io; }; /** @@ -816,7 +834,7 @@ enum cl_req_type { */ struct cl_page_operations { /** - * cl_page<->cfs_page_t methods. Only one layer in the stack has to + * cl_page<->struct page methods. Only one layer in the stack has to * implement these. Current code assumes that this functionality is * provided by the topmost layer, see cl_page_disown0() as an example. */ @@ -824,7 +842,7 @@ struct cl_page_operations { /** * \return the underlying VM page. Optional. */ - cfs_page_t *(*cpo_vmpage)(const struct lu_env *env, + struct page *(*cpo_vmpage)(const struct lu_env *env, const struct cl_page_slice *slice); /** * Called when \a io acquires this page into the exclusive @@ -1542,30 +1560,30 @@ struct cl_lock { */ struct cl_lock_descr cll_descr; /** Protected by cl_lock::cll_guard. */ - enum cl_lock_state cll_state; - /** signals state changes. 
*/ - cfs_waitq_t cll_wq; - /** - * Recursive lock, most fields in cl_lock{} are protected by this. - * - * Locking rules: this mutex is never held across network - * communication, except when lock is being canceled. - * - * Lock ordering: a mutex of a sub-lock is taken first, then a mutex - * on a top-lock. Other direction is implemented through a - * try-lock-repeat loop. Mutices of unrelated locks can be taken only - * by try-locking. - * - * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait(). - */ + enum cl_lock_state cll_state; + /** signals state changes. */ + wait_queue_head_t cll_wq; + /** + * Recursive lock, most fields in cl_lock{} are protected by this. + * + * Locking rules: this mutex is never held across network + * communication, except when lock is being canceled. + * + * Lock ordering: a mutex of a sub-lock is taken first, then a mutex + * on a top-lock. Other direction is implemented through a + * try-lock-repeat loop. Mutices of unrelated locks can be taken only + * by try-locking. + * + * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait(). + */ struct mutex cll_guard; - cfs_task_t *cll_guarder; + struct task_struct *cll_guarder; int cll_depth; /** * the owner for INTRANSIT state */ - cfs_task_t *cll_intransit_owner; + struct task_struct *cll_intransit_owner; int cll_error; /** * Number of holds on a lock. A hold prevents a lock from being @@ -1605,13 +1623,13 @@ struct cl_lock { * A list of holds on this lock, for debugging. */ struct lu_ref cll_holders; - /** - * A reference for cl_lock::cll_descr::cld_obj. For debugging. - */ - struct lu_ref_link *cll_obj_ref; + /** + * A reference for cl_lock::cll_descr::cld_obj. For debugging. + */ + struct lu_ref_link cll_obj_ref; #ifdef CONFIG_LOCKDEP - /* "dep_map" name is assumed by lockdep.h macros. */ - struct lockdep_map dep_map; + /* "dep_map" name is assumed by lockdep.h macros. */ + struct lockdep_map dep_map; #endif }; @@ -1850,9 +1868,9 @@ do { \ * @{ */ struct cl_page_list { - unsigned pl_nr; - cfs_list_t pl_pages; - cfs_task_t *pl_owner; + unsigned pl_nr; + cfs_list_t pl_pages; + struct task_struct *pl_owner; }; /** @@ -2256,9 +2274,7 @@ enum cl_io_lock_dmd { /** Layers are free to decide between local and global locking. */ CILR_MAYBE, /** Never lock: there is no cache (e.g., liblustre). */ - CILR_NEVER, - /** Peek lock: use existing locks, don't queue new ones */ - CILR_PEEK + CILR_NEVER }; enum cl_fsync_mode { @@ -2373,11 +2389,19 @@ struct cl_io { * Right now, only two opertaions need to verify layout: glimpse * and setattr. */ - ci_verify_layout:1; - /** - * Number of pages owned by this IO. For invariant checking. - */ - unsigned ci_owned_nr; + ci_verify_layout:1, + /** + * file is released, restore has to to be triggered by vvp layer + */ + ci_restore_needed:1, + /** + * O_NOATIME + */ + ci_noatime:1; + /** + * Number of pages owned by this IO. For invariant checking. + */ + unsigned ci_owned_nr; }; /** @} cl_io */ @@ -2499,11 +2523,11 @@ struct cl_req_operations { * A per-object state that (potentially multi-object) transfer request keeps. */ struct cl_req_obj { - /** object itself */ - struct cl_object *ro_obj; - /** reference to cl_req_obj::ro_obj. For debugging. */ - struct lu_ref_link *ro_obj_ref; - /* something else? Number of pages for a given object? */ + /** object itself */ + struct cl_object *ro_obj; + /** reference to cl_req_obj::ro_obj. For debugging. */ + struct lu_ref_link ro_obj_ref; + /* something else? Number of pages for a given object? 
*/ }; /** @@ -2741,6 +2765,18 @@ static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1) return cl_object_header(o0) == cl_object_header(o1); } +static inline void cl_object_page_init(struct cl_object *clob, int size) +{ + clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize; + cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8); +} + +static inline void *cl_object_page_slice(struct cl_object *clob, + struct cl_page *page) +{ + return (void *)((char *)page + clob->co_slice_off); +} + /** @} cl_object */ /** \defgroup cl_page cl_page @@ -2779,9 +2815,9 @@ void cl_page_print (const struct lu_env *env, void *cookie, void cl_page_header_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_page *pg); -cfs_page_t *cl_page_vmpage (const struct lu_env *env, +struct page *cl_page_vmpage (const struct lu_env *env, struct cl_page *page); -struct cl_page *cl_vmpage_page (cfs_page_t *vmpage, struct cl_object *obj); +struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj); struct cl_page *cl_page_top (struct cl_page *page); const struct cl_page_slice *cl_page_at(const struct cl_page *page, @@ -3162,12 +3198,14 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret); * anchor and wakes up waiting thread when transfer is complete. */ struct cl_sync_io { - /** number of pages yet to be transferred. */ - cfs_atomic_t csi_sync_nr; - /** completion to be signaled when transfer is complete. */ - cfs_waitq_t csi_waitq; - /** error code. */ - int csi_sync_rc; + /** number of pages yet to be transferred. */ + cfs_atomic_t csi_sync_nr; + /** error code. */ + int csi_sync_rc; + /** barrier of destroy this structure */ + cfs_atomic_t csi_barrier; + /** completion to be signaled when transfer is complete. */ + wait_queue_head_t csi_waitq; }; void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages); @@ -3247,4 +3285,7 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site, struct lu_device *next); /** @} clio */ +int cl_global_init(void); +void cl_global_fini(void); + #endif /* _LINUX_CL_OBJECT_H */
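
The centerpiece of this patch is the new cl_object::co_slice_off / cl_object_header::coh_page_bufsize pair and the cl_object_page_init()/cl_object_page_slice() helpers: each layer reserves room for its per-page slice up front, so a cl_page and all of its slices fit in a single buffer and coo_page_init() no longer has to hand back a pointer. The sketch below shows how a hypothetical "foo" layer would plug into this; all foo_* names are invented for illustration, and it assumes cl_page_slice_add() keeps the four-argument form used elsewhere in this header at the time of this change.

/*
 * Hypothetical layer illustrating the new page-slice reservation scheme.
 * Assumption: cl_page_slice_add(page, slice, obj, ops) is available with
 * this signature; none of the foo_* symbols exist in the tree.
 */
struct foo_page {
	struct cl_page_slice	 fp_cl;		/* generic slice, linked into cl_page */
	struct page		*fp_vmpage;	/* backing VM page, for cpo_vmpage() */
};

static const struct cl_page_operations foo_page_ops = {
	/* layer methods omitted from this sketch */
};

/*
 * Object set-up time: record where this layer's slice will live inside
 * every cl_page buffer and grow coh_page_bufsize accordingly.
 */
static void foo_object_reserve_slice(struct cl_object *obj)
{
	cl_object_page_init(obj, sizeof(struct foo_page));
}

/*
 * coo_page_init() in its new int-returning form: no allocation, just
 * locate the pre-reserved slice inside the cl_page buffer and attach it.
 */
static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
			 struct cl_page *page, struct page *vmpage)
{
	struct foo_page *fp = cl_object_page_slice(obj, page);

	fp->fp_vmpage = vmpage;
	cl_page_slice_add(page, &fp->fp_cl, obj, &foo_page_ops);
	return 0;
}

Folding the slices into one allocation removes a per-layer allocation on every page instantiation and turns slice lookup into a constant offset, which is presumably why the old pointer-returning coo_page_init() contract could be dropped.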
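
The reworked cl_object_conf replaces the lone coc_invalidate flag with an explicit operation code (OBJECT_CONF_SET/INVALIDATE/WAIT) plus the layout lock handle, so the top layer can tell the whole stack whether to install a new stripe, drop the current one, or wait for the old layout to drain. A hedged sketch of a caller follows; it assumes the cl_conf_set() dispatcher declared elsewhere in this header (not visible in this hunk) and leaves out the layout metadata and error handling a real caller in the llite layer would carry; the example_* names are invented.

/*
 * Sketch only: example_* functions do not exist in the tree; cl_conf_set()
 * is assumed to walk the layers and invoke each coo_conf_set() method.
 */
static int example_apply_layout(const struct lu_env *env, struct cl_object *obj,
				struct inode *inode, struct ldlm_lock *lock)
{
	struct cl_object_conf conf = {
		.coc_inode = inode,
		.coc_lock  = lock,		/* layout lock currently held */
		.coc_opc   = OBJECT_CONF_SET,	/* install the new stripe */
	};

	return cl_conf_set(env, obj, &conf);
}

static int example_drop_layout(const struct lu_env *env, struct cl_object *obj)
{
	struct cl_object_conf conf = {
		.coc_opc = OBJECT_CONF_INVALIDATE,	/* layout lock was lost */
	};

	return cl_conf_set(env, obj, &conf);
}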