X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Fcl_object.h;h=db81fb1956cad69b668a3677e0b25776bf73030d;hp=91b32c9d3067790933c752a1a3e2d92cd94c68da;hb=295968fa7257978bba7aa4fedb28cd3563d4a5e0;hpb=f95393b0d0a59cf3dc2f29cffc35dcc4cc9d7728 diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h index 91b32c9..db81fb1 100644 --- a/lustre/include/cl_object.h +++ b/lustre/include/cl_object.h @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -28,6 +26,8 @@ /* * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -251,6 +251,8 @@ struct cl_object { struct lu_object co_lu; /** per-object-layer operations */ const struct cl_object_operations *co_ops; + /** offset of page slice in cl_page buffer */ + int co_slice_off; }; /** @@ -276,6 +278,26 @@ struct cl_object_conf { * VFS inode. This is consumed by vvp. */ struct inode *coc_inode; + /** + * Layout lock handle. + */ + struct ldlm_lock *coc_lock; + /** + * Operation to handle layout, OBJECT_CONF_XYZ. + */ + int coc_opc; +}; + +enum { + /** configure layout, set up a new stripe, must be called while + * holding layout lock. */ + OBJECT_CONF_SET = 0, + /** invalidate the current stripe configuration due to losing + * layout lock. */ + OBJECT_CONF_INVALIDATE = 1, + /** wait for old layout to go away so that new layout can be + * set up. */ + OBJECT_CONF_WAIT = 2 }; /** @@ -299,10 +321,8 @@ struct cl_object_operations { * \retval valid-pointer pointer to already existing referenced page * to be used instead of newly created. */ - struct cl_page *(*coo_page_init)(const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, - cfs_page_t *vmpage); + int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj, + struct cl_page *page, struct page *vmpage); /** * Initialize lock slice for this layer. Called top-to-bottom through * every object layer when a new cl_lock is instantiated. Layer @@ -383,9 +403,9 @@ struct cl_object_header { */ /** @{ */ /** Lock protecting page tree. */ - cfs_spinlock_t coh_page_guard; - /** Lock protecting lock list. */ - cfs_spinlock_t coh_lock_guard; + spinlock_t coh_page_guard; + /** Lock protecting lock list. */ + spinlock_t coh_lock_guard; /** @} locks */ /** Radix tree of cl_page's, cached for this object. */ struct radix_tree_root coh_tree; @@ -409,12 +429,16 @@ struct cl_object_header { * * \todo XXX this can be read/write lock if needed. */ - cfs_spinlock_t coh_attr_guard; - /** - * Number of objects above this one: 0 for a top-object, 1 for its - * sub-object, etc. - */ - unsigned coh_nesting; + spinlock_t coh_attr_guard; + /** + * Size of cl_page + page slices + */ + unsigned short coh_page_bufsize; + /** + * Number of objects above this one: 0 for a top-object, 1 for its + * sub-object, etc. + */ + unsigned char coh_nesting; }; /** @@ -457,7 +481,7 @@ struct cl_object_header { * corresponding radix tree at the corresponding logical offset. * * cl_page is associated with VM page of the hosting environment (struct - * page in Linux kernel, for example), cfs_page_t. It is assumed, that this + * page in Linux kernel, for example), struct page. 
It is assumed, that this * association is implemented by one of cl_page layers (top layer in the * current design) that * @@ -467,7 +491,7 @@ struct cl_object_header { * - translates state (page flag bits) and locking between lustre and * environment. * - * The association between cl_page and cfs_page_t is immutable and + * The association between cl_page and struct page is immutable and * established when cl_page is created. * * cl_page can be "owned" by a particular cl_io (see below), guaranteeing @@ -476,7 +500,7 @@ struct cl_object_header { * eviction of the page from the memory). Note, that in general cl_io * cannot be identified with a particular thread, and page ownership is not * exactly equal to the current thread holding a lock on the page. Layer - * implementing association between cl_page and cfs_page_t has to implement + * implementing association between cl_page and struct page has to implement * ownership on top of available synchronization mechanisms. * * While lustre client maintains the notion of an page ownership by io, @@ -510,7 +534,7 @@ struct cl_object_header { * - by doing a lookup in the cl_object radix tree, protected by the * spin-lock; * - * - by starting from VM-locked cfs_page_t and following some + * - by starting from VM-locked struct page and following some * hosting environment method (e.g., following ->private pointer in * the case of Linux kernel), see cl_vmpage_page(); * @@ -537,7 +561,7 @@ struct cl_object_header { * * Linux Kernel implementation. * - * Binding between cl_page and cfs_page_t (which is a typedef for + * Binding between cl_page and struct page (which is a typedef for * struct page) is implemented in the vvp layer. cl_page is attached to the * ->private pointer of the struct page, together with the setting of * PG_private bit in page->flags, and acquiring additional reference on the @@ -686,7 +710,7 @@ enum cl_page_flags { }; /** - * Fields are protected by the lock on cfs_page_t, except for atomics and + * Fields are protected by the lock on struct page, except for atomics and * immutables. * * \invariant Data type invariants are in cl_page_invariant(). Basically: @@ -713,12 +737,10 @@ struct cl_page { * modified only internally within cl_page.c. Protected by a VM lock. */ const enum cl_page_state cp_state; - /** - * Linkage of pages within some group. Protected by - * cl_page::cp_mutex. */ - cfs_list_t cp_batch; - /** Mutex serializing membership of a page in a batch. */ - cfs_mutex_t cp_mutex; + /** Linkage of pages within group. Protected by cl_page::cp_mutex. */ + cfs_list_t cp_batch; + /** Mutex serializing membership of a page in a batch. */ + struct mutex cp_mutex; /** Linkage of pages within cl_req. */ cfs_list_t cp_flight; /** Transfer error. */ @@ -747,14 +769,14 @@ struct cl_page { struct cl_req *cp_req; /** List of references to this page, for debugging. */ struct lu_ref cp_reference; - /** Link to an object, for debugging. */ - struct lu_ref_link *cp_obj_ref; - /** Link to a queue, for debugging. */ - struct lu_ref_link *cp_queue_ref; - /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */ - unsigned cp_flags; - /** Assigned if doing a sync_io */ - struct cl_sync_io *cp_sync_io; + /** Link to an object, for debugging. */ + struct lu_ref_link cp_obj_ref; + /** Link to a queue, for debugging. */ + struct lu_ref_link cp_queue_ref; + /** Per-page flags from enum cl_page_flags. Protected by a VM lock. 
*/ + unsigned cp_flags; + /** Assigned if doing a sync_io */ + struct cl_sync_io *cp_sync_io; }; /** @@ -813,7 +835,7 @@ enum cl_req_type { */ struct cl_page_operations { /** - * cl_page<->cfs_page_t methods. Only one layer in the stack has to + * cl_page<->struct page methods. Only one layer in the stack has to * implement these. Current code assumes that this functionality is * provided by the topmost layer, see cl_page_disown0() as an example. */ @@ -821,7 +843,7 @@ struct cl_page_operations { /** * \return the underlying VM page. Optional. */ - cfs_page_t *(*cpo_vmpage)(const struct lu_env *env, + struct page *(*cpo_vmpage)(const struct lu_env *env, const struct cl_page_slice *slice); /** * Called when \a io acquires this page into the exclusive @@ -1054,6 +1076,15 @@ struct cl_page_operations { */ int (*cpo_cancel)(const struct lu_env *env, const struct cl_page_slice *slice); + /** + * Write out a page by kernel. This is only called by ll_writepage + * right now. + * + * \see cl_page_flush() + */ + int (*cpo_flush)(const struct lu_env *env, + const struct cl_page_slice *slice, + struct cl_io *io); /** @} transfer */ }; @@ -1062,10 +1093,10 @@ struct cl_page_operations { */ #define CL_PAGE_DEBUG(mask, env, page, format, ...) \ do { \ - static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \ + LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \ \ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ - cl_page_print(env, &__info, lu_cdebug_printer, page); \ + cl_page_print(env, &msgdata, lu_cdebug_printer, page); \ CDEBUG(mask, format , ## __VA_ARGS__); \ } \ } while (0) @@ -1073,16 +1104,26 @@ do { \ /** * Helper macro, dumping shorter information about \a page into a log. */ -#define CL_PAGE_HEADER(mask, env, page, format, ...) \ -do { \ - static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \ - \ - if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ - cl_page_header_print(env, &__info, lu_cdebug_printer, page); \ - CDEBUG(mask, format , ## __VA_ARGS__); \ - } \ +#define CL_PAGE_HEADER(mask, env, page, format, ...) \ +do { \ + LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \ + \ + if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ + cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \ + CDEBUG(mask, format , ## __VA_ARGS__); \ + } \ } while (0) +static inline int __page_in_use(const struct cl_page *page, int refc) +{ + if (page->cp_type == CPT_CACHEABLE) + ++refc; + LASSERT(cfs_atomic_read(&page->cp_ref) > 0); + return (cfs_atomic_read(&page->cp_ref) > refc); +} +#define cl_page_in_use(pg) __page_in_use(pg, 1) +#define cl_page_in_use_noref(pg) __page_in_use(pg, 0) + /** @} cl_page */ /** \addtogroup cl_lock cl_lock @@ -1440,7 +1481,9 @@ enum cl_lock_flags { /** cancellation is pending for this lock. */ CLF_CANCELPEND = 1 << 1, /** destruction is pending for this lock. */ - CLF_DOOMED = 1 << 2 + CLF_DOOMED = 1 << 2, + /** from enqueue RPC reply upcall. */ + CLF_FROM_UPCALL= 1 << 3, }; /** @@ -1534,7 +1577,7 @@ struct cl_lock { * * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait(). */ - cfs_mutex_t cll_guard; + struct mutex cll_guard; cfs_task_t *cll_guarder; int cll_depth; @@ -1581,13 +1624,13 @@ struct cl_lock { * A list of holds on this lock, for debugging. */ struct lu_ref cll_holders; - /** - * A reference for cl_lock::cll_descr::cld_obj. For debugging. - */ - struct lu_ref_link *cll_obj_ref; + /** + * A reference for cl_lock::cll_descr::cld_obj. For debugging. 
+ */ + struct lu_ref_link cll_obj_ref; #ifdef CONFIG_LOCKDEP - /* "dep_map" name is assumed by lockdep.h macros. */ - struct lockdep_map dep_map; + /* "dep_map" name is assumed by lockdep.h macros. */ + struct lockdep_map dep_map; #endif }; @@ -1613,9 +1656,11 @@ struct cl_lock_slice { */ enum cl_lock_transition { /** operation cannot be completed immediately. Wait for state change. */ - CLO_WAIT = 1, + CLO_WAIT = 1, /** operation had to release lock mutex, restart. */ - CLO_REPEAT = 2 + CLO_REPEAT = 2, + /** lower layer re-enqueued. */ + CLO_REENQUEUED = 3, }; /** @@ -1786,14 +1831,22 @@ struct cl_lock_operations { #define CL_LOCK_DEBUG(mask, env, lock, format, ...) \ do { \ - static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \ + LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \ \ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ - cl_lock_print(env, &__info, lu_cdebug_printer, lock); \ + cl_lock_print(env, &msgdata, lu_cdebug_printer, lock); \ CDEBUG(mask, format , ## __VA_ARGS__); \ } \ } while (0) +#define CL_LOCK_ASSERT(expr, env, lock) do { \ + if (likely(expr)) \ + break; \ + \ + CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr); \ + LBUG(); \ +} while (0) + /** @} cl_lock */ /** \addtogroup cl_page_list cl_page_list @@ -1897,6 +1950,11 @@ enum cl_io_type { */ CIT_FAULT, /** + * fsync system call handling + * To write out a range of file + */ + CIT_FSYNC, + /** * Miscellaneous io. This is used for occasional io activity that * doesn't fit into other types. Currently this is used for: * @@ -1943,11 +2001,6 @@ enum cl_io_state { CIS_FINI }; -enum cl_req_priority { - CRP_NORMAL, - CRP_CANCEL -}; - /** * IO state private for a layer. * @@ -2065,8 +2118,7 @@ struct cl_io_operations { int (*cio_submit)(const struct lu_env *env, const struct cl_io_slice *slice, enum cl_req_type crt, - struct cl_2queue *queue, - enum cl_req_priority priority); + struct cl_2queue *queue); } req_op[CRT_NR]; /** * Read missing page. @@ -2152,9 +2204,13 @@ enum cl_enq_flags { */ CEF_NEVER = 0x00000010, /** + * for async glimpse lock. + */ + CEF_AGL = 0x00000020, + /** * mask of enq_flags. */ - CEF_MASK = 0x0000001f + CEF_MASK = 0x0000003f, }; /** @@ -2222,6 +2278,18 @@ enum cl_io_lock_dmd { CILR_NEVER }; +enum cl_fsync_mode { + /** start writeback, do not wait for them to finish */ + CL_FSYNC_NONE = 0, + /** start writeback and wait for them to finish */ + CL_FSYNC_LOCAL = 1, + /** discard all of dirty pages in a specific file range */ + CL_FSYNC_DISCARD = 2, + /** start writeback and make sure they have reached storage before + * return. OST_SYNC RPC must be issued and finished */ + CL_FSYNC_ALL = 3 +}; + struct cl_io_rw_common { loff_t crw_pos; size_t crw_count; @@ -2256,11 +2324,6 @@ struct cl_io { struct cl_lockset ci_lockset; /** lock requirements, this is just a help info for sublayers. */ enum cl_io_lock_dmd ci_lockreq; - /** - * This io has held grouplock, to inform sublayers that - * don't do lockless i/o. - */ - int ci_no_srvlock; union { struct cl_rd_io { struct cl_io_rw_common rd; @@ -2268,6 +2331,7 @@ struct cl_io { struct cl_wr_io { struct cl_io_rw_common wr; int wr_append; + int wr_sync; } ci_wr; struct cl_io_rw_common ci_rw; struct cl_setattr_io { @@ -2280,18 +2344,57 @@ struct cl_io { pgoff_t ft_index; /** bytes valid byte on a faulted page. */ int ft_nob; - /** writable page? */ + /** writable page? for nopage() only */ int ft_writable; /** page of an executable? 
*/ int ft_executable; + /** page_mkwrite() */ + int ft_mkwrite; /** resulting page */ struct cl_page *ft_page; } ci_fault; + struct cl_fsync_io { + loff_t fi_start; + loff_t fi_end; + struct obd_capa *fi_capa; + /** file system level fid */ + struct lu_fid *fi_fid; + enum cl_fsync_mode fi_mode; + /* how many pages were written/discarded */ + unsigned int fi_nr_written; + } ci_fsync; } u; struct cl_2queue ci_queue; size_t ci_nob; int ci_result; - int ci_continue; + unsigned int ci_continue:1, + /** + * This io has held grouplock, to inform sublayers that + * don't do lockless i/o. + */ + ci_no_srvlock:1, + /** + * The whole IO need to be restarted because layout has been changed + */ + ci_need_restart:1, + /** + * to not refresh layout - the IO issuer knows that the layout won't + * change(page operations, layout change causes all page to be + * discarded), or it doesn't matter if it changes(sync). + */ + ci_ignore_layout:1, + /** + * Check if layout changed after the IO finishes. Mainly for HSM + * requirement. If IO occurs to openning files, it doesn't need to + * verify layout because HSM won't release openning files. + * Right now, only two opertaions need to verify layout: glimpse + * and setattr. + */ + ci_verify_layout:1, + /** + * file is released, restore has to to be triggered by vvp layer + */ + ci_restore_needed:1; /** * Number of pages owned by this IO. For invariant checking. */ @@ -2366,10 +2469,12 @@ struct cl_io { * Per-transfer attributes. */ struct cl_req_attr { - /** Generic attributes for the server consumption. */ - struct obdo *cra_oa; - /** Capability. */ - struct obd_capa *cra_capa; + /** Generic attributes for the server consumption. */ + struct obdo *cra_oa; + /** Capability. */ + struct obd_capa *cra_capa; + /** Jobid */ + char cra_jobid[JOBSTATS_JOBID_SIZE]; }; /** @@ -2415,11 +2520,11 @@ struct cl_req_operations { * A per-object state that (potentially multi-object) transfer request keeps. */ struct cl_req_obj { - /** object itself */ - struct cl_object *ro_obj; - /** reference to cl_req_obj::ro_obj. For debugging. */ - struct lu_ref_link *ro_obj_ref; - /* something else? Number of pages for a given object? */ + /** object itself */ + struct cl_object *ro_obj; + /** reference to cl_req_obj::ro_obj. For debugging. */ + struct lu_ref_link ro_obj_ref; + /* something else? Number of pages for a given object? */ }; /** @@ -2472,22 +2577,29 @@ struct cl_req_slice { /* @} cl_req */ +enum cache_stats_item { + /** how many cache lookups were performed */ + CS_lookup = 0, + /** how many times cache lookup resulted in a hit */ + CS_hit, + /** how many entities are in the cache right now */ + CS_total, + /** how many entities in the cache are actively used (and cannot be + * evicted) right now */ + CS_busy, + /** how many entities were created at all */ + CS_create, + CS_NR +}; + +#define CS_NAMES { "lookup", "hit", "total", "busy", "create" } + /** * Stats for a generic cache (similar to inode, lu_object, etc. caches). 
*/ struct cache_stats { const char *cs_name; - /** how many entities were created at all */ - cfs_atomic_t cs_created; - /** how many cache lookups were performed */ - cfs_atomic_t cs_lookup; - /** how many times cache lookup resulted in a hit */ - cfs_atomic_t cs_hit; - /** how many entities are in the cache right now */ - cfs_atomic_t cs_total; - /** how many entities in the cache are actively used (and cannot be - * evicted) right now */ - cfs_atomic_t cs_busy; + cfs_atomic_t cs_stats[CS_NR]; }; /** These are not exported so far */ @@ -2650,19 +2762,39 @@ static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1) return cl_object_header(o0) == cl_object_header(o1); } +static inline void cl_object_page_init(struct cl_object *clob, int size) +{ + clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize; + cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8); +} + +static inline void *cl_object_page_slice(struct cl_object *clob, + struct cl_page *page) +{ + return (void *)((char *)page + clob->co_slice_off); +} + /** @} cl_object */ /** \defgroup cl_page cl_page * @{ */ -struct cl_page *cl_page_lookup(struct cl_object_header *hdr, +enum { + CLP_GANG_OKAY = 0, + CLP_GANG_RESCHED, + CLP_GANG_AGAIN, + CLP_GANG_ABORT +}; + +/* callback of cl_page_gang_lookup() */ +typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *, + struct cl_page *, void *); +int cl_page_gang_lookup (const struct lu_env *env, + struct cl_object *obj, + struct cl_io *io, + pgoff_t start, pgoff_t end, + cl_page_gang_cb_t cb, void *cbdata); +struct cl_page *cl_page_lookup (struct cl_object_header *hdr, pgoff_t index); -void cl_page_gang_lookup(const struct lu_env *env, - struct cl_object *obj, - struct cl_io *io, - pgoff_t start, pgoff_t end, - struct cl_page_list *plist, - int nonblock, - int *resched); struct cl_page *cl_page_find (const struct lu_env *env, struct cl_object *obj, pgoff_t idx, struct page *vmpage, @@ -2680,11 +2812,10 @@ void cl_page_print (const struct lu_env *env, void *cookie, void cl_page_header_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_page *pg); -cfs_page_t *cl_page_vmpage (const struct lu_env *env, +struct page *cl_page_vmpage (const struct lu_env *env, struct cl_page *page); -struct cl_page *cl_vmpage_page (cfs_page_t *vmpage, struct cl_object *obj); +struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj); struct cl_page *cl_page_top (struct cl_page *page); -int cl_is_page (const void *addr); const struct cl_page_slice *cl_page_at(const struct cl_page *page, const struct lu_device_type *dtype); @@ -2728,6 +2859,8 @@ int cl_page_cache_add (const struct lu_env *env, struct cl_io *io, void cl_page_clip (const struct lu_env *env, struct cl_page *pg, int from, int to); int cl_page_cancel (const struct lu_env *env, struct cl_page *page); +int cl_page_flush (const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); /** @} transfer */ @@ -2774,9 +2907,20 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io, struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io, const struct cl_lock_descr *need, const char *scope, const void *source); -struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct cl_lock *except, - int pending, int canceld); +struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env, + struct cl_object *obj, pgoff_t index, + struct cl_lock *except, int pending, + int canceld); 
+static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env, + struct cl_object *obj, + struct cl_page *page, + struct cl_lock *except, + int pending, int canceld) +{ + LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj)); + return cl_lock_at_pgoff(env, obj, page->cp_index, except, + pending, canceld); +} const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock, const struct lu_device_type *dtype); @@ -2786,21 +2930,24 @@ void cl_lock_get_trust (struct cl_lock *lock); void cl_lock_put (const struct lu_env *env, struct cl_lock *lock); void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source); +void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock, + const char *scope, const void *source); void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source); void cl_lock_release (const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source); void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock); +void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock); enum cl_lock_state cl_lock_intransit(const struct lu_env *env, struct cl_lock *lock); - void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock, enum cl_lock_state state); - int cl_lock_is_intransit(struct cl_lock *lock); +int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock, + int keep_mutex); + /** \name statemachine statemachine * Interface to lock state machine consists of 3 parts: * @@ -2842,6 +2989,7 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock); int cl_wait_try (const struct lu_env *env, struct cl_lock *lock); int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic); + /** @} statemachine */ void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock); @@ -2856,8 +3004,7 @@ int cl_lock_mutex_try (const struct lu_env *env, struct cl_lock *lock); void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock); int cl_lock_is_mutexed (struct cl_lock *lock); int cl_lock_nr_mutexed (const struct lu_env *env); -int cl_lock_page_out (const struct lu_env *env, struct cl_lock *lock, - int discard); +int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock); int cl_lock_ext_match (const struct cl_lock_descr *has, const struct cl_lock_descr *need); int cl_lock_descr_match(const struct cl_lock_descr *has, @@ -2881,7 +3028,6 @@ void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock); void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock); void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error); void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait); -int cl_is_lock (const void *addr); unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock); @@ -2916,11 +3062,10 @@ int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, int cl_io_commit_write (const struct lu_env *env, struct cl_io *io, struct cl_page *page, unsigned from, unsigned to); int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io, - enum cl_req_type iot, struct cl_2queue *queue, - enum cl_req_priority priority); + enum cl_req_type iot, struct cl_2queue *queue); int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io, - enum 
cl_req_type iot, struct cl_2queue *queue, - enum cl_req_priority priority, long timeout); + enum cl_req_type iot, struct cl_2queue *queue, + long timeout); void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io, size_t nob); int cl_io_cancel (const struct lu_env *env, struct cl_io *io, @@ -2935,6 +3080,16 @@ static inline int cl_io_is_append(const struct cl_io *io) return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append; } +static inline int cl_io_is_sync_write(const struct cl_io *io) +{ + return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync; +} + +static inline int cl_io_is_mkwrite(const struct cl_io *io) +{ + return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite; +} + /** * True, iff \a io is a truncate(2). */ @@ -2964,6 +3119,15 @@ do { \ * @{ */ /** + * Last page in the page list. + */ +static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist) +{ + LASSERT(plist->pl_nr > 0); + return cfs_list_entry(plist->pl_pages.prev, struct cl_page, cp_batch); +} + +/** * Iterate over pages in a page list. */ #define cl_page_list_for_each(page, list) \ @@ -3031,12 +3195,14 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret); * anchor and wakes up waiting thread when transfer is complete. */ struct cl_sync_io { - /** number of pages yet to be transferred. */ - cfs_atomic_t csi_sync_nr; - /** completion to be signaled when transfer is complete. */ - cfs_waitq_t csi_waitq; - /** error code. */ - int csi_sync_rc; + /** number of pages yet to be transferred. */ + cfs_atomic_t csi_sync_nr; + /** error code. */ + int csi_sync_rc; + /** barrier of destroy this structure */ + cfs_atomic_t csi_barrier; + /** completion to be signaled when transfer is complete. */ + cfs_waitq_t csi_waitq; }; void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages); @@ -3101,7 +3267,6 @@ void *cl_env_reenter (void); void cl_env_reexit (void *cookie); void cl_env_implant (struct lu_env *env, int *refcheck); void cl_env_unplant (struct lu_env *env, int *refcheck); -unsigned cl_env_cache_purge(unsigned nr); /** @} cl_env */ @@ -3116,4 +3281,7 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site, struct lu_device *next); /** @} clio */ +int cl_global_init(void); +void cl_global_fini(void); + #endif /* _LINUX_CL_OBJECT_H */
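
Note on the new slice-offset helpers: the patch adds cl_object::co_slice_off, cl_object_header::coh_page_bufsize, and the inline helpers cl_object_page_init() and cl_object_page_slice(), which let a layer find its page slice by a fixed offset inside the cl_page buffer rather than by walking per-page links. Below is a minimal sketch of how a client layer might use them; the layer name "foo", its page structure, and the two wrapper functions are illustrative assumptions and are not part of this patch.

/* Sketch only: include path follows the usual Lustre convention and may
 * differ by tree layout. */
#include <cl_object.h>

struct foo_page {
	struct cl_page_slice	fp_cl;		/* generic page slice */
	int			fp_private;	/* hypothetical layer state */
};

/* At object-initialization time: reserve room in every cl_page allocated
 * for this object and record this layer's offset in co_slice_off.  Each
 * layer's call accumulates into coh_page_bufsize, 8-byte aligned. */
static void foo_object_page_init(struct cl_object *obj)
{
	cl_object_page_init(obj, sizeof(struct foo_page));
}

/* Later, locate this layer's slice inside a cl_page by simple pointer
 * arithmetic on the recorded offset. */
static struct foo_page *foo_page_slice(struct cl_object *obj,
				       struct cl_page *page)
{
	return cl_object_page_slice(obj, page);
}

The design choice this reflects, as far as the patch shows, is that slice placement becomes a per-object constant computed once at setup, so per-page lookups reduce to an addition instead of a list or pointer traversal.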