X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Fcl_object.h;h=ea0597af3a9415de0a722ddeb3ff8c8fe5854b3c;hp=9f0396805ba427372d6abf5d2199c88f8b5bec72;hb=ea1a05b99c63689771fd678d10e9748ce18e129c;hpb=bd87398d3b5793a8939731cf5b3f11086d64a8ed diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h index 9f03968..ea0597a 100644 --- a/lustre/include/cl_object.h +++ b/lustre/include/cl_object.h @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -26,8 +24,10 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -98,7 +98,6 @@ * super-class definitions. */ #include -#include #ifdef __KERNEL__ # include # include @@ -251,6 +250,8 @@ struct cl_object { struct lu_object co_lu; /** per-object-layer operations */ const struct cl_object_operations *co_ops; + /** offset of page slice in cl_page buffer */ + int co_slice_off; }; /** @@ -276,6 +277,26 @@ struct cl_object_conf { * VFS inode. This is consumed by vvp. */ struct inode *coc_inode; + /** + * Layout lock handle. + */ + struct ldlm_lock *coc_lock; + /** + * Operation to handle layout, OBJECT_CONF_XYZ. + */ + int coc_opc; +}; + +enum { + /** configure layout, set up a new stripe, must be called while + * holding layout lock. */ + OBJECT_CONF_SET = 0, + /** invalidate the current stripe configuration due to losing + * layout lock. */ + OBJECT_CONF_INVALIDATE = 1, + /** wait for old layout to go away so that new layout can be + * set up. */ + OBJECT_CONF_WAIT = 2 }; /** @@ -299,10 +320,8 @@ struct cl_object_operations { * \retval valid-pointer pointer to already existing referenced page * to be used instead of newly created. */ - struct cl_page *(*coo_page_init)(const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, - cfs_page_t *vmpage); + int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj, + struct cl_page *page, struct page *vmpage); /** * Initialize lock slice for this layer. Called top-to-bottom through * every object layer when a new cl_lock is instantiated. Layer @@ -368,6 +387,12 @@ struct cl_object_operations { */ int (*coo_glimpse)(const struct lu_env *env, const struct cl_object *obj, struct ost_lvb *lvb); + /** + * Object prune method. Called when the layout is going to change on + * this object, therefore each layer has to clean up their cache, + * mainly pages and locks. + */ + int (*coo_prune)(const struct lu_env *env, struct cl_object *obj); }; /** @@ -382,17 +407,11 @@ struct cl_object_header { * mostly useless otherwise. */ /** @{ */ - /** Lock protecting page tree. */ - spinlock_t coh_page_guard; - /** Lock protecting lock list. */ - spinlock_t coh_lock_guard; + /** Lock protecting lock list. */ + spinlock_t coh_lock_guard; /** @} locks */ - /** Radix tree of cl_page's, cached for this object. */ - struct radix_tree_root coh_tree; - /** # of pages in radix tree. */ - unsigned long coh_pages; /** List of cl_lock's granted for this object. */ - struct list_head coh_locks; + cfs_list_t coh_locks; /** * Parent object. 
It is assumed that an object has a well-defined @@ -409,30 +428,34 @@ struct cl_object_header { * * \todo XXX this can be read/write lock if needed. */ - spinlock_t coh_attr_guard; - /** - * Number of objects above this one: 0 for a top-object, 1 for its - * sub-object, etc. - */ - unsigned coh_nesting; + spinlock_t coh_attr_guard; + /** + * Size of cl_page + page slices + */ + unsigned short coh_page_bufsize; + /** + * Number of objects above this one: 0 for a top-object, 1 for its + * sub-object, etc. + */ + unsigned char coh_nesting; }; /** * Helper macro: iterate over all layers of the object \a obj, assigning every * layer top-to-bottom to \a slice. */ -#define cl_object_for_each(slice, obj) \ - list_for_each_entry((slice), \ - &(obj)->co_lu.lo_header->loh_layers, \ - co_lu.lo_linkage) +#define cl_object_for_each(slice, obj) \ + cfs_list_for_each_entry((slice), \ + &(obj)->co_lu.lo_header->loh_layers, \ + co_lu.lo_linkage) /** * Helper macro: iterate over all layers of the object \a obj, assigning every * layer bottom-to-top to \a slice. */ -#define cl_object_for_each_reverse(slice, obj) \ - list_for_each_entry_reverse((slice), \ - &(obj)->co_lu.lo_header->loh_layers, \ - co_lu.lo_linkage) +#define cl_object_for_each_reverse(slice, obj) \ + cfs_list_for_each_entry_reverse((slice), \ + &(obj)->co_lu.lo_header->loh_layers, \ + co_lu.lo_linkage) /** @} cl_object */ #ifndef pgoff_t @@ -457,7 +480,7 @@ struct cl_object_header { * corresponding radix tree at the corresponding logical offset. * * cl_page is associated with VM page of the hosting environment (struct - * page in Linux kernel, for example), cfs_page_t. It is assumed, that this + * page in Linux kernel, for example), struct page. It is assumed, that this * association is implemented by one of cl_page layers (top layer in the * current design) that * @@ -467,7 +490,7 @@ struct cl_object_header { * - translates state (page flag bits) and locking between lustre and * environment. * - * The association between cl_page and cfs_page_t is immutable and + * The association between cl_page and struct page is immutable and * established when cl_page is created. * * cl_page can be "owned" by a particular cl_io (see below), guaranteeing @@ -476,7 +499,7 @@ struct cl_object_header { * eviction of the page from the memory). Note, that in general cl_io * cannot be identified with a particular thread, and page ownership is not * exactly equal to the current thread holding a lock on the page. Layer - * implementing association between cl_page and cfs_page_t has to implement + * implementing association between cl_page and struct page has to implement * ownership on top of available synchronization mechanisms. * * While lustre client maintains the notion of an page ownership by io, @@ -510,7 +533,7 @@ struct cl_object_header { * - by doing a lookup in the cl_object radix tree, protected by the * spin-lock; * - * - by starting from VM-locked cfs_page_t and following some + * - by starting from VM-locked struct page and following some * hosting environment method (e.g., following ->private pointer in * the case of Linux kernel), see cl_vmpage_page(); * @@ -537,7 +560,7 @@ struct cl_object_header { * * Linux Kernel implementation. * - * Binding between cl_page and cfs_page_t (which is a typedef for + * Binding between cl_page and struct page (which is a typedef for * struct page) is implemented in the vvp layer. 
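/*
 * A minimal, illustrative sketch (hypothetical, not part of the patch itself)
 * of how a client layer could use the coh_page_bufsize/co_slice_off machinery
 * introduced above: reserve room for its page slice once per object, then
 * locate that slice inside the pre-allocated cl_page buffer from its
 * coo_page_init() method.  The "foo" layer, its fields and foo_page_ops are
 * made up for this example; cl_page_slice_add() is assumed to keep its usual
 * four-argument form.
 */
static const struct cl_page_operations foo_page_ops;	/* layer methods, elided */

struct foo_page {
	struct cl_page_slice	 fp_cl;
	struct page		*fp_vmpage;
};

/* Called once while the layer initializes its cl_object. */
static void foo_object_prep(struct cl_object *obj)
{
	/* Grow the per-object cl_page buffer by the size of our slice and
	 * record where the slice will live (obj->co_slice_off). */
	cl_object_page_init(obj, sizeof(struct foo_page));
}

static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
			 struct cl_page *page, struct page *vmpage)
{
	/* The slice sits inside the cl_page allocation at co_slice_off. */
	struct foo_page *fp = cl_object_page_slice(obj, page);

	fp->fp_vmpage = vmpage;
	cl_page_slice_add(page, &fp->fp_cl, obj, &foo_page_ops);
	return 0;
}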
cl_page is attached to the * ->private pointer of the struct page, together with the setting of * PG_private bit in page->flags, and acquiring additional reference on the @@ -686,7 +709,7 @@ enum cl_page_flags { }; /** - * Fields are protected by the lock on cfs_page_t, except for atomics and + * Fields are protected by the lock on struct page, except for atomics and * immutables. * * \invariant Data type invariants are in cl_page_invariant(). Basically: @@ -696,13 +719,13 @@ enum cl_page_flags { */ struct cl_page { /** Reference counter. */ - atomic_t cp_ref; + cfs_atomic_t cp_ref; /** An object this page is a part of. Immutable after creation. */ struct cl_object *cp_obj; /** Logical page index within the object. Immutable after creation. */ pgoff_t cp_index; /** List of slices. Immutable after creation. */ - struct list_head cp_layers; + cfs_list_t cp_layers; /** Parent page, NULL for top-level page. Immutable after creation. */ struct cl_page *cp_parent; /** Lower-layer page. NULL for bottommost page. Immutable after @@ -713,14 +736,12 @@ struct cl_page { * modified only internally within cl_page.c. Protected by a VM lock. */ const enum cl_page_state cp_state; - /** - * Linkage of pages within some group. Protected by - * cl_page::cp_mutex. */ - struct list_head cp_batch; - /** Mutex serializing membership of a page in a batch. */ - struct mutex cp_mutex; + /** Linkage of pages within group. Protected by cl_page::cp_mutex. */ + cfs_list_t cp_batch; + /** Mutex serializing membership of a page in a batch. */ + struct mutex cp_mutex; /** Linkage of pages within cl_req. */ - struct list_head cp_flight; + cfs_list_t cp_flight; /** Transfer error. */ int cp_error; @@ -738,7 +759,7 @@ struct cl_page { /** * Debug information, the task is owning the page. */ - cfs_task_t *cp_task; + struct task_struct *cp_task; /** * Owning IO request in cl_page_state::CPS_PAGEOUT and * cl_page_state::CPS_PAGEIN states. This field is maintained only in @@ -747,14 +768,14 @@ struct cl_page { struct cl_req *cp_req; /** List of references to this page, for debugging. */ struct lu_ref cp_reference; - /** Link to an object, for debugging. */ - struct lu_ref_link *cp_obj_ref; - /** Link to a queue, for debugging. */ - struct lu_ref_link *cp_queue_ref; - /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */ - unsigned cp_flags; - /** Assigned if doing a sync_io */ - struct cl_sync_io *cp_sync_io; + /** Link to an object, for debugging. */ + struct lu_ref_link cp_obj_ref; + /** Link to a queue, for debugging. */ + struct lu_ref_link cp_queue_ref; + /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */ + unsigned cp_flags; + /** Assigned if doing a sync_io */ + struct cl_sync_io *cp_sync_io; }; /** @@ -771,7 +792,7 @@ struct cl_page_slice { struct cl_object *cpl_obj; const struct cl_page_operations *cpl_ops; /** Linkage into cl_page::cp_layers. Immutable after creation. */ - struct list_head cpl_linkage; + cfs_list_t cpl_linkage; }; /** @@ -813,7 +834,7 @@ enum cl_req_type { */ struct cl_page_operations { /** - * cl_page<->cfs_page_t methods. Only one layer in the stack has to + * cl_page<->struct page methods. Only one layer in the stack has to * implement these. Current code assumes that this functionality is * provided by the topmost layer, see cl_page_disown0() as an example. */ @@ -821,7 +842,7 @@ struct cl_page_operations { /** * \return the underlying VM page. Optional. 
*/ - cfs_page_t *(*cpo_vmpage)(const struct lu_env *env, + struct page *(*cpo_vmpage)(const struct lu_env *env, const struct cl_page_slice *slice); /** * Called when \a io acquires this page into the exclusive @@ -832,8 +853,9 @@ struct cl_page_operations { * \see cl_page_own() * \see vvp_page_own(), lov_page_own() */ - void (*cpo_own)(const struct lu_env *env, - const struct cl_page_slice *slice, struct cl_io *io); + int (*cpo_own)(const struct lu_env *env, + const struct cl_page_slice *slice, + struct cl_io *io, int nonblock); /** Called when ownership it yielded. Optional. * * \see cl_page_disown() @@ -861,7 +883,7 @@ struct cl_page_operations { const struct cl_page_slice *slice, struct cl_io *io); /** - * Announces whether the page contains valid data or not by @uptodate. + * Announces whether the page contains valid data or not by \a uptodate. * * \see cl_page_export() * \see vvp_page_export() @@ -869,14 +891,6 @@ struct cl_page_operations { void (*cpo_export)(const struct lu_env *env, const struct cl_page_slice *slice, int uptodate); /** - * Unmaps page from the user space (if it is mapped). - * - * \see cl_page_unmap() - * \see vvp_page_unmap() - */ - int (*cpo_unmap)(const struct lu_env *env, - const struct cl_page_slice *slice, struct cl_io *io); - /** * Checks whether underlying VM page is locked (in the suitable * sense). Used for assertions. * @@ -999,26 +1013,6 @@ struct cl_page_operations { */ int (*cpo_make_ready)(const struct lu_env *env, const struct cl_page_slice *slice); - /** - * Announce that this page is to be written out - * opportunistically, that is, page is dirty, it is not - * necessary to start write-out transfer right now, but - * eventually page has to be written out. - * - * Main caller of this is the write path (see - * vvp_io_commit_write()), using this method to build a - * "transfer cache" from which large transfers are then - * constructed by the req-formation engine. - * - * \todo XXX it would make sense to add page-age tracking - * semantics here, and to oblige the req-formation engine to - * send the page out not later than it is too old. - * - * \see cl_page_cache_add() - */ - int (*cpo_cache_add)(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *io); } io[CRT_NR]; /** * Tell transfer engine that only [to, from] part of a page should be @@ -1053,6 +1047,15 @@ struct cl_page_operations { */ int (*cpo_cancel)(const struct lu_env *env, const struct cl_page_slice *slice); + /** + * Write out a page by kernel. This is only called by ll_writepage + * right now. + * + * \see cl_page_flush() + */ + int (*cpo_flush)(const struct lu_env *env, + const struct cl_page_slice *slice, + struct cl_io *io); /** @} transfer */ }; @@ -1061,10 +1064,9 @@ struct cl_page_operations { */ #define CL_PAGE_DEBUG(mask, env, page, format, ...) \ do { \ - static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \ - \ - if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ - cl_page_print(env, &__info, lu_cdebug_printer, page); \ + if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ + LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \ + cl_page_print(env, &msgdata, lu_cdebug_printer, page); \ CDEBUG(mask, format , ## __VA_ARGS__); \ } \ } while (0) @@ -1072,16 +1074,25 @@ do { \ /** * Helper macro, dumping shorter information about \a page into a log. */ -#define CL_PAGE_HEADER(mask, env, page, format, ...) 
\ -do { \ - static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \ - \ - if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ - cl_page_header_print(env, &__info, lu_cdebug_printer, page); \ - CDEBUG(mask, format , ## __VA_ARGS__); \ - } \ +#define CL_PAGE_HEADER(mask, env, page, format, ...) \ +do { \ + if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ + LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \ + cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \ + CDEBUG(mask, format , ## __VA_ARGS__); \ + } \ } while (0) +static inline int __page_in_use(const struct cl_page *page, int refc) +{ + if (page->cp_type == CPT_CACHEABLE) + ++refc; + LASSERT(cfs_atomic_read(&page->cp_ref) > 0); + return (cfs_atomic_read(&page->cp_ref) > refc); +} +#define cl_page_in_use(pg) __page_in_use(pg, 1) +#define cl_page_in_use_noref(pg) __page_in_use(pg, 0) + /** @} cl_page */ /** \addtogroup cl_lock cl_lock @@ -1300,6 +1311,11 @@ struct cl_lock_descr { __u64 cld_gid; /** Lock mode. */ enum cl_lock_mode cld_mode; + /** + * flags to enqueue lock. A combination of bit-flags from + * enum cl_enq_flags. + */ + __u32 cld_enq_flags; }; #define DDESCR "%s(%d):[%lu, %lu]" @@ -1336,15 +1352,15 @@ const char *cl_lock_mode_name(const enum cl_lock_mode mode); * | | V * | | HELD<---------+ * | | | | - * | | | | + * | | | | cl_use_try() * | | cl_unuse_try() | | * | | | | - * | | V | cached - * | +------------>UNLOCKING (*) | lock found - * | | | - * | cl_unuse_try() | | + * | | V ---+ + * | +------------>INTRANSIT (D) <--+ * | | | + * | cl_unuse_try() | | cached lock found * | | | cl_use_try() + * | | | * | V | * +------------------CACHED---------+ * | @@ -1363,6 +1379,8 @@ const char *cl_lock_mode_name(const enum cl_lock_mode mode); * * (C) is the point where Cancellation call-back is invoked. * + * (D) is the transit state which means the lock is changing. + * * Transition to FREEING state is possible from any other state in the * diagram in case of unrecoverable error. * @@ -1381,9 +1399,6 @@ const char *cl_lock_mode_name(const enum cl_lock_mode mode); * handled, and is in ENQUEUED state after enqueue to S2 has been sent (note * that in this case, sub-locks move from state to state, and top-lock remains * in the same state). - * - * Separate UNLOCKING state is needed to maintain an invariant that in HELD - * state lock is immediately ready for use. */ enum cl_lock_state { /** @@ -1405,10 +1420,16 @@ enum cl_lock_state { */ CLS_HELD, /** - * Lock is in the transition from CLS_HELD to CLS_CACHED. Lock is in - * this state only while cl_unuse() is executing against it. + * This state is used to mark the lock is being used, or unused. + * We need this state because the lock may have several sublocks, + * so it's impossible to have an atomic way to bring all sublocks + * into CLS_HELD state at use case, or all sublocks to CLS_CACHED + * at unuse case. + * If a thread is referring to a lock, and it sees the lock is in this + * state, it must wait for the lock. + * See state diagram for details. */ - CLS_UNLOCKING, + CLS_INTRANSIT, /** * Lock granted, not used. */ @@ -1430,8 +1451,8 @@ enum cl_lock_flags { CLF_CANCELPEND = 1 << 1, /** destruction is pending for this lock. */ CLF_DOOMED = 1 << 2, - /** State update is pending. */ - CLF_STATE = 1 << 3 + /** from enqueue RPC reply upcall. */ + CLF_FROM_UPCALL= 1 << 3, }; /** @@ -1467,7 +1488,7 @@ struct cl_lock_closure { * List of enclosed locks, so far. Locks are linked here through * cl_lock::cll_inclosure. 
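/*
 * Illustrative sketch (hypothetical, not part of the patch itself) of the
 * CLS_INTRANSIT protocol described in the state diagram above: a thread that
 * has to move a lock between CLS_CACHED and CLS_HELD parks it in
 * CLS_INTRANSIT first, so that concurrent users wait on cll_wq instead of
 * observing a half-changed lock.  foo_do_blocking_work() is a made-up
 * placeholder; cl_lock_mutex_get/put() and cl_lock_intransit/extransit() are
 * declared later in this header.
 */
static int foo_do_blocking_work(const struct lu_env *env, struct cl_lock *lock);

static int foo_lock_transition(const struct lu_env *env, struct cl_lock *lock,
			       enum cl_lock_state target)
{
	enum cl_lock_state saved;
	int rc;

	cl_lock_mutex_get(env, lock);
	/* enter the transit state; "saved" is restored if the work fails */
	saved = cl_lock_intransit(env, lock);

	rc = foo_do_blocking_work(env, lock);	/* may sleep, may drop mutex */

	/* leave CLS_INTRANSIT: to the new state on success, back on failure */
	cl_lock_extransit(env, lock, rc == 0 ? target : saved);
	cl_lock_mutex_put(env, lock);
	return rc;
}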
*/ - struct list_head clc_list; + cfs_list_t clc_list; /** * True iff closure is in a `wait' mode. This determines what * cl_lock_enclosure() does when a lock L to be added to the closure @@ -1493,14 +1514,14 @@ struct cl_lock_closure { */ struct cl_lock { /** Reference counter. */ - atomic_t cll_ref; + cfs_atomic_t cll_ref; /** List of slices. Immutable after creation. */ - struct list_head cll_layers; + cfs_list_t cll_layers; /** * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected * by cl_lock::cll_descr::cld_obj::coh_lock_guard. */ - struct list_head cll_linkage; + cfs_list_t cll_linkage; /** * Parameters of this lock. Protected by * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within @@ -1509,26 +1530,30 @@ struct cl_lock { */ struct cl_lock_descr cll_descr; /** Protected by cl_lock::cll_guard. */ - enum cl_lock_state cll_state; - /** signals state changes. */ - cfs_waitq_t cll_wq; - /** - * Recursive lock, most fields in cl_lock{} are protected by this. - * - * Locking rules: this mutex is never held across network - * communication, except when lock is being canceled. - * - * Lock ordering: a mutex of a sub-lock is taken first, then a mutex - * on a top-lock. Other direction is implemented through a - * try-lock-repeat loop. Mutices of unrelated locks can be taken only - * by try-locking. - * - * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait(). - */ - struct mutex cll_guard; - cfs_task_t *cll_guarder; + enum cl_lock_state cll_state; + /** signals state changes. */ + wait_queue_head_t cll_wq; + /** + * Recursive lock, most fields in cl_lock{} are protected by this. + * + * Locking rules: this mutex is never held across network + * communication, except when lock is being canceled. + * + * Lock ordering: a mutex of a sub-lock is taken first, then a mutex + * on a top-lock. Other direction is implemented through a + * try-lock-repeat loop. Mutices of unrelated locks can be taken only + * by try-locking. + * + * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait(). + */ + struct mutex cll_guard; + struct task_struct *cll_guarder; int cll_depth; + /** + * the owner for INTRANSIT state + */ + struct task_struct *cll_intransit_owner; int cll_error; /** * Number of holds on a lock. A hold prevents a lock from being @@ -1555,7 +1580,11 @@ struct cl_lock { * * \see cl_lock_closure */ - struct list_head cll_inclosure; + cfs_list_t cll_inclosure; + /** + * Confict lock at queuing time. + */ + struct cl_lock *cll_conflict; /** * A list of references to this lock, for debugging. */ @@ -1564,13 +1593,13 @@ struct cl_lock { * A list of holds on this lock, for debugging. */ struct lu_ref cll_holders; - /** - * A reference for cl_lock::cll_descr::cld_obj. For debugging. - */ - struct lu_ref_link *cll_obj_ref; + /** + * A reference for cl_lock::cll_descr::cld_obj. For debugging. + */ + struct lu_ref_link cll_obj_ref; #ifdef CONFIG_LOCKDEP - /* "dep_map" name is assumed by lockdep.h macros. */ - struct lockdep_map dep_map; + /* "dep_map" name is assumed by lockdep.h macros. */ + struct lockdep_map dep_map; #endif }; @@ -1586,7 +1615,7 @@ struct cl_lock_slice { struct cl_object *cls_obj; const struct cl_lock_operations *cls_ops; /** Linkage into cl_lock::cll_layers. Immutable after creation. */ - struct list_head cls_linkage; + cfs_list_t cls_linkage; }; /** @@ -1596,9 +1625,11 @@ struct cl_lock_slice { */ enum cl_lock_transition { /** operation cannot be completed immediately. Wait for state change. 
*/ - CLO_WAIT = 1, + CLO_WAIT = 1, /** operation had to release lock mutex, restart. */ - CLO_REPEAT = 2 + CLO_REPEAT = 2, + /** lower layer re-enqueued. */ + CLO_REENQUEUED = 3, }; /** @@ -1658,8 +1689,9 @@ struct cl_lock_operations { * usual return values of lock state-machine methods, this can return * -ESTALE to indicate that lock cannot be returned to the cache, and * has to be re-initialized. + * unuse is a one-shot operation, so it must NOT return CLO_WAIT. * - * \see ccc_lock_unlock(), lov_lock_unlock(), osc_lock_unlock() + * \see ccc_lock_unuse(), lov_lock_unuse(), osc_lock_unuse() */ int (*clo_unuse)(const struct lu_env *env, const struct cl_lock_slice *slice); @@ -1731,7 +1763,7 @@ struct cl_lock_operations { const struct cl_lock_slice *slice, struct cl_lock_closure *closure); /** - * Executed top-to-bottom when lock description changes (e.g., as a + * Executed bottom-to-top when lock description changes (e.g., as a * result of server granting more generous lock than was requested). * * \see lovsub_lock_modify() @@ -1768,14 +1800,21 @@ struct cl_lock_operations { #define CL_LOCK_DEBUG(mask, env, lock, format, ...) \ do { \ - static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \ - \ - if (cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ - cl_lock_print(env, &__info, lu_cdebug_printer, lock); \ + if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \ + LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \ + cl_lock_print(env, &msgdata, lu_cdebug_printer, lock); \ CDEBUG(mask, format , ## __VA_ARGS__); \ } \ } while (0) +#define CL_LOCK_ASSERT(expr, env, lock) do { \ + if (likely(expr)) \ + break; \ + \ + CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr); \ + LBUG(); \ +} while (0) + /** @} cl_lock */ /** \addtogroup cl_page_list cl_page_list @@ -1798,12 +1837,12 @@ do { \ * @{ */ struct cl_page_list { - unsigned pl_nr; - struct list_head pl_pages; - cfs_task_t *pl_owner; + unsigned pl_nr; + cfs_list_t pl_pages; + struct task_struct *pl_owner; }; -/** \addtogroup cl_page_list cl_page_list +/** * A 2-queue of pages. A convenience data-type for common use case, 2-queue * contains an incoming page list and an outgoing page list. */ @@ -1872,13 +1911,18 @@ enum cl_io_type { CIT_READ, /** write system call */ CIT_WRITE, - /** truncate system call */ - CIT_TRUNC, + /** truncate, utime system calls */ + CIT_SETATTR, /** * page fault handling */ CIT_FAULT, /** + * fsync system call handling + * To write out a range of file + */ + CIT_FSYNC, + /** * Miscellaneous io. This is used for occasional io activity that * doesn't fit into other types. Currently this is used for: * @@ -1925,11 +1969,6 @@ enum cl_io_state { CIS_FINI }; -enum cl_req_priority { - CRP_NORMAL, - CRP_CANCEL -}; - /** * IO state private for a layer. * @@ -1948,9 +1987,11 @@ struct cl_io_slice { * linkage into a list of all slices for a given cl_io, hanging off * cl_io::ci_layers. Immutable after creation. */ - struct list_head cis_linkage; + cfs_list_t cis_linkage; }; +typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *, + struct cl_page *); /** * Per-layer io operations. @@ -2035,21 +2076,28 @@ struct cl_io_operations { void (*cio_fini) (const struct lu_env *env, const struct cl_io_slice *slice); } op[CIT_OP_NR]; - struct { - /** - * Submit pages from \a queue->c2_qin for IO, and move - * successfully submitted pages into \a queue->c2_qout. Return - * non-zero if failed to submit even the single page. 
If - * submission failed after some pages were moved into \a - * queue->c2_qout, completion callback with non-zero ioret is - * executed on them. - */ - int (*cio_submit)(const struct lu_env *env, - const struct cl_io_slice *slice, - enum cl_req_type crt, - struct cl_2queue *queue, - enum cl_req_priority priority); - } req_op[CRT_NR]; + + /** + * Submit pages from \a queue->c2_qin for IO, and move + * successfully submitted pages into \a queue->c2_qout. Return + * non-zero if failed to submit even the single page. If + * submission failed after some pages were moved into \a + * queue->c2_qout, completion callback with non-zero ioret is + * executed on them. + */ + int (*cio_submit)(const struct lu_env *env, + const struct cl_io_slice *slice, + enum cl_req_type crt, + struct cl_2queue *queue); + /** + * Queue async page for write. + * The difference between cio_submit and cio_queue is that + * cio_submit is for urgent request. + */ + int (*cio_commit_async)(const struct lu_env *env, + const struct cl_io_slice *slice, + struct cl_page_list *queue, int from, int to, + cl_commit_cbt cb); /** * Read missing page. * @@ -2062,31 +2110,6 @@ struct cl_io_operations { const struct cl_io_slice *slice, const struct cl_page_slice *page); /** - * Prepare write of a \a page. Called bottom-to-top by a top-level - * cl_io_operations::op[CIT_WRITE]::cio_start() to prepare page for - * get data from user-level buffer. - * - * \pre io->ci_type == CIT_WRITE - * - * \see vvp_io_prepare_write(), lov_io_prepare_write(), - * osc_io_prepare_write(). - */ - int (*cio_prepare_write)(const struct lu_env *env, - const struct cl_io_slice *slice, - const struct cl_page_slice *page, - unsigned from, unsigned to); - /** - * - * \pre io->ci_type == CIT_WRITE - * - * \see vvp_io_commit_write(), lov_io_commit_write(), - * osc_io_commit_write(). - */ - int (*cio_commit_write)(const struct lu_env *env, - const struct cl_io_slice *slice, - const struct cl_page_slice *page, - unsigned from, unsigned to); - /** * Optional debugging helper. Print given io slice. */ int (*cio_print)(const struct lu_env *env, void *cookie, @@ -2134,9 +2157,13 @@ enum cl_enq_flags { */ CEF_NEVER = 0x00000010, /** + * for async glimpse lock. + */ + CEF_AGL = 0x00000020, + /** * mask of enq_flags. */ - CEF_MASK = 0x0000001f + CEF_MASK = 0x0000003f, }; /** @@ -2145,14 +2172,9 @@ enum cl_enq_flags { */ struct cl_io_lock_link { /** linkage into one of cl_lockset lists. */ - struct list_head cill_linkage; + cfs_list_t cill_linkage; struct cl_lock_descr cill_descr; struct cl_lock *cill_lock; - /** - * flags to enqueue lock for this IO. A combination of bit-flags from - * enum cl_enq_flags. - */ - __u32 cill_enq_flags; /** optional destructor */ void (*cill_fini)(const struct lu_env *env, struct cl_io_lock_link *link); @@ -2189,11 +2211,11 @@ struct cl_io_lock_link { */ struct cl_lockset { /** locks to be acquired. */ - struct list_head cls_todo; + cfs_list_t cls_todo; /** locks currently being processed. */ - struct list_head cls_curr; + cfs_list_t cls_curr; /** locks acquired. */ - struct list_head cls_done; + cfs_list_t cls_done; }; /** @@ -2209,12 +2231,25 @@ enum cl_io_lock_dmd { CILR_NEVER }; +enum cl_fsync_mode { + /** start writeback, do not wait for them to finish */ + CL_FSYNC_NONE = 0, + /** start writeback and wait for them to finish */ + CL_FSYNC_LOCAL = 1, + /** discard all of dirty pages in a specific file range */ + CL_FSYNC_DISCARD = 2, + /** start writeback and make sure they have reached storage before + * return. 
OST_SYNC RPC must be issued and finished */ + CL_FSYNC_ALL = 3 +}; + struct cl_io_rw_common { loff_t crw_pos; size_t crw_count; int crw_nonblock; }; + /** * State for io. * @@ -2237,52 +2272,90 @@ struct cl_io { */ struct cl_io *ci_parent; /** List of slices. Immutable after creation. */ - struct list_head ci_layers; + cfs_list_t ci_layers; /** list of locks (to be) acquired by this io. */ struct cl_lockset ci_lockset; /** lock requirements, this is just a help info for sublayers. */ enum cl_io_lock_dmd ci_lockreq; - /** - * This io has held grouplock, to inform sublayers that - * don't do lockless i/o. - */ - int ci_no_srvlock; union { struct cl_rd_io { struct cl_io_rw_common rd; - int rd_is_sendfile; } ci_rd; struct cl_wr_io { struct cl_io_rw_common wr; int wr_append; + int wr_sync; } ci_wr; struct cl_io_rw_common ci_rw; - struct cl_truncate_io { - /** new size to which file is truncated */ - size_t tr_size; - struct obd_capa *tr_capa; - } ci_truncate; + struct cl_setattr_io { + struct ost_lvb sa_attr; + unsigned int sa_valid; + struct obd_capa *sa_capa; + } ci_setattr; struct cl_fault_io { /** page index within file. */ pgoff_t ft_index; /** bytes valid byte on a faulted page. */ int ft_nob; - /** writable page? */ + /** writable page? for nopage() only */ int ft_writable; /** page of an executable? */ int ft_executable; + /** page_mkwrite() */ + int ft_mkwrite; /** resulting page */ struct cl_page *ft_page; } ci_fault; + struct cl_fsync_io { + loff_t fi_start; + loff_t fi_end; + struct obd_capa *fi_capa; + /** file system level fid */ + struct lu_fid *fi_fid; + enum cl_fsync_mode fi_mode; + /* how many pages were written/discarded */ + unsigned int fi_nr_written; + } ci_fsync; } u; struct cl_2queue ci_queue; size_t ci_nob; int ci_result; - int ci_continue; - /** - * Number of pages owned by this IO. For invariant checking. - */ - unsigned ci_owned_nr; + unsigned int ci_continue:1, + /** + * This io has held grouplock, to inform sublayers that + * don't do lockless i/o. + */ + ci_no_srvlock:1, + /** + * The whole IO need to be restarted because layout has been changed + */ + ci_need_restart:1, + /** + * to not refresh layout - the IO issuer knows that the layout won't + * change(page operations, layout change causes all page to be + * discarded), or it doesn't matter if it changes(sync). + */ + ci_ignore_layout:1, + /** + * Check if layout changed after the IO finishes. Mainly for HSM + * requirement. If IO occurs to openning files, it doesn't need to + * verify layout because HSM won't release openning files. + * Right now, only two opertaions need to verify layout: glimpse + * and setattr. + */ + ci_verify_layout:1, + /** + * file is released, restore has to to be triggered by vvp layer + */ + ci_restore_needed:1, + /** + * O_NOATIME + */ + ci_noatime:1; + /** + * Number of pages owned by this IO. For invariant checking. + */ + unsigned ci_owned_nr; }; /** @} cl_io */ @@ -2353,10 +2426,12 @@ struct cl_io { * Per-transfer attributes. */ struct cl_req_attr { - /** Generic attributes for the server consumption. */ - struct obdo *cra_oa; - /** Capability. */ - struct obd_capa *cra_capa; + /** Generic attributes for the server consumption. */ + struct obdo *cra_oa; + /** Capability. */ + struct obd_capa *cra_capa; + /** Jobid */ + char cra_jobid[JOBSTATS_JOBID_SIZE]; }; /** @@ -2402,11 +2477,11 @@ struct cl_req_operations { * A per-object state that (potentially multi-object) transfer request keeps. 
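/*
 * Illustrative sketch (hypothetical, not part of the patch itself): driving
 * the new CIT_FSYNC io type to flush [start, end] of an object, filling the
 * cl_fsync_io fields shown above.  foo_env_io() is a made-up helper returning
 * a per-environment struct cl_io; io->ci_obj and the usual cl_io_init(),
 * cl_io_loop() and cl_io_fini() entry points are assumed unchanged by this
 * patch.
 */
static int foo_sync_range(const struct lu_env *env, struct cl_object *obj,
			  struct lu_fid *fid, loff_t start, loff_t end,
			  enum cl_fsync_mode mode)
{
	struct cl_io *io = foo_env_io(env);
	int rc;

	io->ci_obj		     = obj;
	io->ci_ignore_layout	     = 1; /* sync must not trigger layout refresh */
	io->u.ci_fsync.fi_start	     = start;
	io->u.ci_fsync.fi_end	     = end;
	io->u.ci_fsync.fi_fid	     = fid;
	io->u.ci_fsync.fi_mode	     = mode;	/* e.g. CL_FSYNC_LOCAL */
	io->u.ci_fsync.fi_nr_written = 0;

	if (cl_io_init(env, io, CIT_FSYNC, obj) == 0)
		rc = cl_io_loop(env, io);
	else
		rc = io->ci_result;
	cl_io_fini(env, io);
	return rc;
}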
*/ struct cl_req_obj { - /** object itself */ - struct cl_object *ro_obj; - /** reference to cl_req_obj::ro_obj. For debugging. */ - struct lu_ref_link *ro_obj_ref; - /* something else? Number of pages for a given object? */ + /** object itself */ + struct cl_object *ro_obj; + /** reference to cl_req_obj::ro_obj. For debugging. */ + struct lu_ref_link ro_obj_ref; + /* something else? Number of pages for a given object? */ }; /** @@ -2435,16 +2510,16 @@ struct cl_req_obj { * req's pages. */ struct cl_req { - enum cl_req_type crq_type; + enum cl_req_type crq_type; /** A list of pages being transfered */ - struct list_head crq_pages; + cfs_list_t crq_pages; /** Number of pages in cl_req::crq_pages */ - unsigned crq_nrpages; + unsigned crq_nrpages; /** An array of objects which pages are in ->crq_pages */ - struct cl_req_obj *crq_o; + struct cl_req_obj *crq_o; /** Number of elements in cl_req::crq_objs[] */ - unsigned crq_nrobjs; - struct list_head crq_layers; + unsigned crq_nrobjs; + cfs_list_t crq_layers; }; /** @@ -2453,28 +2528,35 @@ struct cl_req { struct cl_req_slice { struct cl_req *crs_req; struct cl_device *crs_dev; - struct list_head crs_linkage; + cfs_list_t crs_linkage; const struct cl_req_operations *crs_ops; }; /* @} cl_req */ +enum cache_stats_item { + /** how many cache lookups were performed */ + CS_lookup = 0, + /** how many times cache lookup resulted in a hit */ + CS_hit, + /** how many entities are in the cache right now */ + CS_total, + /** how many entities in the cache are actively used (and cannot be + * evicted) right now */ + CS_busy, + /** how many entities were created at all */ + CS_create, + CS_NR +}; + +#define CS_NAMES { "lookup", "hit", "total", "busy", "create" } + /** * Stats for a generic cache (similar to inode, lu_object, etc. caches). */ struct cache_stats { const char *cs_name; - /** how many entities were created at all */ - atomic_t cs_created; - /** how many cache lookups were performed */ - atomic_t cs_lookup; - /** how many times cache lookup resulted in a hit */ - atomic_t cs_hit; - /** how many entities are in the cache right now */ - atomic_t cs_total; - /** how many entities in the cache are actively used (and cannot be - * evicted) right now */ - atomic_t cs_busy; + cfs_atomic_t cs_stats[CS_NR]; }; /** These are not exported so far */ @@ -2500,8 +2582,8 @@ struct cl_site { */ struct cache_stats cs_pages; struct cache_stats cs_locks; - atomic_t cs_pages_state[CPS_NR]; - atomic_t cs_locks_state[CLS_NR]; + cfs_atomic_t cs_pages_state[CPS_NR]; + cfs_atomic_t cs_locks_state[CLS_NR]; }; int cl_site_init (struct cl_site *s, struct cl_device *top); @@ -2627,6 +2709,7 @@ int cl_conf_set (const struct lu_env *env, struct cl_object *obj, const struct cl_object_conf *conf); void cl_object_prune (const struct lu_env *env, struct cl_object *obj); void cl_object_kill (const struct lu_env *env, struct cl_object *obj); +int cl_object_has_locks (struct cl_object *obj); /** * Returns true, iff \a o0 and \a o1 are slices of the same object. 
@@ -2636,21 +2719,47 @@ static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1) return cl_object_header(o0) == cl_object_header(o1); } +static inline void cl_object_page_init(struct cl_object *clob, int size) +{ + clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize; + cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8); +} + +static inline void *cl_object_page_slice(struct cl_object *clob, + struct cl_page *page) +{ + return (void *)((char *)page + clob->co_slice_off); +} + +/** + * Return refcount of cl_object. + */ +static inline int cl_object_refc(struct cl_object *clob) +{ + struct lu_object_header *header = clob->co_lu.lo_header; + return cfs_atomic_read(&header->loh_ref); +} + /** @} cl_object */ /** \defgroup cl_page cl_page * @{ */ -struct cl_page *cl_page_lookup(struct cl_object_header *hdr, - pgoff_t index); -void cl_page_gang_lookup(const struct lu_env *env, - struct cl_object *obj, - struct cl_io *io, - pgoff_t start, pgoff_t end, - struct cl_page_list *plist); +enum { + CLP_GANG_OKAY = 0, + CLP_GANG_RESCHED, + CLP_GANG_AGAIN, + CLP_GANG_ABORT +}; +/* callback of cl_page_gang_lookup() */ + struct cl_page *cl_page_find (const struct lu_env *env, struct cl_object *obj, pgoff_t idx, struct page *vmpage, enum cl_page_type type); +struct cl_page *cl_page_alloc (const struct lu_env *env, + struct cl_object *o, pgoff_t ind, + struct page *vmpage, + enum cl_page_type type); void cl_page_get (struct cl_page *page); void cl_page_put (const struct lu_env *env, struct cl_page *page); @@ -2660,11 +2769,10 @@ void cl_page_print (const struct lu_env *env, void *cookie, void cl_page_header_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_page *pg); -cfs_page_t *cl_page_vmpage (const struct lu_env *env, +struct page *cl_page_vmpage (const struct lu_env *env, struct cl_page *page); -struct cl_page *cl_vmpage_page (cfs_page_t *vmpage, struct cl_object *obj); +struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj); struct cl_page *cl_page_top (struct cl_page *page); -int cl_is_page (const void *addr); const struct cl_page_slice *cl_page_at(const struct cl_page *page, const struct lu_device_type *dtype); @@ -2678,6 +2786,8 @@ const struct cl_page_slice *cl_page_at(const struct cl_page *page, int cl_page_own (const struct lu_env *env, struct cl_io *io, struct cl_page *page); +int cl_page_own_try (const struct lu_env *env, + struct cl_io *io, struct cl_page *page); void cl_page_assume (const struct lu_env *env, struct cl_io *io, struct cl_page *page); void cl_page_unassume (const struct lu_env *env, @@ -2706,6 +2816,8 @@ int cl_page_cache_add (const struct lu_env *env, struct cl_io *io, void cl_page_clip (const struct lu_env *env, struct cl_page *pg, int from, int to); int cl_page_cancel (const struct lu_env *env, struct cl_page *page); +int cl_page_flush (const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); /** @} transfer */ @@ -2718,8 +2830,6 @@ int cl_page_cancel (const struct lu_env *env, struct cl_page *page); void cl_page_discard (const struct lu_env *env, struct cl_io *io, struct cl_page *pg); void cl_page_delete (const struct lu_env *env, struct cl_page *pg); -int cl_page_unmap (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); int cl_page_is_vmlocked (const struct lu_env *env, const struct cl_page *pg); void cl_page_export (const struct lu_env *env, @@ -2751,11 +2861,21 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io, const char 
*scope, const void *source); struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io, const struct cl_lock_descr *need, - __u32 enqflags, const char *scope, const void *source); -struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct cl_lock *except, - int pending, int canceld); +struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env, + struct cl_object *obj, pgoff_t index, + struct cl_lock *except, int pending, + int canceld); +static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env, + struct cl_object *obj, + struct cl_page *page, + struct cl_lock *except, + int pending, int canceld) +{ + LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj)); + return cl_lock_at_pgoff(env, obj, page->cp_index, except, + pending, canceld); +} const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock, const struct lu_device_type *dtype); @@ -2765,14 +2885,23 @@ void cl_lock_get_trust (struct cl_lock *lock); void cl_lock_put (const struct lu_env *env, struct cl_lock *lock); void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source); +void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock, + const char *scope, const void *source); void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source); void cl_lock_release (const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source); void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_compatible(const struct cl_lock *lock1, - const struct cl_lock *lock2); +void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock); + +enum cl_lock_state cl_lock_intransit(const struct lu_env *env, + struct cl_lock *lock); +void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock, + enum cl_lock_state state); +int cl_lock_is_intransit(struct cl_lock *lock); + +int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock, + int keep_mutex); /** \name statemachine statemachine * Interface to lock state machine consists of 3 parts: @@ -2814,14 +2943,15 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, struct cl_io *io, __u32 flags); int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock); int cl_wait_try (const struct lu_env *env, struct cl_lock *lock); -int cl_use_try (const struct lu_env *env, struct cl_lock *lock); +int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic); + /** @} statemachine */ void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock); int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock); void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock, enum cl_lock_state state); -int cl_queue_match (const struct list_head *queue, +int cl_queue_match (const cfs_list_t *queue, const struct cl_lock_descr *need); void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock); @@ -2829,8 +2959,7 @@ int cl_lock_mutex_try (const struct lu_env *env, struct cl_lock *lock); void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock); int cl_lock_is_mutexed (struct cl_lock *lock); int cl_lock_nr_mutexed (const struct lu_env *env); -int cl_lock_page_out (const struct lu_env *env, struct cl_lock *lock, - int discard); +int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock 
*lock); int cl_lock_ext_match (const struct cl_lock_descr *has, const struct cl_lock_descr *need); int cl_lock_descr_match(const struct cl_lock_descr *has, @@ -2854,7 +2983,6 @@ void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock); void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock); void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error); void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait); -int cl_is_lock (const void *addr); unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock); @@ -2881,19 +3009,17 @@ void cl_io_end (const struct lu_env *env, struct cl_io *io); int cl_io_lock_add (const struct lu_env *env, struct cl_io *io, struct cl_io_lock_link *link); int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, - struct cl_lock_descr *descr, int enqflags); + struct cl_lock_descr *descr); int cl_io_read_page (const struct lu_env *env, struct cl_io *io, struct cl_page *page); -int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, unsigned from, unsigned to); -int cl_io_commit_write (const struct lu_env *env, struct cl_io *io, - struct cl_page *page, unsigned from, unsigned to); int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io, - enum cl_req_type iot, struct cl_2queue *queue, - enum cl_req_priority priority); + enum cl_req_type iot, struct cl_2queue *queue); int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io, - enum cl_req_type iot, struct cl_2queue *queue, - enum cl_req_priority priority, long timeout); + enum cl_req_type iot, struct cl_2queue *queue, + long timeout); +int cl_io_commit_async (const struct lu_env *env, struct cl_io *io, + struct cl_page_list *queue, int from, int to, + cl_commit_cbt cb); void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io, size_t nob); int cl_io_cancel (const struct lu_env *env, struct cl_io *io, @@ -2908,7 +3034,24 @@ static inline int cl_io_is_append(const struct cl_io *io) return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append; } -int cl_io_is_sendfile(const struct cl_io *io); +static inline int cl_io_is_sync_write(const struct cl_io *io) +{ + return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync; +} + +static inline int cl_io_is_mkwrite(const struct cl_io *io) +{ + return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite; +} + +/** + * True, iff \a io is a truncate(2). + */ +static inline int cl_io_is_trunc(const struct cl_io *io) +{ + return io->ci_type == CIT_SETATTR && + (io->u.ci_setattr.sa_valid & ATTR_SIZE); +} struct cl_io *cl_io_top(struct cl_io *io); @@ -2930,21 +3073,38 @@ do { \ * @{ */ /** + * Last page in the page list. + */ +static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist) +{ + LASSERT(plist->pl_nr > 0); + return cfs_list_entry(plist->pl_pages.prev, struct cl_page, cp_batch); +} + +static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist) +{ + LASSERT(plist->pl_nr > 0); + return cfs_list_entry(plist->pl_pages.next, struct cl_page, cp_batch); +} + +/** * Iterate over pages in a page list. */ #define cl_page_list_for_each(page, list) \ - list_for_each_entry((page), &(list)->pl_pages, cp_batch) + cfs_list_for_each_entry((page), &(list)->pl_pages, cp_batch) /** * Iterate over pages in a page list, taking possible removals into account. 
*/ #define cl_page_list_for_each_safe(page, temp, list) \ - list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch) + cfs_list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch) void cl_page_list_init (struct cl_page_list *plist); void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page); void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src, struct cl_page *page); +void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src, + struct cl_page *page); void cl_page_list_splice (struct cl_page_list *list, struct cl_page_list *head); void cl_page_list_del (const struct lu_env *env, @@ -2957,8 +3117,6 @@ void cl_page_list_assume (const struct lu_env *env, struct cl_io *io, struct cl_page_list *plist); void cl_page_list_discard(const struct lu_env *env, struct cl_io *io, struct cl_page_list *plist); -int cl_page_list_unmap (const struct lu_env *env, - struct cl_io *io, struct cl_page_list *plist); void cl_page_list_fini (const struct lu_env *env, struct cl_page_list *plist); void cl_2queue_init (struct cl_2queue *queue); @@ -2997,12 +3155,14 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret); * anchor and wakes up waiting thread when transfer is complete. */ struct cl_sync_io { - /** number of pages yet to be transferred. */ - atomic_t csi_sync_nr; - /** completion to be signaled when transfer is complete. */ - cfs_waitq_t csi_waitq; - /** error code. */ - int csi_sync_rc; + /** number of pages yet to be transferred. */ + cfs_atomic_t csi_sync_nr; + /** error code. */ + int csi_sync_rc; + /** barrier of destroy this structure */ + cfs_atomic_t csi_barrier; + /** completion to be signaled when transfer is complete. */ + wait_queue_head_t csi_waitq; }; void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages); @@ -3068,6 +3228,8 @@ void cl_env_reexit (void *cookie); void cl_env_implant (struct lu_env *env, int *refcheck); void cl_env_unplant (struct lu_env *env, int *refcheck); unsigned cl_env_cache_purge(unsigned nr); +struct lu_env *cl_env_percpu_get (void); +void cl_env_percpu_put (struct lu_env *env); /** @} cl_env */ @@ -3082,4 +3244,7 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site, struct lu_device *next); /** @} clio */ +int cl_global_init(void); +void cl_global_fini(void); + #endif /* _LINUX_CL_OBJECT_H */
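/*
 * Illustrative sketch (hypothetical, not part of the patch itself):
 * synchronous transfer of a single page through the revised
 * cl_io_submit_sync() above, which no longer takes a cl_req_priority
 * argument.  "io" is assumed to be an initialized io that already owns
 * "page"; cl_2queue_fini() is assumed to keep its usual form, and a timeout
 * of 0 is assumed to mean "no time limit".
 */
static int foo_page_sync_read(const struct lu_env *env, struct cl_io *io,
			      struct cl_page *page)
{
	struct cl_2queue *queue = &io->ci_queue;
	int rc;

	cl_2queue_init(queue);
	cl_page_list_add(&queue->c2_qin, page);

	/* submit and wait; successfully transferred pages move to c2_qout */
	rc = cl_io_submit_sync(env, io, CRT_READ, queue, 0);

	cl_2queue_fini(env, queue);
	return rc;
}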