X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Finclude%2Flustre_dlm.h;h=362abdfc79a034caf88b8ba59cc2d8e53119ef5e;hb=675dd06e429ee9551d0f874f3461ac3e5091c039;hp=368ee2105f23f71eba8bf6654cd2cd22078e9837;hpb=6098ea781eca0a82ae3c707c265079ebcfb31392;p=fs%2Flustre-release.git

diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h
index 368ee21..362abdf 100644
--- a/lustre/include/lustre_dlm.h
+++ b/lustre/include/lustre_dlm.h
@@ -37,6 +37,11 @@
 #ifndef _LUSTRE_DLM_H__
 #define _LUSTRE_DLM_H__
 
+/** \defgroup ldlm ldlm
+ *
+ * @{
+ */
+
 #if defined(__linux__)
 #include
 #elif defined(__APPLE__)
 #include
@@ -51,7 +56,6 @@
 #include
 #include
 #include
-#include /* for obd_export, for LDLM_DEBUG */
 #include /* for interval_node{}, ldlm_extent */
 #include
@@ -64,7 +68,7 @@ struct obd_device;
 /* 1.5 times the maximum 128 tasks available in VN mode */
 #define LDLM_DEFAULT_LRU_SIZE 196
 #else
-#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
+#define LDLM_DEFAULT_LRU_SIZE (100 * cfs_num_online_cpus())
 #endif
 #define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
 #define LDLM_CTIME_AGE_LIMIT (10)
@@ -106,7 +110,7 @@ typedef enum {
 #define LDLM_FL_REPLAY 0x000100
 
 #define LDLM_FL_INTENT_ONLY 0x000200 /* don't grant lock, just do intent */
-#define LDLM_FL_LOCAL_ONLY 0x000400 /* see ldlm_cli_cancel_unused */
+#define LDLM_FL_LOCAL_ONLY 0x000400 /* don't run the cancel callback under ldlm_cli_cancel_unused */
 
 #define LDLM_FL_FAILED 0x000800
@@ -114,7 +118,7 @@ typedef enum {
 #define LDLM_FL_HAS_INTENT 0x001000 /* lock request has intent */
 #define LDLM_FL_CANCELING 0x002000 /* lock cancel has already been sent */
 #define LDLM_FL_LOCAL 0x004000 /* local lock (ie, no srv/cli split) */
-#define LDLM_FL_WARN 0x008000 /* see ldlm_cli_cancel_unused */
+/* was LDLM_FL_WARN until 2.0.0 0x008000 */
 #define LDLM_FL_DISCARD_DATA 0x010000 /* discard (no writeback) on cancel */
 
 #define LDLM_FL_NO_TIMEOUT 0x020000 /* Blocked by group lock - wait
@@ -154,11 +158,6 @@ typedef enum {
 /* Flags inherited from parent lock when doing intents. */
 #define LDLM_INHERIT_FLAGS (LDLM_FL_CANCEL_ON_BLOCK)
 
-/* These are flags that are mapped into the flags and ASTs of blocking locks */
-#define LDLM_AST_DISCARD_DATA 0x80000000 /* Add FL_DISCARD to blocking ASTs */
-/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
-#define LDLM_AST_FLAGS (LDLM_FL_DISCARD_DATA)
-
 /* completion ast to be executed */
 #define LDLM_FL_CP_REQD 0x1000000
@@ -169,8 +168,7 @@ typedef enum {
  * w/o involving a separate thread, in order to decrease the context switch rate */
 #define LDLM_FL_ATOMIC_CB 0x4000000
 
-/* Cancel lock asynchronously. See ldlm_cli_cancel_unused_resource. */
-#define LDLM_FL_ASYNC 0x8000000
+/* was LDLM_FL_ASYNC until 2.0.0 0x8000000 */
 
 /* It may happen that a client initiates 2 operations, e.g. unlink and mkdir,
  * such that the server sends blocking ASTs for conflicting locks to this client for
@@ -188,6 +186,23 @@ typedef enum {
 /* measure lock contention and return -EUSERS if locking contention is high */
 #define LDLM_FL_DENY_ON_CONTENTION 0x40000000
 
+/* These are flags that are mapped into the flags and ASTs of blocking locks */
+#define LDLM_AST_DISCARD_DATA 0x80000000 /* Add FL_DISCARD to blocking ASTs */
+
+/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
+#define LDLM_AST_FLAGS (LDLM_FL_DISCARD_DATA)
+
+/*
+ * --------------------------------------------------------------------------
+ * NOTE! Starting from this point, LDLM_FL_* flags with values above
+ * 0x80000000 will not be sent over the wire.
+ * --------------------------------------------------------------------------
+ */
+
+/* Used for marking a lock as a target for -EINTR while cp_ast sleep
+ * emulation + race with upcoming bl_ast. */
+#define LDLM_FL_FAIL_LOC 0x100000000ULL
+
 /* The blocking callback is overloaded to perform two functions. These flags
  * indicate which operation should be performed. */
 #define LDLM_CB_BLOCKING 1
 #define LDLM_CB_CANCELING 2
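The two LDLM_CB_* values above are how the overloaded blocking callback learns which of its two jobs to perform. A reviewer's sketch only: example_blocking_ast() is hypothetical, but its prototype matches ldlm_blocking_ast() and it uses only ldlm_lock2handle() and ldlm_cli_cancel(), both declared later in this header.

static int example_blocking_ast(struct ldlm_lock *lock,
                                struct ldlm_lock_desc *desc,
                                void *data, int flag)
{
        struct lustre_handle lockh;

        switch (flag) {
        case LDLM_CB_BLOCKING:
                /* A conflicting lock is waiting: cancel ours. */
                ldlm_lock2handle(lock, &lockh);
                return ldlm_cli_cancel(&lockh);
        case LDLM_CB_CANCELING:
                /* Our lock is being destroyed: drop cached state here. */
                return 0;
        default:
                return -EINVAL; /* only the two values above are defined */
        }
}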
@@ -201,6 +216,7 @@ typedef enum {
 #define LCK_COMPAT_CR (LCK_COMPAT_CW | LCK_PR | LCK_PW)
 #define LCK_COMPAT_NL (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
 #define LCK_COMPAT_GROUP (LCK_GROUP | LCK_NL)
+#define LCK_COMPAT_COS (LCK_COS)
 
 extern ldlm_mode_t lck_compat_array[];
 
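lck_compat_array[] is indexed by an existing lock mode and holds the LCK_COMPAT_* bitmask of modes that may coexist with it; the header's lockmode_compat() helper (unchanged by this patch) amounts to the function below. Note that with the new LCK_COMPAT_COS entry a COS lock is compatible only with other COS locks.

static inline int example_modes_compat(ldlm_mode_t exist_mode,
                                       ldlm_mode_t new_mode)
{
        /* Non-zero iff new_mode may be granted while exist_mode is held. */
        return lck_compat_array[exist_mode] & new_mode;
}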
@@ -260,15 +276,15 @@ struct ldlm_pool_ops {
 int (*po_setup)(struct ldlm_pool *pl, int limit);
 };
 
-/** 
- * One second for pools thread check interval. Each pool has own period. 
+/**
+ * One second for pools thread check interval. Each pool has its own period.
  */
 #define LDLM_POOLS_THREAD_PERIOD (1)
 
-/** 
- * 5% margin for modest pools. See ldlm_pool.c for details. 
+/**
+ * ~6% margin for modest pools. See ldlm_pool.c for details.
  */
-#define LDLM_POOLS_MODEST_MARGIN (5)
+#define LDLM_POOLS_MODEST_MARGIN_SHIFT (4)
 
 /**
  * Default recalc period for server side pools in sec.
@@ -292,27 +308,27 @@ struct ldlm_pool {
         /**
          * Lock for protecting slv/clv updates.
          */
-        spinlock_t pl_lock;
+        cfs_spinlock_t pl_lock;
         /**
          * Number of allowed locks in pool, both client and server side.
          */
-        atomic_t pl_limit;
+        cfs_atomic_t pl_limit;
         /**
          * Number of granted locks in pool.
          */
-        atomic_t pl_granted;
+        cfs_atomic_t pl_granted;
         /**
          * Grant rate per T.
          */
-        atomic_t pl_grant_rate;
+        cfs_atomic_t pl_grant_rate;
         /**
          * Cancel rate per T.
          */
-        atomic_t pl_cancel_rate;
+        cfs_atomic_t pl_cancel_rate;
         /**
          * Grant speed (GR-CR) per T.
          */
-        atomic_t pl_grant_speed;
+        cfs_atomic_t pl_grant_speed;
         /**
          * Server lock volume. Protected by pl_lock.
          */
@@ -325,7 +341,7 @@ struct ldlm_pool {
          * Lock volume factor. SLV on client is calculated as follows:
          * server_slv * lock_volume_factor.
          */
-        atomic_t pl_lock_volume_factor;
+        cfs_atomic_t pl_lock_volume_factor;
         /**
          * Time when last SLV from server was obtained.
          */
@@ -354,8 +370,9 @@ typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
 
 struct ldlm_valblock_ops {
         int (*lvbo_init)(struct ldlm_resource *res);
-        int (*lvbo_update)(struct ldlm_resource *res, struct lustre_msg *m,
-                           int buf_idx, int increase);
+        int (*lvbo_update)(struct ldlm_resource *res,
+                           struct ptlrpc_request *r,
+                           int increase);
 };
 
 typedef enum {
@@ -364,14 +381,6 @@ typedef enum {
 } ldlm_appetite_t;
 
 /*
- * Default value for ->ns_shrink_thumb. If lock is not extent one its cost
- * is one page. Here we have 256 pages which is 1M on i386. Thus by default
- * all extent locks which have more than 1M long extent will be kept in lru,
- * others (including ibits locks) will be canceled on memory pressure event.
- */
-#define LDLM_LOCK_SHRINK_THUMB 256
-
-/*
  * Default values for the "max_nolock_size", "contention_time" and
  * "contended_locks" namespace tunables.
  */
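The lvbo_update() change above (the whole ptlrpc_request is passed instead of a lustre_msg plus buffer index) is what every lock value block user must now implement. A hedged sketch of the server-side wiring; my_lvbo_init(), my_lvbo_update() and the final assignment are hypothetical:

static int my_lvbo_init(struct ldlm_resource *res);
static int my_lvbo_update(struct ldlm_resource *res,
                          struct ptlrpc_request *r, int increase);

static struct ldlm_valblock_ops my_lvbo_ops = {
        .lvbo_init   = my_lvbo_init,
        .lvbo_update = my_lvbo_update,
};

/* A server would then attach the table to its namespace:
 *         ns->ns_lvbo = &my_lvbo_ops;
 * after which ldlm_res_lvbo_update() (see below) dispatches through it. */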
@@ -404,8 +413,8 @@ struct ldlm_namespace {
         /**
          * Hash table for namespace.
          */
-        struct list_head *ns_hash;
-        spinlock_t ns_hash_lock;
+        cfs_list_t *ns_hash;
+        cfs_spinlock_t ns_hash_lock;
 
         /**
          * Count of resources in the hash.
@@ -415,39 +424,34 @@ struct ldlm_namespace {
         /**
          * All root resources in namespace.
          */
-        struct list_head ns_root_list;
+        cfs_list_t ns_root_list;
 
         /**
          * Position in global namespace list.
          */
-        struct list_head ns_list_chain;
+        cfs_list_t ns_list_chain;
 
         /**
          * All unused (LRU) locks in namespace.
          */
-        struct list_head ns_unused_list;
+        cfs_list_t ns_unused_list;
         int ns_nr_unused;
-        spinlock_t ns_unused_lock;
+        cfs_spinlock_t ns_unused_lock;
 
         unsigned int ns_max_unused;
         unsigned int ns_max_age;
-
+        unsigned int ns_timeouts;
         /**
          * Seconds.
          */
         unsigned int ns_ctime_age_limit;
 
         /**
-         * Lower limit to number of pages in lock to keep it in cache.
-         */
-        unsigned long ns_shrink_thumb;
-
-        /**
          * Next debug dump, jiffies.
          */
         cfs_time_t ns_next_dump;
 
-        atomic_t ns_locks;
+        cfs_atomic_t ns_locks;
         __u64 ns_resources;
         ldlm_res_policy ns_policy;
         struct ldlm_valblock_ops *ns_lvbo;
@@ -457,13 +461,13 @@ struct ldlm_namespace {
         ldlm_appetite_t ns_appetite;
 
         /**
-         * If more than @ns_contented_locks found, the resource considered
-         * as contended.
+         * If more than \a ns_contended_locks found, the resource is considered
+         * to be contended.
          */
         unsigned ns_contended_locks;
 
         /**
-         * The resource remembers contended state during @ns_contention_time,
+         * The resource remembers contended state during \a ns_contention_time,
          * in seconds.
          */
         unsigned ns_contention_time;
@@ -513,7 +517,7 @@ static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
  *
  */
 
-#define RES_HASH_BITS 10
+#define RES_HASH_BITS 12
 #define RES_HASH_SIZE (1UL << RES_HASH_BITS)
 #define RES_HASH_MASK (RES_HASH_SIZE - 1)
@@ -530,7 +534,7 @@ typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
 
 /* Interval node data for each LDLM_EXTENT lock */
 struct ldlm_interval {
         struct interval_node li_node; /* node for tree mgmt */
-        struct list_head li_group; /* the locks which have the same
+        cfs_list_t li_group; /* the locks which have the same
                                     * policy - group of the policy */
 };
 #define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
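The two namespace tunables documented above combine into the contention test presumably consulted for LDLM_FL_DENY_ON_CONTENTION processing. A sketch only, assuming the caller tracks when contention was last observed and how many contending locks were seen; the libcfs time helpers are the standard ones:

static int example_res_contended(struct ldlm_namespace *ns,
                                 cfs_time_t last_contention_sec,
                                 unsigned contended_locks_seen)
{
        /* Contended: more than ns_contended_locks contenders observed
         * within the last ns_contention_time seconds. */
        return contended_locks_seen > ns->ns_contended_locks &&
               cfs_time_current_sec() <
                       last_contention_sec + ns->ns_contention_time;
}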
@@ -544,43 +548,53 @@ struct ldlm_interval_tree {
         struct interval_node *lit_root; /* actually ldlm_interval */
 };
 
+#define LUSTRE_TRACKS_LOCK_EXP_REFS (1)
+
+/* Cancel flags. */
+typedef enum {
+        LCF_ASYNC  = 0x1, /* Cancel locks asynchronously. */
+        LCF_LOCAL  = 0x2, /* Cancel locks locally, without notifying the
+                           * server. */
+        LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
+                           * in the same RPC */
+} ldlm_cancel_flags_t;
+
 struct ldlm_lock {
-        /** 
+        /**
          * Must be first in the structure.
          */
         struct portals_handle l_handle;
         /**
          * Lock reference count.
          */
-        atomic_t l_refc;
-        /** 
+        cfs_atomic_t l_refc;
+        /**
          * Internal spinlock protects l_resource. We should hold this lock
          * before grabbing res_lock.
          */
-        spinlock_t l_lock;
-        /** 
-         * ldlm_lock_change_resource() can change this. 
+        cfs_spinlock_t l_lock;
+        /**
+         * ldlm_lock_change_resource() can change this.
          */
         struct ldlm_resource *l_resource;
-        /** 
+        /**
          * Protected by ns_hash_lock. List item for client side lru list.
          */
-        struct list_head l_lru;
-        /** 
-         * Protected by lr_lock, linkage to resource's lock queues. 
+        cfs_list_t l_lru;
+        /**
+         * Protected by lr_lock, linkage to resource's lock queues.
          */
-        struct list_head l_res_link;
-        /** 
-         * Tree node for ldlm_extent. 
+        cfs_list_t l_res_link;
+        /**
+         * Tree node for ldlm_extent.
          */
         struct ldlm_interval *l_tree_node;
-        /** 
+        /**
          * Protected by per-bucket exp->exp_lock_hash locks. Per export hash
          * of locks.
          */
-        struct hlist_node l_exp_hash;
-        /** 
-         * Protected by lr_lock. Requested mode. 
+        cfs_hlist_node_t l_exp_hash;
+        /**
+         * Protected by lr_lock. Requested mode.
          */
         ldlm_mode_t l_req_mode;
         /**
@@ -620,7 +634,7 @@ struct ldlm_lock {
         /*
          * Protected by lr_lock. Various counters: readers, writers, etc.
          */
-        __u32 l_flags;
+        __u64 l_flags;
         __u32 l_readers;
         __u32 l_writers;
         /*
@@ -632,103 +646,119 @@ struct ldlm_lock {
          */
         __u8 l_destroyed;
 
-        /** 
+        /**
          * If the lock is granted, a process sleeps on this waitq to learn when
          * it's no longer in use. If the lock is not granted, a process sleeps
-         * on this waitq to learn when it becomes granted. 
+         * on this waitq to learn when it becomes granted.
          */
         cfs_waitq_t l_waitq;
 
-        struct timeval l_enqueued_time;
+        /**
+         * Seconds. It is updated whenever there is any activity related to
+         * the lock, e.g. enqueuing the lock or sending a blocking AST.
+         */
+        cfs_time_t l_last_activity;
 
         /**
-         * Jiffies. Should be converted to time if needed. 
+         * Jiffies. Should be converted to time if needed.
          */
         cfs_time_t l_last_used;
 
         struct ldlm_extent l_req_extent;
 
-        /* 
-         * Client-side-only members. 
+        /*
+         * Client-side-only members.
          */
 
-        /** 
+        /**
          * Temporary storage for an LVB received during an enqueue operation.
         */
         __u32 l_lvb_len;
         void *l_lvb_data;
-        void *l_lvb_swabber;
 
         void *l_ast_data;
-        spinlock_t l_extents_list_lock;
-        struct list_head l_extents_list;
+        cfs_spinlock_t l_extents_list_lock;
+        cfs_list_t l_extents_list;
 
-        struct list_head l_cache_locks_list;
+        cfs_list_t l_cache_locks_list;
 
-        /* 
-         * Server-side-only members. 
+        /*
+         * Server-side-only members.
          */
 
-        /** 
+        /** connection cookie for the client that originated the operation. */
+        __u64 l_client_cookie;
+
+        /**
          * Protected by elt_lock. Callbacks pending.
          */
-        struct list_head l_pending_chain;
+        cfs_list_t l_pending_chain;
 
         cfs_time_t l_callback_timeout;
 
-        /** 
-         * Pid which created this lock. 
+        /**
+         * Pid which created this lock.
          */
         __u32 l_pid;
 
-        /** 
-         * For ldlm_add_ast_work_item(). 
+        /**
+         * For ldlm_add_ast_work_item().
          */
-        struct list_head l_bl_ast;
-        /** 
-         * For ldlm_add_ast_work_item(). 
+        cfs_list_t l_bl_ast;
+        /**
+         * For ldlm_add_ast_work_item().
         */
-        struct list_head l_cp_ast;
-        /** 
-         * For ldlm_add_ast_work_item(). 
+        cfs_list_t l_cp_ast;
+        /**
+         * For ldlm_add_ast_work_item().
          */
-        struct list_head l_rk_ast;
+        cfs_list_t l_rk_ast;
 
         struct ldlm_lock *l_blocking_lock;
         int l_bl_ast_run;
 
-        /** 
-         * Protected by lr_lock, linkages to "skip lists". 
+        /**
+         * Protected by lr_lock, linkages to "skip lists".
          */
-        struct list_head l_sl_mode;
-        struct list_head l_sl_policy;
+        cfs_list_t l_sl_mode;
+        cfs_list_t l_sl_policy;
         struct lu_ref l_reference;
+#if LUSTRE_TRACKS_LOCK_EXP_REFS
+        /* Debugging stuff for bug 20498, for tracking export
+           references. */
+        /** number of export references taken */
+        int l_exp_refs_nr;
+        /** link all locks referencing one export */
+        cfs_list_t l_exp_refs_link;
+        /** referenced export object */
+        struct obd_export *l_exp_refs_target;
+#endif
 };
 
 struct ldlm_resource {
         struct ldlm_namespace *lr_namespace;
 
         /* protected by ns_hash_lock */
-        struct list_head lr_hash;
+        cfs_list_t lr_hash;
         struct ldlm_resource *lr_parent; /* 0 for a root resource */
-        struct list_head lr_children; /* list head for child resources */
-        struct list_head lr_childof; /* part of ns_root_list if root res,
+        cfs_list_t lr_children; /* list head for child resources */
+        cfs_list_t lr_childof; /* part of ns_root_list if root res,
                                      * part of lr_children if child */
-        spinlock_t lr_lock;
+        cfs_spinlock_t lr_lock;
 
         /* protected by lr_lock */
-        struct list_head lr_granted;
-        struct list_head lr_converting;
-        struct list_head lr_waiting;
+        cfs_list_t lr_granted;
+        cfs_list_t lr_converting;
+        cfs_list_t lr_waiting;
         ldlm_mode_t lr_most_restr;
         ldlm_type_t lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK} */
         struct ldlm_res_id lr_name;
-        atomic_t lr_refcount;
+        cfs_atomic_t lr_refcount;
 
         struct ldlm_interval_tree lr_itree[LCK_MODE_NUM]; /* interval trees*/
 
         /* Server-side-only lock value block elements */
-        struct semaphore lr_lvb_sem;
+        cfs_semaphore_t lr_lvb_sem;
         __u32 lr_lvb_len;
         void *lr_lvb_data;
 
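The l_lock comment above fixes the locking order: a lock's own spinlock is taken first (it pins l_resource), and only then the resource's lr_lock. Lustre wraps the pattern in lock_res_and_lock()/unlock_res_and_lock() (not part of this diff); the sketch below spells the order out with lock_res()/unlock_res() from the end of this header:

static void example_lock_both(struct ldlm_lock *lock)
{
        cfs_spin_lock(&lock->l_lock);   /* l_resource cannot change now */
        lock_res(lock->l_resource);     /* lr_lock, always taken second */

        /* ... inspect l_flags, granted mode, resource queues, ... */

        unlock_res(lock->l_resource);
        cfs_spin_unlock(&lock->l_lock);
}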
@@ -741,13 +771,13 @@ struct ldlm_resource {
 };
 
 struct ldlm_ast_work {
-        struct ldlm_lock *w_lock;
-        int w_blocking;
-        struct ldlm_lock_desc w_desc;
-        struct list_head w_list;
-        int w_flags;
-        void *w_data;
-        int w_datalen;
+        struct ldlm_lock *w_lock;
+        int w_blocking;
+        struct ldlm_lock_desc w_desc;
+        cfs_list_t w_list;
+        int w_flags;
+        void *w_data;
+        int w_datalen;
 };
 
 /* ldlm_enqueue parameters common */
@@ -769,7 +799,7 @@ extern char *ldlm_typename[];
 extern char *ldlm_it2str(int it);
 
 #ifdef LIBCFS_DEBUG
 #define ldlm_lock_debug(cdls, level, lock, file, func, line, fmt, a...) do { \
-        CHECK_STACK();                                                  \
+        CFS_CHECK_STACK();                                              \
                                                                         \
         if (((level) & D_CANTMASK) != 0 ||                              \
             ((libcfs_debug & (level)) != 0 &&                           \
@@ -787,13 +817,16 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 mask, ...)
         __attribute__ ((format (printf, 4, 5)));
 
-#define LDLM_ERROR(lock, fmt, a...) do {                                \
+#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do {                    \
         static cfs_debug_limit_state_t _ldlm_cdls;                      \
-        ldlm_lock_debug(&_ldlm_cdls, D_ERROR, lock,                     \
+        ldlm_lock_debug(&_ldlm_cdls, mask, lock,                        \
                         __FILE__, __FUNCTION__, __LINE__,               \
                         "### " fmt , ##a);                              \
 } while (0)
 
+#define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a)
+#define LDLM_WARN(lock, fmt, a...)  LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a)
+
 #define LDLM_DEBUG(lock, fmt, a...) do {                                \
         ldlm_lock_debug(NULL, D_DLMTRACE, lock,                         \
                         __FILE__, __FUNCTION__, __LINE__,               \
                         "### " fmt , ##a);                              \
@@ -811,7 +844,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 mask,
 
 typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, int *flags,
                                       int first_enq, ldlm_error_t *err,
-                                      struct list_head *work_list);
+                                      cfs_list_t *work_list);
 
 /*
  * Iterators.
 */
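Usage of the reworked debugging macros: LDLM_ERROR() and the new LDLM_WARN() are rate-limited through the per-site cfs_debug_limit_state_t inside LDLM_DEBUG_LIMIT(), while LDLM_DEBUG() always emits at D_DLMTRACE. A small hypothetical caller:

static void example_report(struct ldlm_lock *lock, int rc)
{
        LDLM_DEBUG(lock, "processed lock, rc = %d", rc);
        if (rc == -ETIMEDOUT)
                LDLM_WARN(lock, "callback timed out");
        else if (rc != 0)
                LDLM_ERROR(lock, "unexpected failure: rc = %d", rc);
}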
@@ -831,7 +864,7 @@ int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
                                ldlm_res_iterator_t iter, void *closure);
 
 int ldlm_replay_locks(struct obd_import *imp);
-void ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
+int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
                            ldlm_iterator_t iter, void *data);
 
 /* ldlm_flock.c */
@@ -853,7 +886,7 @@ int ldlm_handle_cancel(struct ptlrpc_request *req);
 int ldlm_request_cancel(struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req, int first);
 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
-int ldlm_refresh_waiting_lock(struct ldlm_lock *lock);
+int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout);
 void ldlm_revoke_export_locks(struct obd_export *exp);
 int ldlm_get_ref(void);
 void ldlm_put_ref(void);
@@ -867,7 +900,6 @@ void ldlm_lock2handle(const struct ldlm_lock *lock,
                       struct lustre_handle *lockh);
 struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, int flags);
 void ldlm_cancel_callback(struct ldlm_lock *);
-int ldlm_lock_set_data(struct lustre_handle *, void *data);
 int ldlm_lock_remove_from_lru(struct ldlm_lock *);
 
 static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
@@ -890,12 +922,11 @@ ldlm_handle2lock_long(const struct lustre_handle *h, int flags)
 }
 
 static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
-                                       struct lustre_msg *m, int buf_idx,
-                                       int increase)
+                                       struct ptlrpc_request *r, int increase)
 {
         if (res->lr_namespace->ns_lvbo &&
             res->lr_namespace->ns_lvbo->lvbo_update) {
-                return res->lr_namespace->ns_lvbo->lvbo_update(res, m, buf_idx,
+                return res->lr_namespace->ns_lvbo->lvbo_update(res, r,
                                                                increase);
         }
         return 0;
@@ -904,6 +935,9 @@ static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
 int ldlm_error2errno(ldlm_error_t error);
 ldlm_error_t ldlm_errno2error(int err_no); /* don't call it `errno': this
                                             * confuses user-space. */
+#if LUSTRE_TRACKS_LOCK_EXP_REFS
+void ldlm_dump_export_locks(struct obd_export *exp);
+#endif
 
 /**
  * Release a temporary lock reference obtained by ldlm_handle2lock() or
@@ -933,17 +967,17 @@ do {                                                    \
         lock;                                           \
 })
 
-#define ldlm_lock_list_put(head, member, count)                     \
-({                                                                  \
-        struct ldlm_lock *_lock, *_next;                            \
-        int c = count;                                              \
-        list_for_each_entry_safe(_lock, _next, head, member) {      \
-                if (c-- == 0)                                       \
-                        break;                                      \
-                list_del_init(&_lock->member);                      \
-                LDLM_LOCK_RELEASE(_lock);                           \
-        }                                                           \
-        LASSERT(c <= 0);                                            \
+#define ldlm_lock_list_put(head, member, count)                     \
+({                                                                  \
+        struct ldlm_lock *_lock, *_next;                            \
+        int c = count;                                              \
+        cfs_list_for_each_entry_safe(_lock, _next, head, member) {  \
+                if (c-- == 0)                                       \
+                        break;                                      \
+                cfs_list_del_init(&_lock->member);                  \
+                LDLM_LOCK_RELEASE(_lock);                           \
+        }                                                           \
+        LASSERT(c <= 0);                                            \
 })
 
 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
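Per the comment above, every lock obtained through ldlm_handle2lock() carries a temporary reference that must be dropped with LDLM_LOCK_PUT(). The canonical pairing, as a minimal sketch:

static void example_with_handle(const struct lustre_handle *lockh)
{
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);

        if (lock == NULL)
                return;              /* stale or already-cancelled handle */

        /* ... work with the lock ... */

        LDLM_LOCK_PUT(lock);         /* drop the handle2lock() reference */
}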
@@ -955,16 +989,15 @@ int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
 void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
 void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
 void ldlm_lock_allow_match(struct ldlm_lock *lock);
-int ldlm_lock_fast_match(struct ldlm_lock *, int, obd_off, obd_off, void **);
-void ldlm_lock_fast_release(void *, int);
+void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
 ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                             const struct ldlm_res_id *, ldlm_type_t type,
                             ldlm_policy_data_t *, ldlm_mode_t mode,
-                            struct lustre_handle *);
+                            struct lustre_handle *, int unref);
 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                         __u32 *flags);
+void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
 void ldlm_lock_cancel(struct ldlm_lock *lock);
-void ldlm_cancel_locks_for_export(struct obd_export *export);
 void ldlm_reprocess_all(struct ldlm_resource *res);
 void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
 void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos);
@@ -1000,7 +1033,8 @@ struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
                                         ldlm_type_t type, int create);
 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
 int ldlm_resource_putref(struct ldlm_resource *res);
-void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
+void ldlm_resource_add_lock(struct ldlm_resource *res,
+                            cfs_list_t *head,
                             struct ldlm_lock *lock);
 void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
@@ -1011,7 +1045,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
                               const struct ldlm_res_id *);
 
 #define LDLM_RESOURCE_ADDREF(res) do {                                  \
-        lu_ref_add(&(res)->lr_reference, __FUNCTION__, cfs_current());  \
+        lu_ref_add_atomic(&(res)->lr_reference, __FUNCTION__, cfs_current()); \
 } while (0)
 
 #define LDLM_RESOURCE_DELREF(res) do {                                  \
@@ -1027,6 +1061,7 @@ struct ldlm_callback_suite {
 
 /* ldlm_request.c */
 int ldlm_expired_completion_wait(void *data);
+int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock);
 int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                       void *data, int flag);
 int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
@@ -1035,25 +1070,24 @@ int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data);
 int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                      struct ldlm_enqueue_info *einfo,
                      const struct ldlm_res_id *res_id,
-                     ldlm_policy_data_t *policy, int *flags,
-                     void *lvb, __u32 lvb_len, void *lvb_swabber,
-                     struct lustre_handle *lockh, int async);
+                     ldlm_policy_data_t const *policy, int *flags,
+                     void *lvb, __u32 lvb_len, struct lustre_handle *lockh,
+                     int async);
 int ldlm_prep_enqueue_req(struct obd_export *exp,
                           struct ptlrpc_request *req,
-                          struct list_head *cancels,
+                          cfs_list_t *cancels,
                           int count);
 int ldlm_prep_elc_req(struct obd_export *exp,
                       struct ptlrpc_request *req, int version, int opc,
-                      int canceloff, struct list_head *cancels, int count);
+                      int canceloff, cfs_list_t *cancels, int count);
 int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
                          const struct ldlm_request *dlm_req,
                          const struct ldlm_callback_suite *cbs);
 int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                           ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
                           int *flags, void *lvb, __u32 lvb_len,
-                          void *lvb_swabber, struct lustre_handle *lockh,
-                          int rc);
+                          struct lustre_handle *lockh, int rc);
 int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                            const struct ldlm_res_id *res_id,
                            ldlm_type_t type, ldlm_policy_data_t *policy,
@@ -1061,7 +1095,8 @@ int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                            ldlm_blocking_callback blocking,
                            ldlm_completion_callback completion,
                            ldlm_glimpse_callback glimpse,
-                           void *data, __u32 lvb_len, void *lvb_swabber,
+                           void *data, __u32 lvb_len,
+                           const __u64 *client_cookie,
                            struct lustre_handle *lockh);
 int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
                     void *data, __u32 data_len);
@@ -1071,20 +1106,24 @@ int ldlm_handle_convert0(struct ptlrpc_request *req,
                          const struct ldlm_request *dlm_req);
 int ldlm_cli_cancel(struct lustre_handle *lockh);
 int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
-                           int flags, void *opaque);
+                           ldlm_cancel_flags_t flags, void *opaque);
 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                     const struct ldlm_res_id *res_id,
                                     ldlm_policy_data_t *policy,
-                                    ldlm_mode_t mode, int flags, void *opaque);
-int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
-                        int count, int flags);
+                                    ldlm_mode_t mode,
+                                    ldlm_cancel_flags_t flags,
+                                    void *opaque);
+int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *head,
+                        int count, ldlm_cancel_flags_t flags);
 int ldlm_cancel_resource_local(struct ldlm_resource *res,
-                               struct list_head *cancels,
+                               cfs_list_t *cancels,
                                ldlm_policy_data_t *policy,
                                ldlm_mode_t mode, int lock_flags,
-                               int cancel_flags, void *opaque);
-int ldlm_cli_cancel_list(struct list_head *head, int count,
-                         struct ptlrpc_request *req, int flags);
+                               ldlm_cancel_flags_t cancel_flags, void *opaque);
+int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
+                               ldlm_cancel_flags_t flags);
+int ldlm_cli_cancel_list(cfs_list_t *head, int count,
+                         struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
 
 /* mds/handler.c */
 /* This has to be here because recursive inclusion sucks. */
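ldlm_lock_match() gains a trailing "unref" argument in this patch (0 keeps the old behaviour of skipping locks being destroyed). A hedged client-side sketch; ns, res_id and policy are caller-supplied placeholders:

static int example_match(struct ldlm_namespace *ns,
                         const struct ldlm_res_id *res_id,
                         ldlm_policy_data_t *policy)
{
        struct lustre_handle lockh;
        ldlm_mode_t mode;

        mode = ldlm_lock_match(ns, 0, res_id, LDLM_EXTENT, policy,
                               LCK_PR | LCK_PW, &lockh, 0);
        if (mode != 0)
                /* Matched: lockh now holds a reference in "mode". */
                ldlm_lock_decref(&lockh, mode);

        return mode != 0;
}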
@@ -1113,19 +1152,19 @@ enum lock_res_type {
 
 static inline void lock_res(struct ldlm_resource *res)
 {
-        spin_lock(&res->lr_lock);
+        cfs_spin_lock(&res->lr_lock);
 }
 
 static inline void lock_res_nested(struct ldlm_resource *res,
                                    enum lock_res_type mode)
 {
-        spin_lock_nested(&res->lr_lock, mode);
+        cfs_spin_lock_nested(&res->lr_lock, mode);
 }
 
 static inline void unlock_res(struct ldlm_resource *res)
 {
-        spin_unlock(&res->lr_lock);
+        cfs_spin_unlock(&res->lr_lock);
 }
 
 static inline void check_res_locked(struct ldlm_resource *res)
@@ -1157,4 +1196,7 @@ void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv);
 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit);
 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);
+
+/** @} ldlm */
+
 #endif
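Finally, the cancel entry points now take ldlm_cancel_flags_t rather than a bare int. A closing sketch: flushing one resource's unused locks asynchronously (ns and res_id are placeholders; LCF_ASYNC takes over from the removed LDLM_FL_ASYNC flag):

static int example_flush_resource(struct ldlm_namespace *ns,
                                  const struct ldlm_res_id *res_id)
{
        /* Cancel every unused lock on the resource without waiting for
         * the cancel RPCs to complete. */
        return ldlm_cli_cancel_unused(ns, res_id, LCF_ASYNC, NULL);
}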