X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fldlm%2Fldlm_internal.h;h=44b944c6ca787ed036b767cb8c98d9661bd1c4ad;hp=d2c15062dd773998b6356723824f59d80159f73a;hb=6052cc88eb1232ac3b0193f0d47881887a2dcfdc;hpb=65701b4a30efdb695776bcf690a2b3cabc928da1

diff --git a/lustre/ldlm/ldlm_internal.h b/lustre/ldlm/ldlm_internal.h
index d2c1506..44b944c 100644
--- a/lustre/ldlm/ldlm_internal.h
+++ b/lustre/ldlm/ldlm_internal.h
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -17,11 +15,7 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -29,181 +23,237 @@
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  * Lustre is a trademark of Sun Microsystems, Inc.
  */
-#define MAX_STRING_SIZE 128
+extern int ldlm_srv_namespace_nr;
+extern int ldlm_cli_namespace_nr;
+extern struct mutex ldlm_srv_namespace_lock;
+extern struct list_head ldlm_srv_namespace_list;
+extern struct mutex ldlm_cli_namespace_lock;
+extern struct list_head ldlm_cli_active_namespace_list;
+extern struct list_head ldlm_cli_inactive_namespace_list;
+extern unsigned int ldlm_cancel_unused_locks_before_replay;
+extern struct kmem_cache *ldlm_glimpse_work_kmem;
+
+static inline int ldlm_namespace_nr_read(enum ldlm_side client)
+{
+        return client == LDLM_NAMESPACE_SERVER ?
+                ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
+}
-extern cfs_atomic_t ldlm_srv_namespace_nr;
-extern cfs_atomic_t ldlm_cli_namespace_nr;
-extern cfs_semaphore_t ldlm_srv_namespace_lock;
-extern cfs_list_t ldlm_srv_namespace_list;
-extern cfs_semaphore_t ldlm_cli_namespace_lock;
-extern cfs_list_t ldlm_cli_namespace_list;
+static inline void ldlm_namespace_nr_inc(enum ldlm_side client)
+{
+        if (client == LDLM_NAMESPACE_SERVER)
+                ldlm_srv_namespace_nr++;
+        else
+                ldlm_cli_namespace_nr++;
+}
-static inline cfs_atomic_t *ldlm_namespace_nr(ldlm_side_t client)
+static inline void ldlm_namespace_nr_dec(enum ldlm_side client)
 {
-        return client == LDLM_NAMESPACE_SERVER ?
-                &ldlm_srv_namespace_nr : &ldlm_cli_namespace_nr;
+        if (client == LDLM_NAMESPACE_SERVER)
+                ldlm_srv_namespace_nr--;
+        else
+                ldlm_cli_namespace_nr--;
 }
-static inline cfs_list_t *ldlm_namespace_list(ldlm_side_t client)
+static inline struct list_head *ldlm_namespace_list(enum ldlm_side client)
 {
-        return client == LDLM_NAMESPACE_SERVER ?
-                &ldlm_srv_namespace_list : &ldlm_cli_namespace_list;
+        return client == LDLM_NAMESPACE_SERVER ?
+                &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
 }
-static inline cfs_semaphore_t *ldlm_namespace_lock(ldlm_side_t client)
+static inline
+struct list_head *ldlm_namespace_inactive_list(enum ldlm_side client)
 {
-        return client == LDLM_NAMESPACE_SERVER ?
-                &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
+        return client == LDLM_NAMESPACE_SERVER ?
+                &ldlm_srv_namespace_list : &ldlm_cli_inactive_namespace_list;
 }
-/* ldlm_request.c */
-typedef enum {
-        LDLM_ASYNC,
-        LDLM_SYNC,
-} ldlm_sync_t;
+static inline struct mutex *ldlm_namespace_lock(enum ldlm_side client)
+{
+        return client == LDLM_NAMESPACE_SERVER ?
+                &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
+}
+/* ns_bref is the number of resources in this namespace */
+static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
+{
+        return atomic_read(&ns->ns_bref) == 0;
+}
+
+void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *,
+                                          enum ldlm_side);
+void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *,
+                                            enum ldlm_side);
+struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side);
+
+/* ldlm_request.c */
 /* Cancel lru flag, it indicates we cancel aged locks. */
-enum {
-        LDLM_CANCEL_AGED = 1 << 0, /* Cancel aged locks (non lru resize). */
-        LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */
-        LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
-        LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */
-        LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither
-                                      * sending nor waiting for any rpcs) */
+enum ldlm_lru_flags {
+        LDLM_LRU_FLAG_NO_WAIT = 0x1, /* Cancel locks w/o blocking (neither
+                                      * sending nor waiting for any RPCs) */
+        LDLM_LRU_FLAG_CLEANUP = 0x2, /* Used when clearing lru, tells
+                                      * prepare_lru_list to set discard flag
+                                      * on PR extent locks so we don't waste
+                                      * time saving pages that will be
+                                      * discarded momentarily */
 };
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
-                    int flags);
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int min,
+                    enum ldlm_cancel_flags cancel_flags,
+                    enum ldlm_lru_flags lru_flags);
 int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
-                          cfs_list_t *cancels, int count, int max,
-                          ldlm_cancel_flags_t cancel_flags, int flags);
-extern int ldlm_enqueue_min;
-int ldlm_get_enq_timeout(struct ldlm_lock *lock);
-
+                          struct list_head *cancels, int min, int max,
+                          enum ldlm_cancel_flags cancel_flags,
+                          enum ldlm_lru_flags lru_flags);
+extern unsigned int ldlm_enqueue_min;
 /* ldlm_resource.c */
-int ldlm_resource_putref_locked(struct ldlm_resource *res);
+extern struct kmem_cache *ldlm_resource_slab;
+extern struct kmem_cache *ldlm_lock_slab;
+extern struct kmem_cache *ldlm_inodebits_slab;
+extern struct kmem_cache *ldlm_interval_tree_slab;
+
 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
                                      struct ldlm_lock *new);
-void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
-                               struct obd_import *imp, int force);
-void ldlm_namespace_free_post(struct ldlm_namespace *ns);
-/* ldlm_lock.c */
+void ldlm_resource_insert_lock_before(struct ldlm_lock *original,
+                                      struct ldlm_lock *new);
-struct ldlm_cb_set_arg {
-        int type; /* LDLM_BL_CALLBACK or LDLM_CP_CALLBACK */
-        unsigned int threshold; /* threshold to wake up the waiting proc */
-        cfs_atomic_t rpcs; /* # of inflight rpcs in set */
-        cfs_atomic_t restart;
-        cfs_atomic_t refcount;
-        cfs_waitq_t waitq;
-};
-
-static inline void ldlm_csa_put(struct ldlm_cb_set_arg *arg)
-{
-        if (cfs_atomic_dec_and_test(&arg->refcount)) {
-                LASSERT(cfs_atomic_read(&arg->rpcs) == 0);
-
-                OBD_FREE_PTR(arg);
-        }
-}
+/* ldlm_lock.c */
 typedef enum {
-        LDLM_WORK_BL_AST,
-        LDLM_WORK_CP_AST,
-        LDLM_WORK_REVOKE_AST
+        LDLM_WORK_BL_AST,
+        LDLM_WORK_CP_AST,
+        LDLM_WORK_REVOKE_AST,
+        LDLM_WORK_GL_AST
 } ldlm_desc_ast_t;
-void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list);
+void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock);
+void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
+int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
+                  enum req_location loc, void *data, int size);
 struct ldlm_lock *
 ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
-                 ldlm_type_t type, ldlm_mode_t,
-                 const struct ldlm_callback_suite *cbs,
-                 void *data, __u32 lvb_len);
-ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
-                               void *cookie, int *flags);
-void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
+                 enum ldlm_type type, enum ldlm_mode mode,
+                 const struct ldlm_callback_suite *cbs,
+                 void *data, __u32 lvb_len, enum lvb_type lvb_type);
+enum ldlm_error ldlm_lock_enqueue(const struct lu_env *env,
+                                  struct ldlm_namespace *,
+                                  struct ldlm_lock **,
+                                  void *cookie, __u64 *flags);
+void ldlm_lock_addref_internal(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_decref_internal(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
 void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
-                            cfs_list_t *work_list);
-int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
-                         cfs_list_t *work_list);
-int ldlm_run_ast_work(struct ldlm_namespace *ns, cfs_list_t *rpc_list,
-                      ldlm_desc_ast_t ast_type);
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
+                            struct list_head *work_list);
+#ifdef HAVE_SERVER_SUPPORT
+int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
+                         struct list_head *work_list,
+                         enum ldlm_process_intention intention,
+                         struct ldlm_lock *hint);
+int ldlm_handle_conflict_lock(struct ldlm_lock *lock, __u64 *flags,
+                              struct list_head *rpc_list);
+void ldlm_discard_bl_list(struct list_head *bl_list);
+void ldlm_clear_blocking_lock(struct ldlm_lock *lock);
+void ldlm_clear_blocking_data(struct ldlm_lock *lock);
+#endif
+int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
+                      ldlm_desc_ast_t ast_type);
+int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq);
+int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, ktime_t last_use);
+#define ldlm_lock_remove_from_lru(lock) \
+        ldlm_lock_remove_from_lru_check(lock, ktime_set(0, 0))
 int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
 void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock);
-void ldlm_lock_add_to_lru(struct ldlm_lock *lock);
 void ldlm_lock_touch_in_lru(struct ldlm_lock *lock);
 void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);
-void ldlm_cancel_locks_for_export(struct obd_export *export);
+int ldlm_export_cancel_blocked_locks(struct obd_export *exp);
+int ldlm_export_cancel_locks(struct obd_export *exp);
+void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock);
 
 /* ldlm_lockd.c */
 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                            struct ldlm_lock *lock);
-int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
-                           cfs_list_t *cancels, int count, int mode);
+int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
                           struct ldlm_lock_desc *ld,
+                           struct list_head *cancels, int count,
+                           enum ldlm_cancel_flags cancel_flags);
+int ldlm_bl_thread_wakeup(void);
 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
+void ldlm_bl_desc2lock(const struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
+#ifdef HAVE_SERVER_SUPPORT
 /* ldlm_plain.c */
-int ldlm_process_plain_lock(struct ldlm_lock *lock, int *flags, int first_enq,
-                            ldlm_error_t *err, cfs_list_t *work_list);
+int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
+                            enum ldlm_process_intention intention,
+                            enum ldlm_error *err, struct list_head *work_list);
+/* ldlm_inodebits.c */
+int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags,
+                                enum ldlm_process_intention intention,
+                                enum ldlm_error *err,
+                                struct list_head *work_list);
+int ldlm_reprocess_inodebits_queue(struct ldlm_resource *res,
+                                   struct list_head *queue,
+                                   struct list_head *work_list,
+                                   enum ldlm_process_intention intention,
+                                   struct ldlm_lock *hint);
 /* ldlm_extent.c */
-int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
-                             ldlm_error_t *err, cfs_list_t *work_list);
+int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
+                             enum ldlm_process_intention intention,
+                             enum ldlm_error *err, struct list_head *work_list);
+#endif
+int ldlm_extent_alloc_lock(struct ldlm_lock *lock);
 void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
 void ldlm_extent_unlink_lock(struct ldlm_lock *lock);
-/* ldlm_flock.c */
-int ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
-                            ldlm_error_t *err, cfs_list_t *work_list);
+int ldlm_inodebits_alloc_lock(struct ldlm_lock *lock);
+void ldlm_inodebits_add_lock(struct ldlm_resource *res, struct list_head *head,
+                             struct ldlm_lock *lock);
+void ldlm_inodebits_unlink_lock(struct ldlm_lock *lock);
-/* ldlm_inodebits.c */
-int ldlm_process_inodebits_lock(struct ldlm_lock *lock, int *flags,
-                                int first_enq, ldlm_error_t *err,
-                                cfs_list_t *work_list);
+/* ldlm_flock.c */
+int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
+                            enum ldlm_process_intention intention,
+                            enum ldlm_error *err, struct list_head *work_list);
+int ldlm_init_flock_export(struct obd_export *exp);
+void ldlm_destroy_flock_export(struct obd_export *exp);
 /* l_lock.c */
 void l_check_ns_lock(struct ldlm_namespace *ns);
 void l_check_no_ns_lock(struct ldlm_namespace *ns);
-extern cfs_proc_dir_entry_t *ldlm_svc_proc_dir;
-extern cfs_proc_dir_entry_t *ldlm_type_proc_dir;
+extern struct dentry *ldlm_svc_debugfs_dir;
 struct ldlm_state {
         struct ptlrpc_service *ldlm_cb_service;
         struct ptlrpc_service *ldlm_cancel_service;
         struct ptlrpc_client *ldlm_client;
-        struct ptlrpc_connection *ldlm_server_conn;
         struct ldlm_bl_pool *ldlm_bl_pool;
 };
 /* interval tree, for LDLM_EXTENT. */
-extern cfs_mem_cache_t *ldlm_interval_slab; /* slab cache for ldlm_interval */
+extern struct kmem_cache *ldlm_interval_slab; /* slab cache for ldlm_interval */
 extern void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l);
 extern struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l);
-extern struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock);
 extern void ldlm_interval_free(struct ldlm_interval *node);
 /* this function must be called with res lock held */
 static inline struct ldlm_extent *
 ldlm_interval_extent(struct ldlm_interval *node)
 {
         struct ldlm_lock *lock;
-        LASSERT(!cfs_list_empty(&node->li_group));
+        LASSERT(!list_empty(&node->li_group));
-        lock = cfs_list_entry(node->li_group.next, struct ldlm_lock,
+        lock = list_entry(node->li_group.next, struct ldlm_lock,
                           l_sl_policy);
         return &lock->l_policy_data.l_extent;
 }
@@ -217,78 +267,170 @@ enum ldlm_policy_res {
         LDLM_POLICY_SKIP_LOCK
 };
-typedef enum ldlm_policy_res ldlm_policy_res_t;
-
-#define LDLM_POOL_PROC_READER(var, type) \
-        static int lprocfs_rd_##var(char *page, char **start, off_t off, \
-                                    int count, int *eof, void *data) \
-        { \
-                struct ldlm_pool *pl = data; \
-                type tmp; \
-                \
-                cfs_spin_lock(&pl->pl_lock); \
-                tmp = pl->pl_##var; \
-                cfs_spin_unlock(&pl->pl_lock); \
-                \
-                return lprocfs_rd_uint(page, start, off, count, eof, &tmp); \
-        } \
-        struct __##var##__dummy_read {;} /* semicolon catcher */
-
-#define LDLM_POOL_PROC_WRITER(var, type) \
-        int lprocfs_wr_##var(struct file *file, const char *buffer, \
-                             unsigned long count, void *data) \
-        { \
-                struct ldlm_pool *pl = data; \
-                type tmp; \
-                int rc; \
-                \
-                rc = lprocfs_wr_uint(file, buffer, count, &tmp); \
-                if (rc < 0) { \
-                        CERROR("Can't parse user input, rc = %d\n", rc); \
-                        return rc; \
-                } \
-                \
-                cfs_spin_lock(&pl->pl_lock); \
-                pl->pl_##var = tmp; \
-                cfs_spin_unlock(&pl->pl_lock); \
-                \
-                return rc; \
-        } \
-        struct __##var##__dummy_write {;} /* semicolon catcher */
+#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v)
+#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; }
+#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v)
+#define LDLM_POOL_SYSFS_SET_u64(a, b) { a = b; }
+#define LDLM_POOL_SYSFS_PRINT_atomic(v) sprintf(buf, "%d\n", atomic_read(&v))
+#define LDLM_POOL_SYSFS_SET_atomic(a, b) atomic_set(&a, b)
+
+#define LDLM_POOL_SYSFS_READER_SHOW(var, type) \
+        static ssize_t var##_show(struct kobject *kobj, \
+                                  struct attribute *attr, \
+                                  char *buf) \
+        { \
+                struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\
+                                                    pl_kobj); \
+                type tmp; \
+                \
+                spin_lock(&pl->pl_lock); \
+                tmp = pl->pl_##var; \
+                spin_unlock(&pl->pl_lock); \
+                \
+                return LDLM_POOL_SYSFS_PRINT_##type(tmp); \
+        } \
+        struct __##var##__dummy_read {;} /* semicolon catcher */
+
+#define LDLM_POOL_SYSFS_WRITER_STORE(var, type) \
+        static ssize_t var##_store(struct kobject *kobj, \
+                                   struct attribute *attr, \
+                                   const char *buffer, \
+                                   size_t count) \
+        { \
+                struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\
+                                                    pl_kobj); \
+                unsigned long tmp; \
+                int rc; \
+                \
+                rc = kstrtoul(buffer, 10, &tmp); \
+                if (rc < 0) { \
+                        return rc; \
+                } \
+                \
+                spin_lock(&pl->pl_lock); \
+                LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp); \
+                spin_unlock(&pl->pl_lock); \
+                \
+                return count; \
+        } \
+        struct __##var##__dummy_write {; } /* semicolon catcher */
+
+#define LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(var, type) \
+        static ssize_t var##_show(struct kobject *kobj, \
+                                  struct attribute *attr, \
+                                  char *buf) \
+        { \
+                struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\
+                                                    pl_kobj); \
+                \
+                return LDLM_POOL_SYSFS_PRINT_##type(pl->pl_##var); \
+        } \
+        struct __##var##__dummy_read {; } /* semicolon catcher */
+
+#define LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(var, type) \
+        static ssize_t var##_store(struct kobject *kobj, \
+                                   struct attribute *attr, \
+                                   const char *buffer, \
+                                   size_t count) \
+        { \
+                struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\
+                                                    pl_kobj); \
+                unsigned long tmp; \
+                int rc; \
+                \
+                rc = kstrtoul(buffer, 10, &tmp); \
+                if (rc < 0) { \
+                        return rc; \
+                } \
+                \
+                LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp); \
+                \
+                return count; \
+        } \
+        struct __##var##__dummy_write {; } /* semicolon catcher */
+
+static inline void
+ldlm_add_var(struct ldebugfs_vars *vars, struct dentry *debugfs_entry,
+             const char *name, void *data, const struct file_operations *ops)
+{
+        vars->name = name;
+        vars->data = data;
+        vars->fops = ops;
+        ldebugfs_add_vars(debugfs_entry, vars, NULL);
+}
 static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
 {
-        int ret = 0;
+        int ret = 0;
-        lock_res_and_lock(lock);
-        if (((lock->l_req_mode == lock->l_granted_mode) &&
-             !(lock->l_flags & LDLM_FL_CP_REQD)) ||
-            (lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCEL)))
-                ret = 1;
-        unlock_res_and_lock(lock);
+        lock_res_and_lock(lock);
+        ret = is_granted_or_cancelled_nolock(lock);
+        unlock_res_and_lock(lock);
-        return ret;
+        return ret;
 }
-typedef void (*ldlm_policy_wire_to_local_t)(const ldlm_wire_policy_data_t *,
-                                            ldlm_policy_data_t *);
-
-typedef void (*ldlm_policy_local_to_wire_t)(const ldlm_policy_data_t *,
-                                            ldlm_wire_policy_data_t *);
-
-void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
                                     ldlm_policy_data_t *lpolicy);
-void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-                                     ldlm_wire_policy_data_t *wpolicy);
-void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
-                                     ldlm_policy_data_t *lpolicy);
-void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-                                     ldlm_wire_policy_data_t *wpolicy);
-void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
-                                      ldlm_policy_data_t *lpolicy);
-void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-                                      ldlm_wire_policy_data_t *wpolicy);
-void ldlm_flock_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
-                                     ldlm_policy_data_t *lpolicy);
-void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-                                     ldlm_wire_policy_data_t *wpolicy);
+static inline bool is_bl_done(struct ldlm_lock *lock)
+{
+        bool bl_done = true;
+
+        if (!ldlm_is_bl_done(lock)) {
+                lock_res_and_lock(lock);
+                bl_done = ldlm_is_bl_done(lock);
+                unlock_res_and_lock(lock);
+        }
+
+        return bl_done;
+}
+
+static inline bool is_lock_converted(struct ldlm_lock *lock)
+{
+        bool ret = 0;
+
+        lock_res_and_lock(lock);
+        ret = (lock->l_policy_data.l_inodebits.cancel_bits == 0);
+        unlock_res_and_lock(lock);
+
+        return ret;
+}
+
+typedef void (*ldlm_policy_wire_to_local_t)(const union ldlm_wire_policy_data *,
+                                            union ldlm_policy_data *);
+typedef void (*ldlm_policy_local_to_wire_t)(const union ldlm_policy_data *,
+                                            union ldlm_wire_policy_data *);
+void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+                                     union ldlm_policy_data *lpolicy);
+void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+                                     union ldlm_wire_policy_data *wpolicy);
+void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+                                     union ldlm_policy_data *lpolicy);
+void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+                                     union ldlm_wire_policy_data *wpolicy);
+void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+                                      union ldlm_policy_data *lpolicy);
+void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+                                      union ldlm_wire_policy_data *wpolicy);
+void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+                                     union ldlm_policy_data *lpolicy);
+void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+                                     union ldlm_wire_policy_data *wpolicy);
+
+/* ldlm_reclaim.c */
+#ifdef HAVE_SERVER_SUPPORT
+extern __u64 ldlm_reclaim_threshold;
+extern __u64 ldlm_lock_limit;
+extern __u64 ldlm_reclaim_threshold_mb;
+extern __u64 ldlm_lock_limit_mb;
+extern struct percpu_counter ldlm_granted_total;
+#endif
+int ldlm_reclaim_setup(void);
+void ldlm_reclaim_cleanup(void);
+void ldlm_reclaim_add(struct ldlm_lock *lock);
+void ldlm_reclaim_del(struct ldlm_lock *lock);
+bool ldlm_reclaim_full(void);
+
+static inline bool ldlm_res_eq(const struct ldlm_res_id *res0,
+                               const struct ldlm_res_id *res1)
+{
+        return memcmp(res0, res1, sizeof(*res0)) == 0;
+}
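
The LDLM_POOL_SYSFS_READER_SHOW()/LDLM_POOL_SYSFS_WRITER_STORE() macros added above each expand into a sysfs show/store handler for one struct ldlm_pool field. A minimal sketch of what a single instantiation expands to, assuming an int field pl_grant_plan purely as an example (the actual instantiations live in ldlm_pool.c, not in this header):

/* Roughly what LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int) expands to:
 * a sysfs "show" handler that samples the field under pl_lock and then
 * formats it via LDLM_POOL_SYSFS_PRINT_int(). Kernel context is assumed;
 * the trailing __grant_plan__dummy_read "semicolon catcher" is omitted. */
static ssize_t grant_plan_show(struct kobject *kobj, struct attribute *attr,
                               char *buf)
{
        struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, pl_kobj);
        int tmp;

        spin_lock(&pl->pl_lock);
        tmp = pl->pl_grant_plan;
        spin_unlock(&pl->pl_lock);

        return sprintf(buf, "%d\n", tmp);
}

The matching grant_plan_store() handler produced by LDLM_POOL_SYSFS_WRITER_STORE(grant_plan, int) parses the user value with kstrtoul() and writes it back to the field under the same pl_lock spinlock, returning count on success.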