X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fldlm%2Fldlm_internal.h;h=517ab6091de5c389dae4f366eb5fde0041e6aaea;hp=3eeed95ecf7e075b2b95b091bb99e5076eeaf657;hb=05e6ccd344e7eba44e43230fa2fa0a1b3b6115c4;hpb=70e80ade90af09300396706b8910e196a7928520 diff --git a/lustre/ldlm/ldlm_internal.h b/lustre/ldlm/ldlm_internal.h index 3eeed95..517ab60 100644 --- a/lustre/ldlm/ldlm_internal.h +++ b/lustre/ldlm/ldlm_internal.h @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -16,178 +14,235 @@ * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see [sun.com URL with a - * copy of GPLv2]. - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. */ -#define MAX_STRING_SIZE 128 - -extern atomic_t ldlm_srv_namespace_nr; -extern atomic_t ldlm_cli_namespace_nr; -extern struct semaphore ldlm_srv_namespace_lock; +extern int ldlm_srv_namespace_nr; +extern int ldlm_cli_namespace_nr; +extern struct mutex ldlm_srv_namespace_lock; extern struct list_head ldlm_srv_namespace_list; -extern struct semaphore ldlm_cli_namespace_lock; -extern struct list_head ldlm_cli_namespace_list; +extern struct mutex ldlm_cli_namespace_lock; +extern struct list_head ldlm_cli_active_namespace_list; +extern struct list_head ldlm_cli_inactive_namespace_list; +extern unsigned int ldlm_cancel_unused_locks_before_replay; +extern struct kmem_cache *ldlm_glimpse_work_kmem; -static inline atomic_t *ldlm_namespace_nr(ldlm_side_t client) +static inline int ldlm_namespace_nr_read(enum ldlm_side client) { - return client == LDLM_NAMESPACE_SERVER ? - &ldlm_srv_namespace_nr : &ldlm_cli_namespace_nr; + return client == LDLM_NAMESPACE_SERVER ? + ldlm_srv_namespace_nr : ldlm_cli_namespace_nr; } -static inline struct list_head *ldlm_namespace_list(ldlm_side_t client) +static inline void ldlm_namespace_nr_inc(enum ldlm_side client) { - return client == LDLM_NAMESPACE_SERVER ? - &ldlm_srv_namespace_list : &ldlm_cli_namespace_list; + if (client == LDLM_NAMESPACE_SERVER) + ldlm_srv_namespace_nr++; + else + ldlm_cli_namespace_nr++; } -static inline struct semaphore *ldlm_namespace_lock(ldlm_side_t client) +static inline void ldlm_namespace_nr_dec(enum ldlm_side client) { - return client == LDLM_NAMESPACE_SERVER ? - &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock; + if (client == LDLM_NAMESPACE_SERVER) + ldlm_srv_namespace_nr--; + else + ldlm_cli_namespace_nr--; } -/* ldlm_request.c */ -typedef enum { - LDLM_ASYNC, - LDLM_SYNC, -} ldlm_sync_t; - -/* Cancel lru flag, it indicates we cancel aged locks. */ -enum { - LDLM_CANCEL_AGED = 1 << 0, /* Cancel aged locks (non lru resize). */ - LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. 
*/ - LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */ - LDLM_CANCEL_LRUR = 1 << 3 /* Cancel locks from lru resize. */ -}; +static inline struct list_head *ldlm_namespace_list(enum ldlm_side client) +{ + return client == LDLM_NAMESPACE_SERVER ? + &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list; +} -int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync, - int flags); -int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels, - int count, int max, int cancel_flags, int flags); -int ldlm_cancel_lru_estimate(struct ldlm_namespace *ns, int count, int max, - int flags); -extern int ldlm_enqueue_min; -int ldlm_get_enq_timeout(struct ldlm_lock *lock); +static inline +struct list_head *ldlm_namespace_inactive_list(enum ldlm_side client) +{ + return client == LDLM_NAMESPACE_SERVER ? + &ldlm_srv_namespace_list : &ldlm_cli_inactive_namespace_list; +} + +static inline struct mutex *ldlm_namespace_lock(enum ldlm_side client) +{ + return client == LDLM_NAMESPACE_SERVER ? + &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock; +} +/* ns_bref is the number of resources in this namespace */ +static inline int ldlm_ns_empty(struct ldlm_namespace *ns) +{ + return atomic_read(&ns->ns_bref) == 0; +} + +void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, + enum ldlm_side); +void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *, + enum ldlm_side); +struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side); + +/* ldlm_request.c */ +int ldlm_cancel_lru(struct ldlm_namespace *ns, int min, + enum ldlm_cancel_flags cancel_flags, + enum ldlm_lru_flags lru_flags); +int ldlm_cancel_lru_local(struct ldlm_namespace *ns, + struct list_head *cancels, int min, int max, + enum ldlm_cancel_flags cancel_flags, + enum ldlm_lru_flags lru_flags); +extern unsigned int ldlm_enqueue_min; /* ldlm_resource.c */ -int ldlm_resource_putref_locked(struct ldlm_resource *res); +extern struct kmem_cache *ldlm_resource_slab; +extern struct kmem_cache *ldlm_lock_slab; +extern struct kmem_cache *ldlm_inodebits_slab; +extern struct kmem_cache *ldlm_interval_tree_slab; + void ldlm_resource_insert_lock_after(struct ldlm_lock *original, struct ldlm_lock *new); -void ldlm_namespace_free_prior(struct ldlm_namespace *ns, - struct obd_import *imp, int force); -void ldlm_namespace_free_post(struct ldlm_namespace *ns); -/* ldlm_lock.c */ +void ldlm_resource_insert_lock_before(struct ldlm_lock *original, + struct ldlm_lock *new); -/* Number of blocking/completion callbacks that will be sent in - * parallel (see bug 11301). 
*/ -#define PARALLEL_AST_LIMIT 200 - -struct ldlm_cb_set_arg { - struct ptlrpc_request_set *set; - atomic_t restart; - __u32 type; /* LDLM_BL_CALLBACK or LDLM_CP_CALLBACK */ -}; +/* ldlm_lock.c */ typedef enum { - LDLM_WORK_BL_AST, - LDLM_WORK_CP_AST, - LDLM_WORK_REVOKE_AST -} ldlm_desc_ast_t; + LDLM_WORK_BL_AST, + LDLM_WORK_CP_AST, + LDLM_WORK_REVOKE_AST, + LDLM_WORK_GL_AST +} ldlm_desc_ast_t; +void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock); void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list); +int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, + enum req_location loc, void *data, int size); struct ldlm_lock * ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *, - ldlm_type_t type, ldlm_mode_t, ldlm_blocking_callback, - ldlm_completion_callback, ldlm_glimpse_callback, void *data, - __u32 lvb_len); -ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **, - void *cookie, int *flags); -void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode); -void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode); + enum ldlm_type type, enum ldlm_mode mode, + const struct ldlm_callback_suite *cbs, + void *data, __u32 lvb_len, enum lvb_type lvb_type); +enum ldlm_error ldlm_lock_enqueue(const struct lu_env *env, + struct ldlm_namespace *, + struct ldlm_lock **, + void *cookie, __u64 *flags); +void ldlm_lock_addref_internal(struct ldlm_lock *, enum ldlm_mode mode); +void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode); +void ldlm_lock_decref_internal(struct ldlm_lock *, enum ldlm_mode mode); +void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode); void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, - struct list_head *work_list); + struct list_head *work_list); +#ifdef HAVE_SERVER_SUPPORT int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue, - struct list_head *work_list); -int ldlm_run_ast_work(struct list_head *rpc_list, ldlm_desc_ast_t ast_type); -int ldlm_lock_remove_from_lru(struct ldlm_lock *lock); + struct list_head *work_list, + enum ldlm_process_intention intention, __u64 hint); +int ldlm_handle_conflict_lock(struct ldlm_lock *lock, __u64 *flags, + struct list_head *rpc_list); +void ldlm_discard_bl_list(struct list_head *bl_list); +void ldlm_clear_blocking_lock(struct ldlm_lock *lock); +void ldlm_clear_blocking_data(struct ldlm_lock *lock); +#endif +int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list, + ldlm_desc_ast_t ast_type); +int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq); +int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, ktime_t last_use); +#define ldlm_lock_remove_from_lru(lock) \ + ldlm_lock_remove_from_lru_check(lock, ktime_set(0, 0)) int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock); void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock); -void ldlm_lock_add_to_lru(struct ldlm_lock *lock); void ldlm_lock_touch_in_lru(struct ldlm_lock *lock); void ldlm_lock_destroy_nolock(struct ldlm_lock *lock); +int ldlm_export_cancel_blocked_locks(struct obd_export *exp); +int ldlm_export_cancel_locks(struct obd_export *exp); +void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock); + /* ldlm_lockd.c */ int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock); -int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, - struct list_head *cancels, int count); +int 
ldlm_bl_to_thread_list(struct ldlm_namespace *ns, + struct ldlm_lock_desc *ld, + struct list_head *cancels, int count, + enum ldlm_cancel_flags cancel_flags); +int ldlm_bl_to_thread_ns(struct ldlm_namespace *ns); +int ldlm_bl_thread_wakeup(void); void ldlm_handle_bl_callback(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock); +void ldlm_bl_desc2lock(const struct ldlm_lock_desc *ld, struct ldlm_lock *lock); +#ifdef HAVE_SERVER_SUPPORT /* ldlm_plain.c */ -int ldlm_process_plain_lock(struct ldlm_lock *lock, int *flags, int first_enq, - ldlm_error_t *err, struct list_head *work_list); +int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags, + enum ldlm_process_intention intention, + enum ldlm_error *err, struct list_head *work_list); +/* ldlm_inodebits.c */ +int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags, + enum ldlm_process_intention intention, + enum ldlm_error *err, + struct list_head *work_list); +int ldlm_reprocess_inodebits_queue(struct ldlm_resource *res, + struct list_head *queue, + struct list_head *work_list, + enum ldlm_process_intention intention, + __u64 hint); /* ldlm_extent.c */ -int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq, - ldlm_error_t *err, struct list_head *work_list); +int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags, + enum ldlm_process_intention intention, + enum ldlm_error *err, struct list_head *work_list); +#endif +int ldlm_extent_alloc_lock(struct ldlm_lock *lock); void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock); void ldlm_extent_unlink_lock(struct ldlm_lock *lock); -/* ldlm_flock.c */ -int ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq, - ldlm_error_t *err, struct list_head *work_list); +int ldlm_inodebits_alloc_lock(struct ldlm_lock *lock); +void ldlm_inodebits_add_lock(struct ldlm_resource *res, struct list_head *head, + struct ldlm_lock *lock, bool tail); +void ldlm_inodebits_unlink_lock(struct ldlm_lock *lock); -/* ldlm_inodebits.c */ -int ldlm_process_inodebits_lock(struct ldlm_lock *lock, int *flags, - int first_enq, ldlm_error_t *err, - struct list_head *work_list); +/* ldlm_flock.c */ +int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, + enum ldlm_process_intention intention, + enum ldlm_error *err, struct list_head *work_list); +int ldlm_init_flock_export(struct obd_export *exp); +void ldlm_destroy_flock_export(struct obd_export *exp); /* l_lock.c */ void l_check_ns_lock(struct ldlm_namespace *ns); void l_check_no_ns_lock(struct ldlm_namespace *ns); -extern cfs_proc_dir_entry_t *ldlm_svc_proc_dir; -extern cfs_proc_dir_entry_t *ldlm_type_proc_dir; +extern struct dentry *ldlm_svc_debugfs_dir; struct ldlm_state { struct ptlrpc_service *ldlm_cb_service; struct ptlrpc_service *ldlm_cancel_service; struct ptlrpc_client *ldlm_client; - struct ptlrpc_connection *ldlm_server_conn; struct ldlm_bl_pool *ldlm_bl_pool; }; /* interval tree, for LDLM_EXTENT. 
*/ -extern cfs_mem_cache_t *ldlm_interval_slab; /* slab cache for ldlm_interval */ +extern struct kmem_cache *ldlm_interval_slab; /* slab cache for ldlm_interval */ extern void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l); extern struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l); -extern struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock); extern void ldlm_interval_free(struct ldlm_interval *node); /* this function must be called with res lock held */ static inline struct ldlm_extent * ldlm_interval_extent(struct ldlm_interval *node) { struct ldlm_lock *lock; - LASSERT(!list_empty(&node->li_group)); + LASSERT(!list_empty(&node->li_group)); - lock = list_entry(node->li_group.next, struct ldlm_lock, l_sl_policy); + lock = list_entry(node->li_group.next, struct ldlm_lock, + l_sl_policy); return &lock->l_policy_data.l_extent; } @@ -196,44 +251,174 @@ void ldlm_exit(void); enum ldlm_policy_res { LDLM_POLICY_CANCEL_LOCK, - LDLM_POLICY_KEEP_LOCK + LDLM_POLICY_KEEP_LOCK, + LDLM_POLICY_SKIP_LOCK }; -typedef enum ldlm_policy_res ldlm_policy_res_t; - -#define LDLM_POOL_PROC_READER(var, type) \ - static int lprocfs_rd_##var(char *page, char **start, off_t off, \ - int count, int *eof, void *data) \ - { \ - struct ldlm_pool *pl = data; \ - type tmp; \ - \ - spin_lock(&pl->pl_lock); \ - tmp = pl->pl_##var; \ - spin_unlock(&pl->pl_lock); \ - \ - return lprocfs_rd_uint(page, start, off, count, eof, &tmp); \ - } \ - struct __##var##__dummy_read {;} /* semicolon catcher */ - -#define LDLM_POOL_PROC_WRITER(var, type) \ - int lprocfs_wr_##var(struct file *file, const char *buffer, \ - unsigned long count, void *data) \ - { \ - struct ldlm_pool *pl = data; \ - type tmp; \ - int rc; \ - \ - rc = lprocfs_wr_uint(file, buffer, count, &tmp); \ - if (rc) { \ - CERROR("Can't parse user input, rc = %d\n", rc); \ - return rc; \ - } \ - \ - spin_lock(&pl->pl_lock); \ - pl->pl_##var = tmp; \ - spin_unlock(&pl->pl_lock); \ - \ - return rc; \ - } \ - struct __##var##__dummy_write {;} /* semicolon catcher */ +#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v) +#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; } +#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v) +#define LDLM_POOL_SYSFS_SET_u64(a, b) { a = b; } +#define LDLM_POOL_SYSFS_PRINT_atomic(v) sprintf(buf, "%d\n", atomic_read(&v)) +#define LDLM_POOL_SYSFS_SET_atomic(a, b) atomic_set(&a, b) + +#define LDLM_POOL_SYSFS_READER_SHOW(var, type) \ + static ssize_t var##_show(struct kobject *kobj, \ + struct attribute *attr, \ + char *buf) \ + { \ + struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\ + pl_kobj); \ + type tmp; \ + \ + spin_lock(&pl->pl_lock); \ + tmp = pl->pl_##var; \ + spin_unlock(&pl->pl_lock); \ + \ + return LDLM_POOL_SYSFS_PRINT_##type(tmp); \ + } \ + struct __##var##__dummy_read {;} /* semicolon catcher */ + +#define LDLM_POOL_SYSFS_WRITER_STORE(var, type) \ + static ssize_t var##_store(struct kobject *kobj, \ + struct attribute *attr, \ + const char *buffer, \ + size_t count) \ + { \ + struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\ + pl_kobj); \ + unsigned long tmp; \ + int rc; \ + \ + rc = kstrtoul(buffer, 10, &tmp); \ + if (rc < 0) { \ + return rc; \ + } \ + \ + spin_lock(&pl->pl_lock); \ + LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp); \ + spin_unlock(&pl->pl_lock); \ + \ + return count; \ + } \ + struct __##var##__dummy_write {; } /* semicolon catcher */ + +#define LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(var, type) \ + static ssize_t var##_show(struct kobject 
*kobj, \ + struct attribute *attr, \ + char *buf) \ + { \ + struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\ + pl_kobj); \ + \ + return LDLM_POOL_SYSFS_PRINT_##type(pl->pl_##var); \ + } \ + struct __##var##__dummy_read {; } /* semicolon catcher */ + +#define LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(var, type) \ + static ssize_t var##_store(struct kobject *kobj, \ + struct attribute *attr, \ + const char *buffer, \ + size_t count) \ + { \ + struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\ + pl_kobj); \ + unsigned long tmp; \ + int rc; \ + \ + rc = kstrtoul(buffer, 10, &tmp); \ + if (rc < 0) { \ + return rc; \ + } \ + \ + LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp); \ + \ + return count; \ + } \ + struct __##var##__dummy_write {; } /* semicolon catcher */ + +static inline void +ldlm_add_var(struct ldebugfs_vars *vars, struct dentry *debugfs_entry, + const char *name, void *data, const struct file_operations *ops) +{ + vars->name = name; + vars->data = data; + vars->fops = ops; + ldebugfs_add_vars(debugfs_entry, vars, NULL); +} + +static inline int is_granted_or_cancelled(struct ldlm_lock *lock) +{ + int ret = 0; + + lock_res_and_lock(lock); + ret = is_granted_or_cancelled_nolock(lock); + unlock_res_and_lock(lock); + + return ret; +} + +static inline bool is_bl_done(struct ldlm_lock *lock) +{ + bool bl_done = true; + + if (!ldlm_is_bl_done(lock)) { + lock_res_and_lock(lock); + bl_done = ldlm_is_bl_done(lock); + unlock_res_and_lock(lock); + } + + return bl_done; +} + +static inline bool is_lock_converted(struct ldlm_lock *lock) +{ + bool ret = 0; + + lock_res_and_lock(lock); + ret = (lock->l_policy_data.l_inodebits.cancel_bits == 0); + unlock_res_and_lock(lock); + + return ret; +} + +typedef void (*ldlm_policy_wire_to_local_t)(const union ldlm_wire_policy_data *, + union ldlm_policy_data *); +typedef void (*ldlm_policy_local_to_wire_t)(const union ldlm_policy_data *, + union ldlm_wire_policy_data *); +void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy); +void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy, + union ldlm_wire_policy_data *wpolicy); +void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy); +void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy, + union ldlm_wire_policy_data *wpolicy); +void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy); +void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy, + union ldlm_wire_policy_data *wpolicy); +void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy, + union ldlm_policy_data *lpolicy); +void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy, + union ldlm_wire_policy_data *wpolicy); + +/* ldlm_reclaim.c */ +#ifdef HAVE_SERVER_SUPPORT +extern __u64 ldlm_reclaim_threshold; +extern __u64 ldlm_lock_limit; +extern __u64 ldlm_reclaim_threshold_mb; +extern __u64 ldlm_lock_limit_mb; +extern struct percpu_counter ldlm_granted_total; +#endif +int ldlm_reclaim_setup(void); +void ldlm_reclaim_cleanup(void); +void ldlm_reclaim_add(struct ldlm_lock *lock); +void ldlm_reclaim_del(struct ldlm_lock *lock); +bool ldlm_reclaim_full(void); + +static inline bool ldlm_res_eq(const struct ldlm_res_id *res0, + const struct ldlm_res_id *res1) +{ + return memcmp(res0, res1, sizeof(*res0)) == 0; +}
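--
Usage sketch (illustrative, not part of the patch): the LDLM_POOL_SYSFS_READER_SHOW and
LDLM_POOL_SYSFS_WRITER_STORE macros added above generate sysfs show/store callbacks for a
struct ldlm_pool field named pl_<var>, locating the pool through its embedded pl_kobj
kobject. The field name "demo_stat" below is a hypothetical example chosen for
illustration, not a real pool member; the expansion is shown only to clarify what the
macro produces, assuming a struct ldlm_pool with an int member pl_demo_stat.

	/* Hypothetical instantiation for an int pool field pl_demo_stat. */
	LDLM_POOL_SYSFS_READER_SHOW(demo_stat, int);

	/* The line above expands to approximately:
	 *
	 * static ssize_t demo_stat_show(struct kobject *kobj,
	 *				 struct attribute *attr, char *buf)
	 * {
	 *	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
	 *					    pl_kobj);
	 *	int tmp;
	 *
	 *	spin_lock(&pl->pl_lock);
	 *	tmp = pl->pl_demo_stat;
	 *	spin_unlock(&pl->pl_lock);
	 *
	 *	return sprintf(buf, "%d\n", tmp);
	 * }
	 *
	 * The matching LDLM_POOL_SYSFS_WRITER_STORE(demo_stat, int) would parse
	 * the user buffer with kstrtoul() and assign the parsed value under
	 * pl_lock via LDLM_POOL_SYSFS_SET_int().
	 */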