-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#endif
#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
#define LDLM_CTIME_AGE_LIMIT (10)
+#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
typedef enum {
ELDLM_OK = 0,
#define LDLM_FL_HAS_INTENT 0x001000 /* lock request has intent */
#define LDLM_FL_CANCELING 0x002000 /* lock cancel has already been sent */
#define LDLM_FL_LOCAL 0x004000 /* local lock (ie, no srv/cli split) */
-/* was LDLM_FL_WARN until 2.0.0 0x008000 */
#define LDLM_FL_DISCARD_DATA 0x010000 /* discard (no writeback) on cancel */
#define LDLM_FL_NO_TIMEOUT 0x020000 /* Blocked by group lock - wait
* indefinitely */
/* file & record locking */
-#define LDLM_FL_BLOCK_NOWAIT 0x040000 // server told not to wait if blocked
+#define LDLM_FL_BLOCK_NOWAIT 0x040000 /* server told not to wait if blocked.
+ * For AGL, OST will not send glimpse
+ * callback. */
#define LDLM_FL_TEST_LOCK 0x080000 // return blocking lock
/* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
* list. */
#define LDLM_FL_KMS_IGNORE 0x200000
+/* Don't put the lock into the LRU list, so that it is not canceled due to
+ * aging. Used by MGC locks: they are canceled only at unmount time or by
+ * callback. */
+#define LDLM_FL_NO_LRU 0x400000
+
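+/* A minimal sketch (an assumption about the caller, not a definition here)
+ * of how the LRU path is expected to honor this flag; the helper name
+ * ldlm_lock_add_to_lru_nolock() matches the existing LRU code:
+ *
+ *     if (!(lock->l_flags & LDLM_FL_NO_LRU))
+ *             ldlm_lock_add_to_lru_nolock(lock);
+ */
+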
/* Immediately cancel such locks when they block some other locks. Send
 * cancel notification to the original lock holder, but expect no reply. This
 * is for clients (like liblustre) that cannot be expected to respond
 * reliably without involving a separate thread, in order to decrease the
 * context-switch rate. */
#define LDLM_FL_ATOMIC_CB 0x4000000
-/* was LDLM_FL_ASYNC until 2.0.0 0x8000000 */
-
/* It may happen that a client initiates two operations, e.g. unlink and
 * mkdir, such that the server sends a blocking AST for conflicting locks
 * to this client for the first operation, whereas the second operation has
 * canceled this lock and
* led_lock
*
* lr_lock
- * ns_unused_lock
+ * ns_lock
*
- * lr_lvb_sem
+ * lr_lvb_mutex
* lr_lock
*
*/
*/
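+/* A hedged illustration of the ordering documented above: code that needs
+ * both the resource lock and the namespace lock must take lr_lock first,
+ * and lvbo initialization takes lr_lvb_mutex before lr_lock, e.g.:
+ *
+ *     lock_res(res);                   // res->lr_lock
+ *     cfs_spin_lock(&ns->ns_lock);     // then ns_lock
+ *     ...
+ *     cfs_spin_unlock(&ns->ns_lock);
+ *     unlock_res(res);
+ */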
cfs_atomic_t pl_cancel_rate;
/**
- * Grant speed (GR-CR) per T.
- */
- cfs_atomic_t pl_grant_speed;
- /**
* Server lock volume. Protected by pl_lock.
*/
__u64 pl_server_lock_volume;
#define NS_DEFAULT_CONTENTION_SECONDS 2
#define NS_DEFAULT_CONTENDED_LOCKS 32
+struct ldlm_ns_bucket {
+ /** back pointer to the owning namespace */
+ struct ldlm_namespace *nsb_namespace;
+ /** estimated lock callback time */
+ struct adaptive_timeout nsb_at_estimate;
+};
+
+enum {
+ /** ldlm namespace lock stats */
+ LDLM_NSS_LOCKS = 0,
+ LDLM_NSS_LAST
+};
+
+typedef enum {
+ /** invalid type */
+ LDLM_NS_TYPE_UNKNOWN = 0,
+ /** mdc namespace */
+ LDLM_NS_TYPE_MDC,
+ /** mds namespace */
+ LDLM_NS_TYPE_MDT,
+ /** osc namespace */
+ LDLM_NS_TYPE_OSC,
+ /** ost namespace */
+ LDLM_NS_TYPE_OST,
+ /** mgc namespace */
+ LDLM_NS_TYPE_MGC,
+ /** mgs namespace */
+ LDLM_NS_TYPE_MGT,
+} ldlm_ns_type_t;
+
struct ldlm_namespace {
/**
- * Namespace name. Used for logging, etc.
+ * Backward link to obd, required for ldlm pool to store new SLV.
*/
- char *ns_name;
+ struct obd_device *ns_obd;
/**
* Is this a client-side lock tree?
ldlm_side_t ns_client;
/**
- * Namespce connect flags supported by server (may be changed via proc,
- * lru resize may be disabled/enabled).
+ * hash table of resources in this namespace
*/
- __u64 ns_connect_flags;
+ cfs_hash_t *ns_rs_hash;
- /**
- * Client side orig connect flags supported by server.
- */
- __u64 ns_orig_connect_flags;
+ /**
+ * serializes namespace updates (e.g. the unused-lock list;
+ * replaces ns_unused_lock)
+ */
+ cfs_spinlock_t ns_lock;
/**
- * Hash table for namespace.
+ * big refcount (by bucket); e.g. one reference is held per resource
*/
- cfs_list_t *ns_hash;
- cfs_spinlock_t ns_hash_lock;
+ cfs_atomic_t ns_bref;
- /**
- * Count of resources in the hash.
- */
- __u32 ns_refcount;
+ /**
+ * Namespace connect flags supported by the server (may be changed
+ * via proc; LRU resize may be disabled/enabled).
+ */
+ __u64 ns_connect_flags;
/**
- * All root resources in namespace.
+ * Client side orig connect flags supported by server.
*/
- cfs_list_t ns_root_list;
+ __u64 ns_orig_connect_flags;
/**
* Position in global namespace list.
*/
cfs_list_t ns_unused_list;
int ns_nr_unused;
- cfs_spinlock_t ns_unused_lock;
unsigned int ns_max_unused;
unsigned int ns_max_age;
*/
cfs_time_t ns_next_dump;
- cfs_atomic_t ns_locks;
- __u64 ns_resources;
ldlm_res_policy ns_policy;
struct ldlm_valblock_ops *ns_lvbo;
void *ns_lvbp;
unsigned ns_max_nolock_size;
/**
- * Backward link to obd, required for ldlm pool to store new SLV.
+ * Limit of parallel AST RPC count.
*/
- struct obd_device *ns_obd;
-
- struct adaptive_timeout ns_at_estimate;/* estimated lock callback time*/
+ unsigned ns_max_parallel_ast;
/* callback to cancel locks before replaying it during recovery */
ldlm_cancel_for_recovery ns_cancel_for_recovery;
+ /**
+ * ldlm lock stats
+ */
+ struct lprocfs_stats *ns_stats;
+
+ unsigned ns_stopping:1; /* namespace cleanup is in progress */
};
static inline int ns_is_client(struct ldlm_namespace *ns)
ns->ns_cancel_for_recovery = arg;
}
-/*
- *
- * Resource hash table
- *
- */
-
-#define RES_HASH_BITS 12
-#define RES_HASH_SIZE (1UL << RES_HASH_BITS)
-#define RES_HASH_MASK (RES_HASH_SIZE - 1)
-
struct ldlm_lock;
typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
+struct ldlm_glimpse_work {
+ struct ldlm_lock *gl_lock; /* lock to glimpse */
+ cfs_list_t gl_list; /* linkage to other gl work structs */
+ __u32 gl_flags; /* see LDLM_GL_WORK_* below */
+};
+
+/* the ldlm_glimpse_work is allocated on the stack and should not be freed */
+#define LDLM_GL_WORK_NOFREE 0x1
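+
+/* A sketch of the intended usage (server side; error handling omitted):
+ * build a list of stack-allocated work items and hand it to
+ * ldlm_glimpse_locks(), declared later in this header:
+ *
+ *     struct ldlm_glimpse_work gl_work;
+ *     CFS_LIST_HEAD(gl_list);
+ *
+ *     gl_work.gl_lock  = LDLM_LOCK_GET(lock);
+ *     gl_work.gl_flags = LDLM_GL_WORK_NOFREE;  // on the stack, don't free
+ *     cfs_list_add_tail(&gl_work.gl_list, &gl_list);
+ *     ldlm_glimpse_locks(res, &gl_list);
+ */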
+
/* Interval node data for each LDLM_EXTENT lock */
struct ldlm_interval {
struct interval_node li_node; /* node for tree mgmt */
struct interval_node *lit_root; /* actually ldlm_interval */
};
-#define LUSTRE_TRACKS_LOCK_EXP_REFS (1)
+#define LUSTRE_TRACKS_LOCK_EXP_REFS (0)
/* Cancel flag. */
typedef enum {
* in the same RPC */
} ldlm_cancel_flags_t;
+struct ldlm_flock {
+ __u64 start;
+ __u64 end;
+ __u64 owner;
+ __u64 blocking_owner;
+ struct obd_export *blocking_export;
+ /* Protected by the hash lock */
+ __u32 blocking_refs;
+ __u32 pid;
+};
+
+typedef union {
+ struct ldlm_extent l_extent;
+ struct ldlm_flock l_flock;
+ struct ldlm_inodebits l_inodebits;
+} ldlm_policy_data_t;
+
+void ldlm_convert_policy_to_wire(ldlm_type_t type,
+ const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy);
+void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
+ const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy);
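+
+/* These converters keep the wire format stable while the local
+ * ldlm_policy_data_t can evolve. A hedged example for an extent lock
+ * (OBD_OBJECT_EOF is the usual "whole object" end offset):
+ *
+ *     ldlm_wire_policy_data_t wpolicy;
+ *     ldlm_policy_data_t      lpolicy;
+ *
+ *     lpolicy.l_extent.start = 0;
+ *     lpolicy.l_extent.end   = OBD_OBJECT_EOF;
+ *     ldlm_convert_policy_to_wire(LDLM_EXTENT, &lpolicy, &wpolicy);
+ */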
+
struct ldlm_lock {
/**
* Must be first in the structure.
/**
* Protected by lr_lock. Requested mode.
*/
+ /**
+ * Protected by per-bucket exp->exp_flock_hash locks. Per export hash
+ * of locks.
+ */
+ cfs_hlist_node_t l_exp_flock_hash;
+
ldlm_mode_t l_req_mode;
/**
* Granted mode, also protected by lr_lock.
__u64 l_flags;
__u32 l_readers;
__u32 l_writers;
- /*
- * Set for locks that were removed from class hash table and will be
- * destroyed when last reference to them is released. Set by
- * ldlm_lock_destroy_internal().
- *
- * Protected by lock and resource locks.
- */
- __u8 l_destroyed;
-
/**
* If the lock is granted, a process sleeps on this waitq to learn when
* it's no longer in use. If the lock is not granted, a process sleeps
struct ldlm_extent l_req_extent;
+ unsigned int l_failed:1,
+ /*
+ * Set for locks that were removed from the class hash table and will
+ * be destroyed when the last reference to them is released. Set by
+ * ldlm_lock_destroy_internal().
+ *
+ * Protected by lock and resource locks.
+ */
+ l_destroyed:1,
+ /*
+ * it's set in lock_res_and_lock() and unset in unlock_res_and_lock().
+ *
+ * NB: compared with check_res_locked(), checking this bit is cheaper.
+ * Also, spin_is_locked() is deprecated in kernel code; one reason is
+ * that it works only on SMP, so callers need extra macros such as
+ * LASSERT_SPIN_LOCKED for uniprocessor kernels.
+ */
+ l_res_locked:1,
+ /*
+ * it's set once we call ldlm_add_waiting_lock_res_locked()
+ * to start the lock-timeout timer and it will never be reset.
+ *
+ * Protected by lock_res_and_lock().
+ */
+ l_waited:1,
+ /**
+ * flag whether this is a server namespace lock.
+ */
+ l_ns_srv:1;
+
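+ /* For example (a sketch consistent with the l_res_locked comment
+ * above), an assertion can test the bit instead of the spinlock state:
+ *
+ * LASSERT(lock->l_res_locked);
+ */
+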
/*
* Client-side-only members.
*/
void *l_lvb_data;
void *l_ast_data;
- cfs_spinlock_t l_extents_list_lock;
- cfs_list_t l_extents_list;
-
- cfs_list_t l_cache_locks_list;
/*
* Server-side-only members.
*/
__u32 l_pid;
+ int l_bl_ast_run;
/**
* For ldlm_add_ast_work_item().
*/
cfs_list_t l_rk_ast;
struct ldlm_lock *l_blocking_lock;
- int l_bl_ast_run;
/**
* Protected by lr_lock, linkages to "skip lists".
/** referenced export object */
struct obd_export *l_exp_refs_target;
#endif
+ /** export blocking dlm lock list, protected by
+ * l_export->exp_bl_list_lock.
+ * Lock order of waiting_locks_spinlock, exp_bl_list_lock and res lock
+ * is: res lock -> exp_bl_list_lock -> waiting_locks_spinlock. */
+ cfs_list_t l_exp_list;
};
struct ldlm_resource {
- struct ldlm_namespace *lr_namespace;
+ struct ldlm_ns_bucket *lr_ns_bucket;
-/* protected by ns_hash_lock */
+/* linkage into ns_rs_hash; protected by the hash bucket lock */
- cfs_list_t lr_hash;
- struct ldlm_resource *lr_parent; /* 0 for a root resource */
- cfs_list_t lr_children; /* list head for child resources */
- cfs_list_t lr_childof; /* part of ns_root_list if root res,
- * part of lr_children if child */
+ cfs_hlist_node_t lr_hash;
cfs_spinlock_t lr_lock;
/* protected by lr_lock */
struct ldlm_interval_tree lr_itree[LCK_MODE_NUM]; /* interval trees*/
/* Server-side-only lock value block elements */
- cfs_semaphore_t lr_lvb_sem;
+ /** to serialize lvbo_init */
+ cfs_mutex_t lr_lvb_mutex;
__u32 lr_lvb_len;
+ /** protected by lr_lock */
void *lr_lvb_data;
/* when the resource was considered as contended */
struct inode *lr_lvb_inode;
};
+static inline char *
+ldlm_ns_name(struct ldlm_namespace *ns)
+{
+ return ns->ns_rs_hash->hs_name;
+}
+
static inline struct ldlm_namespace *
ldlm_res_to_ns(struct ldlm_resource *res)
{
- return res->lr_namespace;
+ return res->lr_ns_bucket->nsb_namespace;
}
static inline struct ldlm_namespace *
static inline char *
ldlm_lock_to_ns_name(struct ldlm_lock *lock)
{
- return ldlm_lock_to_ns(lock)->ns_name;
+ return ldlm_ns_name(ldlm_lock_to_ns(lock));
}
static inline struct adaptive_timeout *
ldlm_lock_to_ns_at(struct ldlm_lock *lock)
{
- return &ldlm_lock_to_ns(lock)->ns_at_estimate;
+ return &lock->l_resource->lr_ns_bucket->nsb_at_estimate;
}
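+
+/* Sketch: with the estimate now kept per hash bucket, server code can feed
+ * measured callback round-trips into it via the libcfs adaptive-timeout
+ * helper (at_measured() is assumed from lustre_net.h):
+ *
+ *     at_measured(ldlm_lock_to_ns_at(lock), delay);
+ */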
struct ldlm_ast_work {
void *ei_cb_gl; /* lock glimpse callback */
void *ei_cb_wg; /* lock weigh callback */
void *ei_cbdata; /* Data to be passed into callbacks. */
- short ei_async:1; /* async request */
};
extern struct obd_ops ldlm_obd_ops;
extern char *ldlm_typename[];
extern char *ldlm_it2str(int it);
#ifdef LIBCFS_DEBUG
-#define ldlm_lock_debug(cdls, level, lock, file, func, line, fmt, a...) do { \
- CFS_CHECK_STACK(); \
+#define ldlm_lock_debug(msgdata, mask, cdls, lock, fmt, a...) do { \
+ CFS_CHECK_STACK(msgdata, mask, cdls); \
\
- if (((level) & D_CANTMASK) != 0 || \
- ((libcfs_debug & (level)) != 0 && \
- (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) { \
- static struct libcfs_debug_msg_data _ldlm_dbg_data = \
- DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM, \
- file, func, line); \
- _ldlm_lock_debug(lock, level, &_ldlm_dbg_data, fmt, \
- ##a ); \
- } \
+ if (((mask) & D_CANTMASK) != 0 || \
+ ((libcfs_debug & (mask)) != 0 && \
+ (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
+ _ldlm_lock_debug(lock, msgdata, fmt, ##a); \
} while(0)
-void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 mask,
- struct libcfs_debug_msg_data *data, const char *fmt,
- ...)
- __attribute__ ((format (printf, 4, 5)));
+void _ldlm_lock_debug(struct ldlm_lock *lock,
+ struct libcfs_debug_msg_data *data,
+ const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
-#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do { \
- static cfs_debug_limit_state_t _ldlm_cdls; \
- ldlm_lock_debug(&_ldlm_cdls, mask, lock, \
- __FILE__, __FUNCTION__, __LINE__, \
- "### " fmt , ##a); \
+#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do { \
+ static cfs_debug_limit_state_t _ldlm_cdls; \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, &_ldlm_cdls); \
+ ldlm_lock_debug(&msgdata, mask, &_ldlm_cdls, lock, "### " fmt , ##a);\
} while (0)
#define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a)
#define LDLM_WARN(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a)
-#define LDLM_DEBUG(lock, fmt, a...) do { \
- ldlm_lock_debug(NULL, D_DLMTRACE, lock, \
- __FILE__, __FUNCTION__, __LINE__, \
- "### " fmt , ##a); \
+#define LDLM_DEBUG(lock, fmt, a...) do { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL); \
+ ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, "### " fmt , ##a);\
} while (0)
#else /* !LIBCFS_DEBUG */
+# define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) ((void)0)
# define LDLM_DEBUG(lock, fmt, a...) ((void)0)
# define LDLM_ERROR(lock, fmt, a...) ((void)0)
-# define ldlm_lock_debuf(cdls, level, lock, file, func, line, fmt, a...) \
- ((void)0)
#endif
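+
+/* Usage sketch ('lock' is any struct ldlm_lock *): the macros print through
+ * libcfs, and _ldlm_lock_debug() appends the lock's state to the message:
+ *
+ *     LDLM_DEBUG(lock, "reprocessing lock");
+ *     LDLM_ERROR(lock, "unexpected granted mode %d",
+ *                (int)lock->l_granted_mode);
+ */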
#define LDLM_DEBUG_NOLOCK(format, a...) \
int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
void *closure);
-int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
- void *closure);
-int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
- ldlm_res_iterator_t iter, void *closure);
+void ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
+ void *closure);
int ldlm_replay_locks(struct obd_import *imp);
int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
/* ldlm_extent.c */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);
+struct ldlm_callback_suite {
+ ldlm_completion_callback lcs_completion;
+ ldlm_blocking_callback lcs_blocking;
+ ldlm_glimpse_callback lcs_glimpse;
+ ldlm_weigh_callback lcs_weigh;
+};
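+
+/* A hedged sketch of filling the suite for ldlm_handle_enqueue0() (declared
+ * below); the my_*_ast callbacks are placeholders for the caller's own:
+ *
+ *     struct ldlm_callback_suite cbs = {
+ *             .lcs_completion = my_completion_ast,
+ *             .lcs_blocking   = my_blocking_ast,
+ *             .lcs_glimpse    = my_glimpse_ast,
+ *     };
+ *     rc = ldlm_handle_enqueue0(ns, req, dlm_req, &cbs);
+ */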
/* ldlm_lockd.c */
+#ifdef HAVE_SERVER_SUPPORT
int ldlm_server_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
void *data, int flag);
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
+int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list);
int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback,
ldlm_blocking_callback, ldlm_glimpse_callback);
+int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
+ const struct ldlm_request *dlm_req,
+ const struct ldlm_callback_suite *cbs);
int ldlm_handle_convert(struct ptlrpc_request *req);
+int ldlm_handle_convert0(struct ptlrpc_request *req,
+ const struct ldlm_request *dlm_req);
int ldlm_handle_cancel(struct ptlrpc_request *req);
int ldlm_request_cancel(struct ptlrpc_request *req,
const struct ldlm_request *dlm_req, int first);
+void ldlm_revoke_export_locks(struct obd_export *exp);
+#endif
int ldlm_del_waiting_lock(struct ldlm_lock *lock);
int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout);
-void ldlm_revoke_export_locks(struct obd_export *exp);
int ldlm_get_ref(void);
void ldlm_put_ref(void);
int ldlm_init_export(struct obd_export *exp);
void ldlm_destroy_export(struct obd_export *exp);
/* ldlm_lock.c */
+#ifdef HAVE_SERVER_SUPPORT
ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res);
+#endif
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
void ldlm_lock2handle(const struct ldlm_lock *lock,
struct lustre_handle *lockh);
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, int flags);
void ldlm_cancel_callback(struct ldlm_lock *);
int ldlm_lock_remove_from_lru(struct ldlm_lock *);
+int ldlm_lock_set_data(struct lustre_handle *, void *);
static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
{
int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
+void ldlm_lock_fail_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
const struct ldlm_res_id *, ldlm_type_t type,
ldlm_policy_data_t *, ldlm_mode_t mode,
struct lustre_handle *, int unref);
+ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+ __u64 *bits);
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
__u32 *flags);
void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_reprocess_all(struct ldlm_resource *res);
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
-void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos);
void ldlm_lock_dump_handle(int level, struct lustre_handle *);
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
/* resource.c */
struct ldlm_namespace *
ldlm_namespace_new(struct obd_device *obd, char *name,
- ldlm_side_t client, ldlm_appetite_t apt);
+ ldlm_side_t client, ldlm_appetite_t apt,
+ ldlm_ns_type_t ns_type);
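+/* Usage sketch for the extended signature; the obd pointer and name are
+ * illustrative:
+ *
+ *     ns = ldlm_namespace_new(obd, "lustre-OST0000-osc",
+ *                             LDLM_NAMESPACE_CLIENT, LDLM_NAMESPACE_MODEST,
+ *                             LDLM_NS_TYPE_OSC);
+ */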
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags);
void ldlm_namespace_free(struct ldlm_namespace *ns,
struct obd_import *imp, int force);
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client);
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client);
-void ldlm_namespace_get_locked(struct ldlm_namespace *ns);
-void ldlm_namespace_put_locked(struct ldlm_namespace *ns, int wakeup);
void ldlm_namespace_get(struct ldlm_namespace *ns);
-void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup);
+void ldlm_namespace_put(struct ldlm_namespace *ns);
int ldlm_proc_setup(void);
#ifdef LPROCFS
void ldlm_proc_cleanup(void);
lu_ref_del(&(res)->lr_reference, __FUNCTION__, cfs_current()); \
} while (0)
-struct ldlm_callback_suite {
- ldlm_completion_callback lcs_completion;
- ldlm_blocking_callback lcs_blocking;
- ldlm_glimpse_callback lcs_glimpse;
- ldlm_weigh_callback lcs_weigh;
-};
-
/* ldlm_request.c */
int ldlm_expired_completion_wait(void *data);
int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock);
struct ptlrpc_request *req,
int version, int opc, int canceloff,
cfs_list_t *cancels, int count);
-int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
- const struct ldlm_request *dlm_req,
- const struct ldlm_callback_suite *cbs);
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
int *flags, void *lvb, __u32 lvb_len,
void *data, __u32 data_len);
int ldlm_cli_convert(struct lustre_handle *, int new_mode, __u32 *flags);
int ldlm_cli_update_pool(struct ptlrpc_request *req);
-int ldlm_handle_convert0(struct ptlrpc_request *req,
- const struct ldlm_request *dlm_req);
int ldlm_cli_cancel(struct lustre_handle *lockh);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
ldlm_cancel_flags_t flags, void *opaque);
cfs_spin_lock_nested(&res->lr_lock, mode);
}
-
static inline void unlock_res(struct ldlm_resource *res)
{
cfs_spin_unlock(&res->lr_lock);