);
my %dep_functions = (
+ 'alloca', 'malloc',
'CFS_ATOMIC_INIT', 'ATOMIC_INIT',
'cfs_atomic_add', 'atomic_add',
'cfs_atomic_add_return', 'atomic_add_return',
'cfs_hash_lookup_intent_t', 'enum cfs_hash_lookup_intent',
'cfs_hash_cond_arg_t', 'struct cfs_hash_cond_arg',
+ 'ldlm_appetite_t', 'enum ldlm_appetite',
+ 'ldlm_cancel_flags_t', 'enum ldlm_cancel_flags',
+ 'ldlm_error_t', 'enum ldlm_error',
+ 'ldlm_ns_hash_def_t', 'struct ldlm_ns_hash_def',
+ 'ldlm_mode_t', 'enum ldlm_mode',
+ 'ldlm_ns_type_t', 'enum ldlm_ns_type',
+ 'ldlm_policy_data_t', 'enum ldlm_policy_data',
+ 'ldlm_policy_res_t', 'enum ldlm_policy_res',
+ 'ldlm_side_t', 'enum ldlm_side',
+ 'ldlm_type_t', 'enum ldlm_type',
+ 'ldlm_wire_policy_data_t', 'union ldlm_wire_policy_data',
+
'LPROCFS', 'CONFIG_PROC_FS',
- 'alloca', 'malloc',
'mktemp', 'mkstemp',
'sprintf', 'snprintf',
'strcpy', 'strncpy',
REINT_MAX
} mds_reint_t;
-typedef enum {
+enum ldlm_cmd {
LDLM_ENQUEUE = 101,
LDLM_CONVERT = 102,
LDLM_CANCEL = 103,
LDLM_CP_CALLBACK = 105,
LDLM_GL_CALLBACK = 106,
LDLM_LAST_OPC
-} ldlm_cmd_t;
+};
#define LDLM_FIRST_OPC LDLM_ENQUEUE
enum seq_rpc_opc {
{ 901, "FLD_LAST_OPC"},
{ 0, NULL }
};
-/*const value_string lustre_ldlm_mode_t_vals[] = {*/
+/*const value_string lustre_ldlm_mode_vals[] = {*/
/* { LCK_MINMODE, "MINMODE" },*/
/* { LCK_EX, "EX" },*/
/* { LCK_PW, "PW" },*/
/*};*/
/* detailed version; the information came from : http://wiki.lustre.org/images/e/e5/LustreInternals_Architecture.pdf */
-const value_string lustre_ldlm_mode_t_vals[] = {
+const value_string lustre_ldlm_mode_vals[] = {
{ LCK_MINMODE, "MINMODE" },
{ LCK_EX, "Exclusive" },
{ LCK_PW, "Protected Write" },
{ 0, NULL }
};
-const value_string lustre_ldlm_type_t_vals[] = {
+const value_string lustre_ldlm_type_vals[] = {
{ LDLM_PLAIN, "LDLM_PLAIN" },
{ LDLM_EXTENT,"LDLM_EXTENT" },
{ LDLM_FLOCK, "LDLM_FLOCK" },
/* IDL: } */
int
-lustre_dissect_enum_ldlm_mode_t(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int hf_index _U_)
+lustre_dissect_enum_ldlm_mode(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int hf_index _U_)
{
offset=dissect_uint32(tvb, offset, pinfo, tree, hf_index);
return offset;
/* IDL: } */
int
-lustre_dissect_enum_ldlm_type_t(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int hf_index _U_)
+lustre_dissect_enum_ldlm_type(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int hf_index _U_)
{
offset=dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_resource_desc_lr_type );
return offset;
}
/* IDL: struct ldlm_resource_desc { */
-/* IDL: ldlm_type_t lr_type; */
-/* IDL: uint32 lr_padding; */
-/* IDL: struct ldlm_res_id { */
+/* IDL: enum ldlm_type lr_type; */
+/* IDL: uint32 lr_padding; */
+/* IDL: struct ldlm_res_id { */
/* IDL: } lr_name; */
/* IDL: } */
lustre_dissect_element_ldlm_resource_desc_lr_type(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_)
{
/* offset=dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_resource_desc_lr_type);*/
- offset=lustre_dissect_enum_ldlm_type_t(tvb, offset, pinfo, tree, hf_lustre_ldlm_resource_desc_lr_type);
+ offset=lustre_dissect_enum_ldlm_type(tvb, offset, pinfo, tree, hf_lustre_ldlm_resource_desc_lr_type);
return offset;
}
/* IDL: struct ldlm_lock_desc { */
-/* IDL: struct ldlm_resource_desc { */
-/* IDL: } l_resource; */
-/* IDL: ldlm_mode_t l_req_mode; */
-/* IDL: ldlm_mode_t l_granted_mode; */
-/* IDL: ldlm_policy_data_t l_policy_data; */
+/* IDL: struct ldlm_resource_desc { */
+/* IDL: } l_resource; */
+/* IDL: enum ldlm_mode l_req_mode; */
+/* IDL: enum ldlm_mode l_granted_mode; */
+/* IDL: union ldlm_policy_data l_policy_data; */
/* IDL: } */
static int
static int
lustre_dissect_element_ldlm_lock_desc_l_req_mode(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_)
{
- offset=lustre_dissect_enum_ldlm_mode_t(tvb, offset, pinfo, tree, hf_lustre_ldlm_lock_desc_l_req_mode);
+ offset=lustre_dissect_enum_ldlm_mode(tvb, offset, pinfo, tree, hf_lustre_ldlm_lock_desc_l_req_mode);
return offset;
}
static int
lustre_dissect_element_ldlm_lock_desc_l_granted_mode(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_)
{
- offset=lustre_dissect_enum_ldlm_mode_t(tvb, offset, pinfo, tree, hf_lustre_ldlm_lock_desc_l_granted_mode);
+ offset=lustre_dissect_enum_ldlm_mode(tvb, offset, pinfo, tree, hf_lustre_ldlm_lock_desc_l_granted_mode);
return offset;
}
lock_req_mode = tvb_get_letohl(tvb,offset);
- display_info_fstr(parent_tree->parent, pinfo->cinfo, COL_INFO, "[%s]", val_to_str(lock_req_mode, lustre_ldlm_mode_t_vals, "Unknown lock"));
+ display_info_fstr(parent_tree->parent, pinfo->cinfo, COL_INFO, "[%s]", val_to_str(lock_req_mode, lustre_ldlm_mode_vals, "Unknown lock"));
offset=lustre_dissect_element_ldlm_lock_desc_l_req_mode(tvb, offset, pinfo, tree);
/* IDL: struct ldlm_request { */
-/* IDL: uint32 lock_flags; */
-/* IDL: uint32 lock_count; */
-/* IDL: struct ldlm_lock_desc { */
-/* IDL: } lock_desc; */
-/* IDL: struct lustre_handle { */
-/* IDL: } lock_handle[2]; */
+/* IDL: uint32 lock_flags; */
+/* IDL: uint32 lock_count; */
+/* IDL: struct ldlm_lock_desc { */
+/* IDL: } lock_desc; */
+/* IDL: struct lustre_handle { */
+/* IDL: } lock_handle[2]; */
/* IDL: } */
{ &hf_lustre_quota_adjust_qunit_qaq_flags,
{ "Qaq Flags", "lustre.quota_adjust_qunit.qaq_flags", FT_UINT32, BASE_DEC, NULL, 0, "", HFILL }},
{ &hf_lustre_ldlm_lock_desc_l_granted_mode,
- { "L Granted Mode", "lustre.ldlm_lock_desc.l_granted_mode", FT_UINT16, BASE_DEC, VALS(lustre_ldlm_mode_t_vals), 0, "", HFILL }},
+ { "L Granted Mode", "lustre.ldlm_lock_desc.l_granted_mode", FT_UINT16, BASE_DEC, VALS(lustre_ldlm_mode_vals), 0, "", HFILL }},
{ &hf_lustre_obdo_o_seq,
{ "O SEQ", "lustre.obdo.o_seq", FT_UINT64, BASE_DEC, NULL, 0, "", HFILL }},
{ &hf_lustre_obdo_o_gid,
{ &hf_lustre_llog_unlink_rec_padding,
{ "Padding", "lustre.llog_unlink_rec.padding", FT_UINT32, BASE_DEC, NULL, 0, "", HFILL }},
{ &hf_lustre_ldlm_lock_desc_l_req_mode,
- { "L Req Mode", "lustre.ldlm_lock_desc.l_req_mode", FT_UINT16, BASE_DEC, VALS(lustre_ldlm_mode_t_vals), 0, "", HFILL }},
+ { "L Req Mode", "lustre.ldlm_lock_desc.l_req_mode", FT_UINT16, BASE_DEC, VALS(lustre_ldlm_mode_vals), 0, "", HFILL }},
{ &hf_lustre_ldlm_extent_end,
{ "End", "lustre.ldlm_extent.end", FT_UINT64, BASE_DEC, NULL, 0, "", HFILL }},
{ &hf_lustre_llog_gen_rec_lgr_hdr,
{ &hf_lustre_lov_desc_ld_qos_maxage,
{ "Ld Qos Maxage", "lustre.lov_desc.ld_qos_maxage", FT_UINT32, BASE_DEC, NULL, 0, "", HFILL }},
{ &hf_lustre_ldlm_resource_desc_lr_type,
- { "Lr Type", "lustre.ldlm_resource_desc.lr_type", FT_UINT16, BASE_DEC, VALS(lustre_ldlm_type_t_vals), 0, "", HFILL }},
+ { "Lr Type", "lustre.ldlm_resource_desc.lr_type", FT_UINT16, BASE_DEC, VALS(lustre_ldlm_type_vals), 0, "", HFILL }},
{ &hf_lustre_llog_setattr_rec_lsr_tail,
{ "Lsr Tail", "lustre.llog_setattr_rec.lsr_tail", FT_NONE,
BASE_NONE, NULL, 0, "", HFILL } },
int tgt_extent_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
__u64 start, __u64 end, struct lustre_handle *lh,
int mode, __u64 *flags);
-void tgt_extent_unlock(struct lustre_handle *lh, ldlm_mode_t mode);
+void tgt_extent_unlock(struct lustre_handle *lh, enum ldlm_mode mode);
int tgt_brw_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
struct obd_ioobj *obj, struct niobuf_remote *nb,
- struct lustre_handle *lh, int mode);
+ struct lustre_handle *lh, enum ldlm_mode mode);
void tgt_brw_unlock(struct obd_ioobj *obj, struct niobuf_remote *niob,
- struct lustre_handle *lh, int mode);
+ struct lustre_handle *lh, enum ldlm_mode mode);
int tgt_brw_read(struct tgt_session_info *tsi);
int tgt_brw_write(struct tgt_session_info *tsi);
int tgt_hpreq_handler(struct ptlrpc_request *req);
}
/* lock types */
-typedef enum {
- LCK_MINMODE = 0,
- LCK_EX = 1,
- LCK_PW = 2,
- LCK_PR = 4,
- LCK_CW = 8,
- LCK_CR = 16,
- LCK_NL = 32,
- LCK_GROUP = 64,
- LCK_COS = 128,
- LCK_MAXMODE
+typedef enum ldlm_mode {
+ LCK_MINMODE = 0,
+ LCK_EX = 1,
+ LCK_PW = 2,
+ LCK_PR = 4,
+ LCK_CW = 8,
+ LCK_CR = 16,
+ LCK_NL = 32,
+ LCK_GROUP = 64,
+ LCK_COS = 128,
+ LCK_MAXMODE
} ldlm_mode_t;
#define LCK_MODE_NUM 8
-typedef enum {
- LDLM_PLAIN = 10,
- LDLM_EXTENT = 11,
- LDLM_FLOCK = 12,
- LDLM_IBITS = 13,
- LDLM_MAX_TYPE
+typedef enum ldlm_type {
+ LDLM_PLAIN = 10,
+ LDLM_EXTENT = 11,
+ LDLM_FLOCK = 12,
+ LDLM_IBITS = 13,
+ LDLM_MAX_TYPE
} ldlm_type_t;
#define LDLM_MIN_TYPE LDLM_PLAIN
* this ever changes we will need to swab the union differently based
* on the resource type. */
-typedef union {
- struct ldlm_extent l_extent;
- struct ldlm_flock_wire l_flock;
- struct ldlm_inodebits l_inodebits;
+typedef union ldlm_wire_policy_data {
+ struct ldlm_extent l_extent;
+ struct ldlm_flock_wire l_flock;
+ struct ldlm_inodebits l_inodebits;
} ldlm_wire_policy_data_t;
-extern void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d);
+extern void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d);
union ldlm_gl_desc {
struct ldlm_gl_lquota_desc lquota_desc;
extern void lustre_swab_ldlm_intent (struct ldlm_intent *i);
struct ldlm_resource_desc {
- ldlm_type_t lr_type;
- __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
- struct ldlm_res_id lr_name;
+ enum ldlm_type lr_type;
+ __u32 lr_pad; /* also fix lustre_swab_ldlm_resource_desc */
+ struct ldlm_res_id lr_name;
};
extern void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r);
struct ldlm_lock_desc {
- struct ldlm_resource_desc l_resource;
- ldlm_mode_t l_req_mode;
- ldlm_mode_t l_granted_mode;
- ldlm_wire_policy_data_t l_policy_data;
+ struct ldlm_resource_desc l_resource;
+ enum ldlm_mode l_req_mode;
+ enum ldlm_mode l_granted_mode;
+ union ldlm_wire_policy_data l_policy_data;
};
-extern void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l);
+extern void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l);
#define LDLM_LOCKREQ_HANDLES 2
#define LDLM_ENQUEUE_CANCEL_OFF 1
/**
* LDLM non-error return states
*/
-typedef enum {
- ELDLM_OK = 0,
- ELDLM_LOCK_MATCHED = 1,
-
- ELDLM_LOCK_CHANGED = 300,
- ELDLM_LOCK_ABORTED = 301,
- ELDLM_LOCK_REPLACED = 302,
- ELDLM_NO_LOCK_DATA = 303,
- ELDLM_LOCK_WOULDBLOCK = 304,
-
- ELDLM_NAMESPACE_EXISTS = 400,
- ELDLM_BAD_NAMESPACE = 401
+typedef enum ldlm_error {
+ ELDLM_OK = 0,
+ ELDLM_LOCK_MATCHED = 1,
+
+ ELDLM_LOCK_CHANGED = 300,
+ ELDLM_LOCK_ABORTED = 301,
+ ELDLM_LOCK_REPLACED = 302,
+ ELDLM_NO_LOCK_DATA = 303,
+ ELDLM_LOCK_WOULDBLOCK = 304,
+
+ ELDLM_NAMESPACE_EXISTS = 400,
+ ELDLM_BAD_NAMESPACE = 401,
} ldlm_error_t;
/**
* decisions about lack of conflicts or do any autonomous lock granting without
* first speaking to a server.
*/
-typedef enum {
- LDLM_NAMESPACE_SERVER = 1 << 0,
- LDLM_NAMESPACE_CLIENT = 1 << 1
+typedef enum ldlm_side {
+ LDLM_NAMESPACE_SERVER = 0x01,
+ LDLM_NAMESPACE_CLIENT = 0x02
} ldlm_side_t;
/**
#define LCK_COMPAT_COS (LCK_COS)
/** @} Lock Compatibility Matrix */
-extern ldlm_mode_t lck_compat_array[];
+extern enum ldlm_mode lck_compat_array[];
-static inline void lockmode_verify(ldlm_mode_t mode)
+static inline void lockmode_verify(enum ldlm_mode mode)
{
- LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
+ LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
}
-static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode)
+static inline int lockmode_compat(enum ldlm_mode exist_mode,
+ enum ldlm_mode new_mode)
{
- return (lck_compat_array[exist_mode] & new_mode);
+ return lck_compat_array[exist_mode] & new_mode;
}
/*
};
typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
- void *req_cookie, ldlm_mode_t mode, __u64 flags,
- void *data);
+ void *req_cookie, enum ldlm_mode mode,
+ __u64 flags, void *data);
typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock);
* LDLM pools related, type of lock pool in the namespace.
* Greedy means release cached locks aggressively
*/
-typedef enum {
+typedef enum ldlm_appetite {
LDLM_NAMESPACE_GREEDY = 1 << 0,
LDLM_NAMESPACE_MODEST = 1 << 1
} ldlm_appetite_t;
enum {
/** LDLM namespace lock stats */
- LDLM_NSS_LOCKS = 0,
- LDLM_NSS_LAST
+ LDLM_NSS_LOCKS = 0,
+ LDLM_NSS_LAST
};
-typedef enum {
- /** invalide type */
- LDLM_NS_TYPE_UNKNOWN = 0,
- /** mdc namespace */
- LDLM_NS_TYPE_MDC,
- /** mds namespace */
- LDLM_NS_TYPE_MDT,
- /** osc namespace */
- LDLM_NS_TYPE_OSC,
- /** ost namespace */
- LDLM_NS_TYPE_OST,
- /** mgc namespace */
- LDLM_NS_TYPE_MGC,
- /** mgs namespace */
- LDLM_NS_TYPE_MGT,
+typedef enum ldlm_ns_type {
+ LDLM_NS_TYPE_UNKNOWN = 0, /**< invalid type */
+ LDLM_NS_TYPE_MDC, /**< MDC namespace */
+ LDLM_NS_TYPE_MDT, /**< MDT namespace */
+ LDLM_NS_TYPE_OSC, /**< OSC namespace */
+ LDLM_NS_TYPE_OST, /**< OST namespace */
+ LDLM_NS_TYPE_MGC, /**< MGC namespace */
+ LDLM_NS_TYPE_MGT, /**< MGT namespace */
} ldlm_ns_type_t;
/**
struct obd_device *ns_obd;
/** Flag indicating if namespace is on client instead of server */
- ldlm_side_t ns_client;
+ enum ldlm_side ns_client;
/** Resource hash table for namespace. */
struct cfs_hash *ns_rs_hash;
/** LDLM pool structure for this namespace */
struct ldlm_pool ns_pool;
/** Definition of how eagerly unused locks will be released from LRU */
- ldlm_appetite_t ns_appetite;
+ enum ldlm_appetite ns_appetite;
/**
* If more than \a ns_contended_locks are found, the resource is
struct ldlm_interval_tree {
/** Tree size. */
int lit_size;
- ldlm_mode_t lit_mode; /* lock mode */
+ enum ldlm_mode lit_mode; /* lock mode */
struct interval_node *lit_root; /* actual ldlm_interval */
};
#define LUSTRE_TRACKS_LOCK_EXP_REFS (0)
/** Cancel flags. */
-typedef enum {
- LCF_ASYNC = 0x1, /* Cancel locks asynchronously. */
- LCF_LOCAL = 0x2, /* Cancel locks locally, not notifing server */
- LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
- * in the same RPC */
+typedef enum ldlm_cancel_flags {
+ LCF_ASYNC = 0x1, /* Cancel locks asynchronously. */
+	LCF_LOCAL      = 0x2, /* Cancel locks locally, not notifying server */
+ LCF_BL_AST = 0x4, /* Cancel LDLM_FL_BL_AST locks in the same RPC */
} ldlm_cancel_flags_t;
struct ldlm_flock {
- __u64 start;
- __u64 end;
- __u64 owner;
- __u64 blocking_owner;
- struct obd_export *blocking_export;
+ __u64 start;
+ __u64 end;
+ __u64 owner;
+ __u64 blocking_owner;
+ struct obd_export *blocking_export;
/* Protected by the hash lock */
__u32 blocking_refs;
- __u32 pid;
+ __u32 pid;
};
union ldlm_policy_data {
typedef union ldlm_policy_data ldlm_policy_data_t;
-void ldlm_convert_policy_to_wire(ldlm_type_t type,
- const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
- const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
+void ldlm_convert_policy_to_wire(enum ldlm_type type,
+ const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy);
+void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
+ const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
enum lvb_type {
LVB_T_NONE = 0,
* Requested mode.
* Protected by lr_lock.
*/
- ldlm_mode_t l_req_mode;
+ enum ldlm_mode l_req_mode;
/**
* Granted mode, also protected by lr_lock.
*/
- ldlm_mode_t l_granted_mode;
+ enum ldlm_mode l_granted_mode;
/** Lock completion handler pointer. Called when lock is granted. */
ldlm_completion_callback l_completion_ast;
/**
* Representation of private data specific for a lock type.
* Examples are: extent range for extent lock or bitmask for ibits locks
*/
- ldlm_policy_data_t l_policy_data;
+ union ldlm_policy_data l_policy_data;
/**
* Lock state flags. Protected by lr_lock.
};
/** Type of locks this resource can hold. Only one type per resource. */
- ldlm_type_t lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
+ enum ldlm_type lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
/**
* Server-side-only lock value block elements.
* Common ldlm_enqueue parameters
*/
struct ldlm_enqueue_info {
- __u32 ei_type; /** Type of the lock being enqueued. */
- __u32 ei_mode; /** Mode of the lock being enqueued. */
- void *ei_cb_bl; /** blocking lock callback */
- void *ei_cb_local_bl; /** blocking local lock callback */
- void *ei_cb_cp; /** lock completion callback */
- void *ei_cb_gl; /** lock glimpse callback */
- void *ei_cbdata; /** Data to be passed into callbacks. */
- void *ei_namespace; /** lock namespace **/
- unsigned int ei_enq_slave:1, /* whether enqueue slave stripes */
- ei_nonblock:1; /* non block enqueue */
+ enum ldlm_type ei_type; /** Type of the lock being enqueued. */
+ enum ldlm_mode ei_mode; /** Mode of the lock being enqueued. */
+ void *ei_cb_bl; /** blocking lock callback */
+ void *ei_cb_local_bl; /** blocking local lock callback */
+ void *ei_cb_cp; /** lock completion callback */
+ void *ei_cb_gl; /** lock glimpse callback */
+ void *ei_cbdata; /** Data to be passed into callbacks. */
+ void *ei_namespace; /** lock namespace **/
+ unsigned int ei_enq_slave:1, /** whether enqueue slave stripes */
+ ei_nonblock:1; /** non block enqueue */
};
#define ei_res_id ei_cb_gl
#endif
typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, ldlm_error_t *err,
+ int first_enq, enum ldlm_error *err,
struct list_head *work_list);
/**
* data from request \a r
*/
static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
- struct ptlrpc_request *r, int increase)
+ struct ptlrpc_request *req, int increase)
{
int rc;
return rc;
}
- if (ldlm_res_to_ns(res)->ns_lvbo &&
- ldlm_res_to_ns(res)->ns_lvbo->lvbo_update) {
- return ldlm_res_to_ns(res)->ns_lvbo->lvbo_update(res, r,
- increase);
- }
- return 0;
+ if (ldlm_res_to_ns(res)->ns_lvbo &&
+ ldlm_res_to_ns(res)->ns_lvbo->lvbo_update) {
+ return ldlm_res_to_ns(res)->ns_lvbo->lvbo_update(res, req,
+ increase);
+ }
+ return 0;
}
-int ldlm_error2errno(ldlm_error_t error);
-ldlm_error_t ldlm_errno2error(int err_no); /* don't call it `errno': this
- * confuses user-space. */
+int ldlm_error2errno(enum ldlm_error error);
+enum ldlm_error ldlm_errno2error(int err_no); /* don't call it `errno': this
+ * confuses user-space. */
#if LUSTRE_TRACKS_LOCK_EXP_REFS
void ldlm_dump_export_locks(struct obd_export *exp);
#endif
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock_destroy(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
-void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
-int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_addref(struct lustre_handle *lockh, enum ldlm_mode mode);
+int ldlm_lock_addref_try(struct lustre_handle *lockh, enum ldlm_mode mode);
+void ldlm_lock_decref(struct lustre_handle *lockh, enum ldlm_mode mode);
+void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh,
+ enum ldlm_mode mode);
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
void ldlm_lock_fail_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
-ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
- const struct ldlm_res_id *, ldlm_type_t type,
- ldlm_policy_data_t *, ldlm_mode_t mode,
- struct lustre_handle *, int unref);
-ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
- __u64 *bits);
-struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
- __u32 *flags);
-void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
+enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
+ const struct ldlm_res_id *, enum ldlm_type type,
+ union ldlm_policy_data *, enum ldlm_mode mode,
+ struct lustre_handle *, int unref);
+enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+ __u64 *bits);
+struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock,
+ enum ldlm_mode new_mode, __u32 *flags);
+void ldlm_lock_downgrade(struct ldlm_lock *lock, enum ldlm_mode new_mode);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_reprocess_all(struct ldlm_resource *res);
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
/* resource.c */
-struct ldlm_namespace *
-ldlm_namespace_new(struct obd_device *obd, char *name,
- ldlm_side_t client, ldlm_appetite_t apt,
- ldlm_ns_type_t ns_type);
+struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
+ enum ldlm_side client,
+ enum ldlm_appetite apt,
+ enum ldlm_ns_type ns_type);
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
struct obd_import *imp,
int force);
void ldlm_namespace_free_post(struct ldlm_namespace *ns);
void ldlm_namespace_free(struct ldlm_namespace *ns,
- struct obd_import *imp, int force);
-void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
-void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
+ struct obd_import *imp, int force);
+void ldlm_namespace_register(struct ldlm_namespace *ns, enum ldlm_side client);
+void ldlm_namespace_unregister(struct ldlm_namespace *ns,
+ enum ldlm_side client);
void ldlm_namespace_get(struct ldlm_namespace *ns);
void ldlm_namespace_put(struct ldlm_namespace *ns);
int ldlm_proc_setup(void);
/* resource.c - internal */
struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
- struct ldlm_resource *parent,
- const struct ldlm_res_id *,
- ldlm_type_t type, int create);
+ struct ldlm_resource *parent,
+ const struct ldlm_res_id *,
+ enum ldlm_type type, int create);
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
int ldlm_resource_putref(struct ldlm_resource *res);
void ldlm_resource_add_lock(struct ldlm_resource *res,
struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
-void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
+void ldlm_dump_all_namespaces(enum ldlm_side client, int level);
void ldlm_namespace_dump(int level, struct ldlm_namespace *);
void ldlm_resource_dump(int level, struct ldlm_resource *);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
* to obtain and release locks.
* @{ */
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
- struct ldlm_enqueue_info *einfo,
- const struct ldlm_res_id *res_id,
- ldlm_policy_data_t const *policy, __u64 *flags,
+ struct ldlm_enqueue_info *einfo,
+ const struct ldlm_res_id *res_id,
+ union ldlm_policy_data const *policy, __u64 *flags,
void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
struct lustre_handle *lockh, int async);
int ldlm_prep_enqueue_req(struct obd_export *exp,
const struct ldlm_request *dlm_req,
const struct ldlm_callback_suite *cbs);
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
- ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
- __u64 *flags, void *lvb, __u32 lvb_len,
- struct lustre_handle *lockh, int rc);
+ enum ldlm_type type, __u8 with_policy,
+ enum ldlm_mode mode, __u64 *flags, void *lvb,
+ __u32 lvb_len,
+ struct lustre_handle *lockh, int rc);
int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_type_t type, ldlm_policy_data_t *policy,
- ldlm_mode_t mode, __u64 *flags,
- ldlm_blocking_callback blocking,
- ldlm_completion_callback completion,
- ldlm_glimpse_callback glimpse,
+ const struct ldlm_res_id *res_id,
+ enum ldlm_type type, union ldlm_policy_data *policy,
+ enum ldlm_mode mode, __u64 *flags,
+ ldlm_blocking_callback blocking,
+ ldlm_completion_callback completion,
+ ldlm_glimpse_callback glimpse,
void *data, __u32 lvb_len, enum lvb_type lvb_type,
- const __u64 *client_cookie,
- struct lustre_handle *lockh);
+ const __u64 *client_cookie,
+ struct lustre_handle *lockh);
int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
- void *data, __u32 data_len);
+ void *data, __u32 data_len);
int ldlm_cli_convert(struct lustre_handle *, int new_mode, __u32 *flags);
int ldlm_cli_update_pool(struct ptlrpc_request *req);
int ldlm_cli_cancel(struct lustre_handle *lockh,
- ldlm_cancel_flags_t cancel_flags);
+ enum ldlm_cancel_flags cancel_flags);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
- ldlm_cancel_flags_t flags, void *opaque);
+ enum ldlm_cancel_flags flags, void *opaque);
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
- void *opaque);
+ const struct ldlm_res_id *res_id,
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode,
+ enum ldlm_cancel_flags flags, void *opaque);
int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
- int count, ldlm_cancel_flags_t flags);
+ int count, enum ldlm_cancel_flags flags);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
struct list_head *cancels,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode, __u64 lock_flags,
- ldlm_cancel_flags_t cancel_flags, void *opaque);
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode, __u64 lock_flags,
+ enum ldlm_cancel_flags cancel_flags,
+ void *opaque);
int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
- ldlm_cancel_flags_t flags);
+ enum ldlm_cancel_flags flags);
int ldlm_cli_cancel_list(struct list_head *head, int count,
- struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
+ struct ptlrpc_request *req,
+ enum ldlm_cancel_flags flags);
/** @} ldlm_cli_api */
/* mds/handler.c */
* There are not used outside of ldlm.
* @{
*/
-int ldlm_pools_recalc(ldlm_side_t client);
+int ldlm_pools_recalc(enum ldlm_side client);
int ldlm_pools_init(void);
void ldlm_pools_fini(void);
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
- int idx, ldlm_side_t client);
+ int idx, enum ldlm_side client);
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask);
void ldlm_pool_fini(struct ldlm_pool *pl);
int ldlm_pool_setup(struct ldlm_pool *pl, int limit);
*/
struct lustre_msg *rs_msg; /* reply message */
- /** Number of locks awaiting client ACK */
- int rs_nlocks;
- /** Handles of locks awaiting client reply ACK */
- struct lustre_handle rs_locks[RS_MAX_LOCKS];
- /** Lock modes of locks in \a rs_locks */
- ldlm_mode_t rs_modes[RS_MAX_LOCKS];
+ /** Number of locks awaiting client ACK */
+ int rs_nlocks;
+ /** Handles of locks awaiting client reply ACK */
+ struct lustre_handle rs_locks[RS_MAX_LOCKS];
+ /** Lock modes of locks in \a rs_locks */
+ enum ldlm_mode rs_modes[RS_MAX_LOCKS];
};
struct ptlrpc_thread;
int (*m_set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *);
- ldlm_mode_t (*m_lock_match)(struct obd_export *, __u64,
- const struct lu_fid *, ldlm_type_t,
- ldlm_policy_data_t *, ldlm_mode_t,
- struct lustre_handle *);
+ enum ldlm_mode (*m_lock_match)(struct obd_export *, __u64,
+ const struct lu_fid *, enum ldlm_type,
+ union ldlm_policy_data *, enum ldlm_mode,
+ struct lustre_handle *);
int (*m_cancel_unused)(struct obd_export *, const struct lu_fid *,
- ldlm_policy_data_t *, ldlm_mode_t,
- ldlm_cancel_flags_t flags, void *opaque);
+ union ldlm_policy_data *, enum ldlm_mode,
+ enum ldlm_cancel_flags flags, void *opaque);
int (*m_get_remote_perm)(struct obd_export *, const struct lu_fid *,
u32, struct ptlrpc_request **);
RETURN(MDP(exp->exp_obd, set_lock_data)(exp, lockh, data, bits));
}
-static inline int md_cancel_unused(struct obd_export *exp,
- const struct lu_fid *fid,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
- void *opaque)
+static inline
+int md_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
+ union ldlm_policy_data *policy, enum ldlm_mode mode,
+ enum ldlm_cancel_flags cancel_flags, void *opaque)
{
- int rc;
- ENTRY;
+ int rc;
+ ENTRY;
- EXP_CHECK_MD_OP(exp, cancel_unused);
- EXP_MD_COUNTER_INCREMENT(exp, cancel_unused);
+ EXP_CHECK_MD_OP(exp, cancel_unused);
+ EXP_MD_COUNTER_INCREMENT(exp, cancel_unused);
- rc = MDP(exp->exp_obd, cancel_unused)(exp, fid, policy, mode,
- flags, opaque);
- RETURN(rc);
+ rc = MDP(exp->exp_obd, cancel_unused)(exp, fid, policy, mode,
+ cancel_flags, opaque);
+ RETURN(rc);
}
-static inline ldlm_mode_t md_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid,
- ldlm_type_t type,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- struct lustre_handle *lockh)
+static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags,
+ const struct lu_fid *fid,
+ enum ldlm_type type,
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode,
+ struct lustre_handle *lockh)
{
- ENTRY;
- EXP_CHECK_MD_OP(exp, lock_match);
- EXP_MD_COUNTER_INCREMENT(exp, lock_match);
- RETURN(MDP(exp->exp_obd, lock_match)(exp, flags, fid, type,
- policy, mode, lockh));
+ ENTRY;
+ EXP_CHECK_MD_OP(exp, lock_match);
+ EXP_MD_COUNTER_INCREMENT(exp, lock_match);
+ RETURN(MDP(exp->exp_obd, lock_match)(exp, flags, fid, type,
+ policy, mode, lockh));
}
static inline int md_init_ea_size(struct obd_export *exp, __u32 ea_size,
* overly wide locks.
*/
static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
- struct ldlm_extent *new_ex,
- int conflicting)
+ struct ldlm_extent *new_ex,
+ int conflicting)
{
- ldlm_mode_t req_mode = req->l_req_mode;
- __u64 req_start = req->l_req_extent.start;
- __u64 req_end = req->l_req_extent.end;
- __u64 req_align, mask;
+ enum ldlm_mode req_mode = req->l_req_mode;
+ __u64 req_start = req->l_req_extent.start;
+ __u64 req_end = req->l_req_extent.end;
+ __u64 req_align, mask;
if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
struct ldlm_extent *new_ex)
{
- struct ldlm_resource *res = req->l_resource;
- ldlm_mode_t req_mode = req->l_req_mode;
- __u64 req_start = req->l_req_extent.start;
- __u64 req_end = req->l_req_extent.end;
- struct ldlm_interval_tree *tree;
- struct interval_node_extent limiter = { new_ex->start, new_ex->end };
- int conflicting = 0;
- int idx;
- ENTRY;
+ struct ldlm_resource *res = req->l_resource;
+ enum ldlm_mode req_mode = req->l_req_mode;
+ __u64 req_start = req->l_req_extent.start;
+ __u64 req_end = req->l_req_extent.end;
+ struct ldlm_interval_tree *tree;
+ struct interval_node_extent limiter = { new_ex->start, new_ex->end };
+ int conflicting = 0;
+ int idx;
+ ENTRY;
- lockmode_verify(req_mode);
+ lockmode_verify(req_mode);
/* Using interval tree to handle the LDLM extent granted locks. */
for (idx = 0; idx < LCK_MODE_NUM; idx++) {
struct ldlm_extent *new_ex)
{
struct ldlm_resource *res = req->l_resource;
- ldlm_mode_t req_mode = req->l_req_mode;
+ enum ldlm_mode req_mode = req->l_req_mode;
__u64 req_start = req->l_req_extent.start;
__u64 req_end = req->l_req_extent.end;
struct ldlm_lock *lock;
static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
{
- struct ldlm_resource *res = lock->l_resource;
- cfs_time_t now = cfs_time_current();
+ struct ldlm_resource *res = lock->l_resource;
+ cfs_time_t now = cfs_time_current();
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
- return 1;
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
+ return 1;
- CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
- if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
- res->lr_contention_time = now;
- return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
- cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
+ CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
+ if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
+ res->lr_contention_time = now;
+ return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
+ cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
}
struct ldlm_extent_compat_args {
struct list_head *work_list;
- struct ldlm_lock *lock;
- ldlm_mode_t mode;
- int *locks;
- int *compat;
+ struct ldlm_lock *lock;
+ enum ldlm_mode mode;
+ int *locks;
+ int *compat;
};
static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
- void *data)
+ void *data)
{
- struct ldlm_extent_compat_args *priv = data;
- struct ldlm_interval *node = to_ldlm_interval(n);
- struct ldlm_extent *extent;
+ struct ldlm_extent_compat_args *priv = data;
+ struct ldlm_interval *node = to_ldlm_interval(n);
+ struct ldlm_extent *extent;
struct list_head *work_list = priv->work_list;
- struct ldlm_lock *lock, *enq = priv->lock;
- ldlm_mode_t mode = priv->mode;
- int count = 0;
- ENTRY;
+ struct ldlm_lock *lock, *enq = priv->lock;
+ enum ldlm_mode mode = priv->mode;
+ int count = 0;
+ ENTRY;
LASSERT(!list_empty(&node->li_group));
*/
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
- __u64 *flags, ldlm_error_t *err,
+ __u64 *flags, enum ldlm_error *err,
struct list_head *work_list, int *contended_locks)
{
struct ldlm_resource *res = req->l_resource;
- ldlm_mode_t req_mode = req->l_req_mode;
+ enum ldlm_mode req_mode = req->l_req_mode;
__u64 req_start = req->l_req_extent.start;
__u64 req_end = req->l_req_extent.end;
struct ldlm_lock *lock;
* would be collected and ASTs sent.
*/
int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, ldlm_error_t *err,
+ int first_enq, enum ldlm_error *err,
struct list_head *work_list)
{
struct ldlm_resource *res = lock->l_resource;
return list_empty(&n->li_group) ? n : NULL;
}
-static inline int lock_mode_to_index(ldlm_mode_t mode)
+static inline int ldlm_mode_to_index(enum ldlm_mode mode)
{
- int index;
-
- LASSERT(mode != 0);
- LASSERT(IS_PO2(mode));
- for (index = -1; mode; index++, mode >>= 1) ;
- LASSERT(index < LCK_MODE_NUM);
- return index;
+ int index;
+
+ LASSERT(mode != 0);
+ LASSERT(IS_PO2(mode));
+ for (index = -1; mode != 0; index++, mode >>= 1)
+ /* do nothing */;
+ LASSERT(index < LCK_MODE_NUM);
+ return index;
}
/** Add newly granted lock into interval tree for the resource. */
LASSERT(node != NULL);
LASSERT(!interval_is_intree(&node->li_node));
- idx = lock_mode_to_index(lock->l_granted_mode);
- LASSERT(lock->l_granted_mode == 1 << idx);
- LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);
+ idx = ldlm_mode_to_index(lock->l_granted_mode);
+ LASSERT(lock->l_granted_mode == 1 << idx);
+ LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);
/* node extent initialize */
extent = &lock->l_policy_data.l_extent;
/** Remove cancelled lock from resource interval tree. */
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
- struct ldlm_resource *res = lock->l_resource;
- struct ldlm_interval *node = lock->l_tree_node;
- struct ldlm_interval_tree *tree;
- int idx;
+ struct ldlm_resource *res = lock->l_resource;
+ struct ldlm_interval *node = lock->l_tree_node;
+ struct ldlm_interval_tree *tree;
+ int idx;
- if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
- return;
+ if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
+ return;
- idx = lock_mode_to_index(lock->l_granted_mode);
- LASSERT(lock->l_granted_mode == 1 << idx);
- tree = &res->lr_itree[idx];
+ idx = ldlm_mode_to_index(lock->l_granted_mode);
+ LASSERT(lock->l_granted_mode == 1 << idx);
+ tree = &res->lr_itree[idx];
- LASSERT(tree->lit_root != NULL); /* assure the tree is not null */
+ LASSERT(tree->lit_root != NULL); /* assert the tree is not empty */
- tree->lit_size--;
- node = ldlm_interval_detach(lock);
- if (node) {
- interval_erase(&node->li_node, &tree->lit_root);
- ldlm_interval_free(node);
- }
+ tree->lit_size--;
+ node = ldlm_interval_detach(lock);
+ if (node) {
+ interval_erase(&node->li_node, &tree->lit_root);
+ ldlm_interval_free(node);
+ }
}
-void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
- memset(lpolicy, 0, sizeof(*lpolicy));
- lpolicy->l_extent.start = wpolicy->l_extent.start;
- lpolicy->l_extent.end = wpolicy->l_extent.end;
- lpolicy->l_extent.gid = wpolicy->l_extent.gid;
+ memset(lpolicy, 0, sizeof(*lpolicy));
+ lpolicy->l_extent.start = wpolicy->l_extent.start;
+ lpolicy->l_extent.end = wpolicy->l_extent.end;
+ lpolicy->l_extent.gid = wpolicy->l_extent.gid;
}
-void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
- memset(wpolicy, 0, sizeof(*wpolicy));
- wpolicy->l_extent.start = lpolicy->l_extent.start;
- wpolicy->l_extent.end = lpolicy->l_extent.end;
- wpolicy->l_extent.gid = lpolicy->l_extent.gid;
+ memset(wpolicy, 0, sizeof(*wpolicy));
+ wpolicy->l_extent.start = lpolicy->l_extent.start;
+ wpolicy->l_extent.end = lpolicy->l_extent.end;
+ wpolicy->l_extent.gid = lpolicy->l_extent.gid;
}
}
static inline void
-ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
+ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
{
ENTRY;
*/
int
ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
- ldlm_error_t *err, struct list_head *work_list)
+ enum ldlm_error *err, struct list_head *work_list)
{
- struct ldlm_resource *res = req->l_resource;
- struct ldlm_namespace *ns = ldlm_res_to_ns(res);
+ struct ldlm_resource *res = req->l_resource;
+ struct ldlm_namespace *ns = ldlm_res_to_ns(res);
struct list_head *tmp;
struct list_head *ownlocks = NULL;
- struct ldlm_lock *lock = NULL;
- struct ldlm_lock *new = req;
- struct ldlm_lock *new2 = NULL;
- ldlm_mode_t mode = req->l_req_mode;
- int local = ns_is_client(ns);
- int added = (mode == LCK_NL);
- int overlaps = 0;
- int splitted = 0;
- const struct ldlm_callback_suite null_cbs = { NULL };
- ENTRY;
+ struct ldlm_lock *lock = NULL;
+ struct ldlm_lock *new = req;
+ struct ldlm_lock *new2 = NULL;
+ enum ldlm_mode mode = req->l_req_mode;
+ int local = ns_is_client(ns);
+ int added = (mode == LCK_NL);
+ int overlaps = 0;
+ int splitted = 0;
+ const struct ldlm_callback_suite null_cbs = { NULL };
+ ENTRY;
CDEBUG(D_DLMTRACE, "flags "LPX64" owner "LPU64" pid %u mode %u start "
LPU64" end "LPU64"\n", *flags,
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
- struct file_lock *getlk = lock->l_ast_data;
- struct obd_device *obd;
- struct obd_import *imp = NULL;
- struct ldlm_flock_wait_data fwd;
- struct l_wait_info lwi;
- ldlm_error_t err;
- int rc = 0;
- ENTRY;
+ struct file_lock *getlk = lock->l_ast_data;
+ struct obd_device *obd;
+ struct obd_import *imp = NULL;
+ struct ldlm_flock_wait_data fwd;
+ struct l_wait_info lwi;
+ enum ldlm_error err;
+ int rc = 0;
+ ENTRY;
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT2, 4);
if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT3)) {
RETURN(0);
}
-void ldlm_flock_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
memset(lpolicy, 0, sizeof(*lpolicy));
lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
}
-void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
- memset(wpolicy, 0, sizeof(*wpolicy));
- wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
- wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
- wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
- wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
+ memset(wpolicy, 0, sizeof(*wpolicy));
+ wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
+ wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
+ wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
+ wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
}
/*
struct list_head *work_list)
{
struct list_head *tmp;
- struct ldlm_lock *lock;
- ldlm_mode_t req_mode = req->l_req_mode;
- __u64 req_bits = req->l_policy_data.l_inodebits.bits;
- int compat = 1;
- ENTRY;
+ struct ldlm_lock *lock;
+ enum ldlm_mode req_mode = req->l_req_mode;
+ __u64 req_bits = req->l_policy_data.l_inodebits.bits;
+ int compat = 1;
+ ENTRY;
- LASSERT(req_bits); /* There is no sense in lock with no bits set,
- I think. Also such a lock would be compatible
- with any other bit lock */
+ /* There is no sense in lock with no bits set, I think.
+ * Also, such a lock would be compatible with any other bit lock */
+ LASSERT(req_bits != 0);
list_for_each(tmp, queue) {
struct list_head *mode_tail;
* would be collected and ASTs sent.
*/
int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, ldlm_error_t *err,
+ int first_enq, enum ldlm_error *err,
struct list_head *work_list)
{
struct ldlm_resource *res = lock->l_resource;
}
#endif /* HAVE_SERVER_SUPPORT */
-void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
- memset(lpolicy, 0, sizeof(*lpolicy));
- lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits;
+ memset(lpolicy, 0, sizeof(*lpolicy));
+ lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits;
}
-void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
- memset(wpolicy, 0, sizeof(*wpolicy));
- wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits;
+ memset(wpolicy, 0, sizeof(*wpolicy));
+ wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits;
}
extern struct list_head ldlm_cli_inactive_namespace_list;
extern unsigned int ldlm_cancel_unused_locks_before_replay;
-static inline int ldlm_namespace_nr_read(ldlm_side_t client)
+static inline int ldlm_namespace_nr_read(enum ldlm_side client)
{
return client == LDLM_NAMESPACE_SERVER ?
ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
}
-static inline void ldlm_namespace_nr_inc(ldlm_side_t client)
+static inline void ldlm_namespace_nr_inc(enum ldlm_side client)
{
if (client == LDLM_NAMESPACE_SERVER)
ldlm_srv_namespace_nr++;
ldlm_cli_namespace_nr++;
}
-static inline void ldlm_namespace_nr_dec(ldlm_side_t client)
+static inline void ldlm_namespace_nr_dec(enum ldlm_side client)
{
if (client == LDLM_NAMESPACE_SERVER)
ldlm_srv_namespace_nr--;
ldlm_cli_namespace_nr--;
}
-static inline struct list_head *ldlm_namespace_list(ldlm_side_t client)
+static inline struct list_head *ldlm_namespace_list(enum ldlm_side client)
{
- return client == LDLM_NAMESPACE_SERVER ?
+ return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
}
-static inline struct list_head *ldlm_namespace_inactive_list(ldlm_side_t client)
+static inline
+struct list_head *ldlm_namespace_inactive_list(enum ldlm_side client)
{
- return client == LDLM_NAMESPACE_SERVER ?
+ return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_list : &ldlm_cli_inactive_namespace_list;
}
-static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client)
+static inline struct mutex *ldlm_namespace_lock(enum ldlm_side client)
{
- return client == LDLM_NAMESPACE_SERVER ?
- &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
+ return client == LDLM_NAMESPACE_SERVER ?
+ &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
}
/* ns_bref is the number of resources in this namespace */
return atomic_read(&ns->ns_bref) == 0;
}
-void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, ldlm_side_t);
-void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *, ldlm_side_t);
-struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t);
+void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *,
+ enum ldlm_side);
+void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *,
+ enum ldlm_side);
+struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side);
/* ldlm_request.c */
/* Cancel lru flag, it indicates we cancel aged locks. */
-enum {
- LDLM_CANCEL_AGED = 1 << 0, /* Cancel aged locks (non lru resize). */
- LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */
- LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
- LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */
- LDLM_CANCEL_NO_WAIT = 1 << 4,/* Cancel locks w/o blocking (neither
- * sending nor waiting for any rpcs) */
- LDLM_CANCEL_LRUR_NO_WAIT = 1 << 5, /* LRUR + NO_WAIT */
+enum ldlm_lru_flags {
+ LDLM_LRU_FLAG_AGED = 0x01, /* Cancel aged locks (non LRU resize) */
+ LDLM_LRU_FLAG_PASSED = 0x02, /* Cancel passed number of locks */
+ LDLM_LRU_FLAG_SHRINK = 0x04, /* Cancel locks from shrinker */
+ LDLM_LRU_FLAG_LRUR = 0x08, /* Cancel locks from lru resize */
+ LDLM_LRU_FLAG_NO_WAIT = 0x10, /* Cancel locks w/o blocking (neither
+ * sending nor waiting for any RPCs) */
+ LDLM_LRU_FLAG_LRUR_NO_WAIT = 0x20, /* LRUR + NO_WAIT */
};
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
- ldlm_cancel_flags_t sync, int flags);
+ enum ldlm_cancel_flags cancel_flags,
+ enum ldlm_lru_flags lru_flags);
int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
struct list_head *cancels, int count, int max,
- ldlm_cancel_flags_t cancel_flags, int flags);
+ enum ldlm_cancel_flags cancel_flags,
+ enum ldlm_lru_flags lru_flags);
extern unsigned int ldlm_enqueue_min;
/* ldlm_resource.c */
extern struct kmem_cache *ldlm_resource_slab;
enum req_location loc, void *data, int size);
struct ldlm_lock *
ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
- ldlm_type_t type, ldlm_mode_t,
- const struct ldlm_callback_suite *cbs,
+ enum ldlm_type type, enum ldlm_mode mode,
+ const struct ldlm_callback_suite *cbs,
void *data, __u32 lvb_len, enum lvb_type lvb_type);
-ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
- void *cookie, __u64 *flags);
-void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
+enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
+ void *cookie, __u64 *flags);
+void ldlm_lock_addref_internal(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_decref_internal(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
struct list_head *work_list);
#ifdef HAVE_SERVER_SUPPORT
int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld,
struct list_head *cancels, int count,
- ldlm_cancel_flags_t cancel_flags);
+ enum ldlm_cancel_flags cancel_flags);
int ldlm_bl_thread_wakeup(void);
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
#ifdef HAVE_SERVER_SUPPORT
/* ldlm_plain.c */
int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, ldlm_error_t *err,
+ int first_enq, enum ldlm_error *err,
struct list_head *work_list);
/* ldlm_inodebits.c */
int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, ldlm_error_t *err,
+ int first_enq, enum ldlm_error *err,
struct list_head *work_list);
#endif
/* ldlm_extent.c */
#ifdef HAVE_SERVER_SUPPORT
int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, ldlm_error_t *err,
+ int first_enq, enum ldlm_error *err,
struct list_head *work_list);
#endif
void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
/* ldlm_flock.c */
int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
- int first_enq, ldlm_error_t *err,
+ int first_enq, enum ldlm_error *err,
struct list_head *work_list);
int ldlm_init_flock_export(struct obd_export *exp);
void ldlm_destroy_flock_export(struct obd_export *exp);
return ret;
}
-typedef void (*ldlm_policy_wire_to_local_t)(const ldlm_wire_policy_data_t *,
- ldlm_policy_data_t *);
-
-typedef void (*ldlm_policy_local_to_wire_t)(const ldlm_policy_data_t *,
- ldlm_wire_policy_data_t *);
-
-void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
-void ldlm_flock_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
-
-void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
+typedef void (*ldlm_policy_wire_to_local_t)(const union ldlm_wire_policy_data *,
+ union ldlm_policy_data *);
+typedef void (*ldlm_policy_local_to_wire_t)(const union ldlm_policy_data *,
+ union ldlm_wire_policy_data *);
+void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
+void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy);
+void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
+void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy);
+void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
+void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy);
+void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy);
+void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy);
/* ldlm_reclaim.c */
extern __u64 ldlm_watermark_low;
struct obd_uuid server_uuid;
int rq_portal, rp_portal, connect_op;
char *name = obddev->obd_type->typ_name;
- ldlm_ns_type_t ns_type = LDLM_NS_TYPE_UNKNOWN;
- int rc;
+ enum ldlm_ns_type ns_type = LDLM_NS_TYPE_UNKNOWN;
char *cli_name = lustre_cfg_buf(lcfg, 0);
+ int rc;
ENTRY;
- /* In a more perfect world, we would hang a ptlrpc_client off of
- * obd_type and just use the values from there. */
+ /* In a more perfect world, we would hang a ptlrpc_client off of
+ * obd_type and just use the values from there. */
if (!strcmp(name, LUSTRE_OSC_NAME)) {
rq_portal = OST_REQUEST_PORTAL;
rp_portal = OSC_REPLY_PORTAL;
EXIT;
}
-ldlm_mode_t lck_compat_array[] = {
+enum ldlm_mode lck_compat_array[] = {
[LCK_EX] = LCK_COMPAT_EX,
[LCK_PW] = LCK_COMPAT_PW,
[LCK_PR] = LCK_COMPAT_PR,
* Rather arbitrary mapping from LDLM error codes to errno values. This should
* not escape to the user level.
*/
-int ldlm_error2errno(ldlm_error_t error)
+int ldlm_error2errno(enum ldlm_error error)
{
- int result;
+ int result;
- switch (error) {
- case ELDLM_OK:
+ switch (error) {
+ case ELDLM_OK:
case ELDLM_LOCK_MATCHED:
result = 0;
break;
case ELDLM_BAD_NAMESPACE:
result = -EBADF;
break;
- default:
- if (((int)error) < 0) /* cast to signed type */
- result = error; /* as ldlm_error_t can be unsigned */
- else {
- CERROR("Invalid DLM result code: %d\n", error);
- result = -EPROTO;
- }
- }
- return result;
+ default:
+ if (((int)error) < 0) { /* cast to signed type */
+ result = error; /* as enum ldlm_error can be unsigned */
+ } else {
+ CERROR("Invalid DLM result code: %d\n", error);
+ result = -EPROTO;
+ }
+ }
+ return result;
}
EXPORT_SYMBOL(ldlm_error2errno);
/**
- * Dual to ldlm_error2errno(): maps errno values back to ldlm_error_t.
+ * Dual to ldlm_error2errno(): maps errno values back to enum ldlm_error.
*/
-ldlm_error_t ldlm_errno2error(int err_no)
+enum ldlm_error ldlm_errno2error(int err_no)
{
int error;
/**
* Converts lock policy from local format to on the wire lock_desc format
*/
-void ldlm_convert_policy_to_wire(ldlm_type_t type,
- const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_convert_policy_to_wire(enum ldlm_type type,
+ const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
- ldlm_policy_local_to_wire_t convert;
+ ldlm_policy_local_to_wire_t convert;
- convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE];
+ convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE];
- convert(lpolicy, wpolicy);
+ convert(lpolicy, wpolicy);
}
/**
* Converts lock policy from on the wire lock_desc format to local format
*/
-void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
- const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
+ const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
ldlm_policy_wire_to_local_t convert;
* r/w reference type is determined by \a mode
* Calls ldlm_lock_addref_internal.
*/
-void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_addref(struct lustre_handle *lockh, enum ldlm_mode mode)
{
struct ldlm_lock *lock;
* Removes lock from LRU if it is there.
* Assumes the LDLM lock is already locked.
*/
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock,
+ enum ldlm_mode mode)
{
ldlm_lock_remove_from_lru(lock);
if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
*
* \retval -EAGAIN lock is being canceled.
*/
-int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
+int ldlm_lock_addref_try(struct lustre_handle *lockh, enum ldlm_mode mode)
{
struct ldlm_lock *lock;
int result;
* Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
* Only called for local locks.
*/
-void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
- lock_res_and_lock(lock);
- ldlm_lock_addref_internal_nolock(lock, mode);
- unlock_res_and_lock(lock);
+ lock_res_and_lock(lock);
+ ldlm_lock_addref_internal_nolock(lock, mode);
+ unlock_res_and_lock(lock);
}
/**
* Does NOT add lock to LRU if no r/w references left to accomodate flock locks
* that cannot be placed in LRU.
*/
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
+ enum ldlm_mode mode)
{
LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
* on the namespace.
* For blocked LDLM locks if r/w count drops to zero, blocking_ast is called.
*/
-void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
struct ldlm_namespace *ns;
ENTRY;
/**
* Decrease reader/writer refcount for LDLM lock with handle \a lockh
*/
-void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref(struct lustre_handle *lockh, enum ldlm_mode mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
* drops to zero instead of putting into LRU.
*
*/
-void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh,
+ enum ldlm_mode mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
ENTRY;
* Describe the overlap between two locks. itree_overlap_cb data.
*/
struct lock_match_data {
- struct ldlm_lock *lmd_old;
- struct ldlm_lock *lmd_lock;
- ldlm_mode_t *lmd_mode;
- ldlm_policy_data_t *lmd_policy;
- __u64 lmd_flags;
- int lmd_unref;
+ struct ldlm_lock *lmd_old;
+ struct ldlm_lock *lmd_lock;
+ enum ldlm_mode *lmd_mode;
+ union ldlm_policy_data *lmd_policy;
+ __u64 lmd_flags;
+ int lmd_unref;
};
/**
*/
static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data)
{
- ldlm_policy_data_t *lpol = &lock->l_policy_data;
- ldlm_mode_t match;
+ union ldlm_policy_data *lpol = &lock->l_policy_data;
+ enum ldlm_mode match;
if (lock == data->lmd_old)
return INTERVAL_ITER_STOP;
* keep caller code unchanged), the context failure will be discovered by
* caller sometime later.
*/
-ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
- const struct ldlm_res_id *res_id, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh, int unref)
+enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
+ const struct ldlm_res_id *res_id,
+ enum ldlm_type type,
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode,
+ struct lustre_handle *lockh, int unref)
{
struct lock_match_data data = {
.lmd_old = NULL,
}
EXPORT_SYMBOL(ldlm_lock_match);
-ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
- __u64 *bits)
+enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+ __u64 *bits)
{
- struct ldlm_lock *lock;
- ldlm_mode_t mode = 0;
- ENTRY;
+ struct ldlm_lock *lock;
+ enum ldlm_mode mode = 0;
+ ENTRY;
- lock = ldlm_handle2lock(lockh);
- if (lock != NULL) {
- lock_res_and_lock(lock);
+ lock = ldlm_handle2lock(lockh);
+ if (lock != NULL) {
+ lock_res_and_lock(lock);
if (LDLM_HAVE_MASK(lock, GONE))
- GOTO(out, mode);
+ GOTO(out, mode);
if (ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
*/
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
- ldlm_type_t type,
- ldlm_mode_t mode,
+ enum ldlm_type type,
+ enum ldlm_mode mode,
const struct ldlm_callback_suite *cbs,
void *data, __u32 lvb_len,
enum lvb_type lvb_type)
* set, skip all the enqueueing and delegate lock processing to intent policy
* function.
*/
-ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
- struct ldlm_lock **lockp,
- void *cookie, __u64 *flags)
+enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
+ struct ldlm_lock **lockp,
+ void *cookie, __u64 *flags)
{
- struct ldlm_lock *lock = *lockp;
- struct ldlm_resource *res = lock->l_resource;
- int local = ns_is_client(ldlm_res_to_ns(res));
+ struct ldlm_lock *lock = *lockp;
+ struct ldlm_resource *res = lock->l_resource;
+ int local = ns_is_client(ldlm_res_to_ns(res));
#ifdef HAVE_SERVER_SUPPORT
- ldlm_processing_policy policy;
+ ldlm_processing_policy policy;
#endif
- ldlm_error_t rc = ELDLM_OK;
- struct ldlm_interval *node = NULL;
- ENTRY;
+ enum ldlm_error rc = ELDLM_OK;
+ struct ldlm_interval *node = NULL;
+ ENTRY;
/* policies are not executed on the client or during replay */
if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
struct list_head *work_list)
{
struct list_head *tmp, *pos;
- ldlm_processing_policy policy;
+ ldlm_processing_policy policy;
__u64 flags;
- int rc = LDLM_ITER_CONTINUE;
- ldlm_error_t err;
- ENTRY;
+ int rc = LDLM_ITER_CONTINUE;
+ enum ldlm_error err;
+ ENTRY;
- check_res_locked(res);
+ check_res_locked(res);
- policy = ldlm_processing_policy_table[res->lr_type];
- LASSERT(policy);
+ policy = ldlm_processing_policy_table[res->lr_type];
+ LASSERT(policy);
list_for_each_safe(tmp, pos, queue) {
- struct ldlm_lock *pending;
+ struct ldlm_lock *pending;
+
pending = list_entry(tmp, struct ldlm_lock, l_res_link);
CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
* \param lock A lock to convert
* \param new_mode new lock mode
*/
-void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
+void ldlm_lock_downgrade(struct ldlm_lock *lock, enum ldlm_mode new_mode)
{
ENTRY;
* optimizations could take advantage of it to avoid discarding cached
* pages on a file.
*/
-struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
- __u32 *flags)
+struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock,
+ enum ldlm_mode new_mode, __u32 *flags)
{
struct list_head rpc_list;
struct ldlm_resource *res;
lock->l_completion_ast(lock, 0, NULL);
}
#ifdef HAVE_SERVER_SUPPORT
- } else {
- int rc;
- ldlm_error_t err;
+ } else {
+ int rc;
+ enum ldlm_error err;
__u64 pflags = 0;
- ldlm_processing_policy policy;
+ ldlm_processing_policy policy;
+
policy = ldlm_processing_policy_table[res->lr_type];
rc = policy(lock, &pflags, 0, &err, &rpc_list);
if (rc == LDLM_ITER_STOP) {
};
struct ldlm_bl_work_item {
- struct list_head blwi_entry;
- struct ldlm_namespace *blwi_ns;
- struct ldlm_lock_desc blwi_ld;
- struct ldlm_lock *blwi_lock;
- struct list_head blwi_head;
- int blwi_count;
- struct completion blwi_comp;
- ldlm_cancel_flags_t blwi_flags;
- int blwi_mem_pressure;
+ struct list_head blwi_entry;
+ struct ldlm_namespace *blwi_ns;
+ struct ldlm_lock_desc blwi_ld;
+ struct ldlm_lock *blwi_lock;
+ struct list_head blwi_head;
+ int blwi_count;
+ struct completion blwi_comp;
+ enum ldlm_cancel_flags blwi_flags;
+ int blwi_mem_pressure;
};
#ifdef HAVE_SERVER_SUPPORT
* service threads to carry out client lock enqueueing requests.
*/
int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
- struct ptlrpc_request *req,
- const struct ldlm_request *dlm_req,
- const struct ldlm_callback_suite *cbs)
+ struct ptlrpc_request *req,
+ const struct ldlm_request *dlm_req,
+ const struct ldlm_callback_suite *cbs)
{
- struct ldlm_reply *dlm_rep;
+ struct ldlm_reply *dlm_rep;
__u64 flags;
- ldlm_error_t err = ELDLM_OK;
- struct ldlm_lock *lock = NULL;
- void *cookie = NULL;
- int rc = 0;
+ enum ldlm_error err = ELDLM_OK;
+ struct ldlm_lock *lock = NULL;
+ void *cookie = NULL;
+ int rc = 0;
struct ldlm_resource *res = NULL;
- ENTRY;
+ ENTRY;
- LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
+ LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF, LATF_SKIP);
flags = ldlm_flags_from_wire(dlm_req->lock_flags);
- LASSERT(req->rq_export);
+ LASSERT(req->rq_export);
if (ptlrpc_req2svc(req)->srv_stats != NULL)
ldlm_svc_get_eopc(dlm_req, ptlrpc_req2svc(req)->srv_stats);
}
static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
- ldlm_cancel_flags_t cancel_flags)
+ enum ldlm_cancel_flags cancel_flags)
{
struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
ENTRY;
struct ldlm_lock_desc *ld,
struct list_head *cancels, int count,
struct ldlm_lock *lock,
- ldlm_cancel_flags_t cancel_flags)
+ enum ldlm_cancel_flags cancel_flags)
{
init_completion(&blwi->blwi_comp);
INIT_LIST_HEAD(&blwi->blwi_head);
struct ldlm_lock_desc *ld,
struct ldlm_lock *lock,
struct list_head *cancels, int count,
- ldlm_cancel_flags_t cancel_flags)
+ enum ldlm_cancel_flags cancel_flags)
{
ENTRY;
int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
struct list_head *cancels, int count,
- ldlm_cancel_flags_t cancel_flags)
+ enum ldlm_cancel_flags cancel_flags)
{
return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
}
ldlm_plain_compat_queue(struct list_head *queue, struct ldlm_lock *req,
struct list_head *work_list)
{
- ldlm_mode_t req_mode = req->l_req_mode;
+ enum ldlm_mode req_mode = req->l_req_mode;
struct ldlm_lock *lock;
struct list_head *tmp;
int compat = 1;
* would be collected and ASTs sent.
*/
int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, ldlm_error_t *err,
+ int first_enq, enum ldlm_error *err,
struct list_head *work_list)
{
struct ldlm_resource *res = lock->l_resource;
}
#endif /* HAVE_SERVER_SUPPORT */
-void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
- /* No policy for plain locks */
+ /* No policy for plain locks */
}
-void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
- /* No policy for plain locks */
+ /* No policy for plain locks */
}
* take into account pl->pl_recalc_time here.
*/
ret = ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC,
- LDLM_CANCEL_LRUR);
+ LDLM_LRU_FLAG_LRUR);
out:
spin_lock(&pl->pl_lock);
if (nr == 0)
return (unused / 100) * sysctl_vfs_cache_pressure;
else
- return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
+ return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_LRU_FLAG_SHRINK);
}
static struct ldlm_pool_ops ldlm_srv_pool_ops = {
}
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
- int idx, ldlm_side_t client)
+ int idx, enum ldlm_side client)
{
int rc;
ENTRY;
* count locks from all namespaces (if possible). Returns number of
* cached locks.
*/
-static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
+static unsigned long ldlm_pools_count(enum ldlm_side client, gfp_t gfp_mask)
{
unsigned long total = 0;
int nr_ns;
return total;
}
-static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr,
+static unsigned long ldlm_pools_scan(enum ldlm_side client, int nr,
gfp_t gfp_mask)
{
unsigned long freed = 0;
* cached locks after shrink is finished. All namespaces are asked to
* cancel approximately equal amount of locks to keep balancing.
*/
-static int ldlm_pools_shrink(ldlm_side_t client, int nr,
- gfp_t gfp_mask)
+static int ldlm_pools_shrink(enum ldlm_side client, int nr, gfp_t gfp_mask)
{
unsigned long total = 0;
#endif /* HAVE_SHRINKER_COUNT */
-int ldlm_pools_recalc(ldlm_side_t client)
+int ldlm_pools_recalc(enum ldlm_side client)
{
unsigned long nr_l = 0, nr_p = 0, l;
- struct ldlm_namespace *ns;
- struct ldlm_namespace *ns_old = NULL;
- int nr, equal = 0;
+ struct ldlm_namespace *ns;
+ struct ldlm_namespace *ns_old = NULL;
+ int nr, equal = 0;
/* seconds of sleep if no active namespaces */
int time = client ? LDLM_POOL_CLI_DEF_RECALC_PERIOD :
LDLM_POOL_SRV_DEF_RECALC_PERIOD;
int ldlm_pool_shrink(struct ldlm_pool *pl,
int nr, gfp_t gfp_mask)
{
- return 0;
+ return 0;
}
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
- int idx, ldlm_side_t client)
+ int idx, enum ldlm_side client)
{
- return 0;
+ return 0;
}
void ldlm_pool_fini(struct ldlm_pool *pl)
void ldlm_pools_fini(void)
{
- return;
+ return;
}
-int ldlm_pools_recalc(ldlm_side_t client)
+int ldlm_pools_recalc(enum ldlm_side client)
{
- return 0;
+ return 0;
}
#endif /* HAVE_LRU_RESIZE_SUPPORT */
struct ldlm_namespace *ns;
int count = LDLM_RECLAIM_BATCH;
int ns_nr, nr_processed;
- ldlm_side_t ns_cli = LDLM_NAMESPACE_SERVER;
+ enum ldlm_side ns_cli = LDLM_NAMESPACE_SERVER;
cfs_duration_t age;
bool skip = true;
ENTRY;
* Enqueue a local lock (typically on a server).
*/
int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_type_t type, ldlm_policy_data_t *policy,
- ldlm_mode_t mode, __u64 *flags,
- ldlm_blocking_callback blocking,
- ldlm_completion_callback completion,
- ldlm_glimpse_callback glimpse,
+ const struct ldlm_res_id *res_id,
+ enum ldlm_type type, union ldlm_policy_data *policy,
+ enum ldlm_mode mode, __u64 *flags,
+ ldlm_blocking_callback blocking,
+ ldlm_completion_callback completion,
+ ldlm_glimpse_callback glimpse,
void *data, __u32 lvb_len, enum lvb_type lvb_type,
- const __u64 *client_cookie,
- struct lustre_handle *lockh)
+ const __u64 *client_cookie,
+ struct lustre_handle *lockh)
{
struct ldlm_lock *lock;
int err;
* Called after receiving reply from server.
*/
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
- ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
- __u64 *flags, void *lvb, __u32 lvb_len,
- struct lustre_handle *lockh,int rc)
+ enum ldlm_type type, __u8 with_policy,
+ enum ldlm_mode mode, __u64 *flags, void *lvb,
+ __u32 lvb_len, struct lustre_handle *lockh, int rc)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
int is_replay = *flags & LDLM_FL_REPLAY;
struct req_capsule *pill = &req->rq_pill;
struct ldlm_request *dlm = NULL;
struct list_head head = LIST_HEAD_INIT(head);
- int flags, avail, to_free, pack = 0;
+ enum ldlm_lru_flags lru_flags;
+ int avail, to_free, pack = 0;
int rc;
ENTRY;
if (cancels == NULL)
cancels = &head;
if (ns_connect_cancelset(ns)) {
- /* Estimate the amount of available space in the request. */
- req_capsule_filled_sizes(pill, RCL_CLIENT);
- avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
+ /* Estimate the amount of available space in the request. */
+ req_capsule_filled_sizes(pill, RCL_CLIENT);
+ avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
- flags = ns_connect_lru_resize(ns) ?
- LDLM_CANCEL_LRUR_NO_WAIT : LDLM_CANCEL_AGED;
+ lru_flags = ns_connect_lru_resize(ns) ?
+ LDLM_LRU_FLAG_LRUR_NO_WAIT : LDLM_LRU_FLAG_AGED;
to_free = !ns_connect_lru_resize(ns) &&
- opc == LDLM_ENQUEUE ? 1 : 0;
+ opc == LDLM_ENQUEUE ? 1 : 0;
/* Cancel LRU locks here _only_ if the server supports
* EARLY_CANCEL. Otherwise we have to send extra CANCEL
* RPC, which will make us slower. */
- if (avail > count)
- count += ldlm_cancel_lru_local(ns, cancels, to_free,
- avail - count, 0, flags);
- if (avail > count)
- pack = count;
- else
- pack = avail;
- req_capsule_set_size(pill, &RMF_DLM_REQ, RCL_CLIENT,
- ldlm_request_bufsize(pack, opc));
- }
+ if (avail > count)
+ count += ldlm_cancel_lru_local(ns, cancels, to_free,
+ avail - count, 0,
+ lru_flags);
+ if (avail > count)
+ pack = count;
+ else
+ pack = avail;
+ req_capsule_set_size(pill, &RMF_DLM_REQ, RCL_CLIENT,
+ ldlm_request_bufsize(pack, opc));
+ }
rc = ptlrpc_request_pack(req, version, opc);
if (rc) {
* pass it to the caller in \a reqp.
*/
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
- struct ldlm_enqueue_info *einfo,
- const struct ldlm_res_id *res_id,
- ldlm_policy_data_t const *policy, __u64 *flags,
+ struct ldlm_enqueue_info *einfo,
+ const struct ldlm_res_id *res_id,
+ union ldlm_policy_data const *policy, __u64 *flags,
void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
struct lustre_handle *lockh, int async)
{
* Prepare and send a batched cancel RPC. It will include \a count lock
* handles of locks given in \a cancels list. */
int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
- int count, ldlm_cancel_flags_t flags)
+ int count, enum ldlm_cancel_flags flags)
{
- struct ptlrpc_request *req = NULL;
- struct obd_import *imp;
- int free, sent = 0;
- int rc = 0;
- ENTRY;
+ struct ptlrpc_request *req = NULL;
+ struct obd_import *imp;
+ int free, sent = 0;
+ int rc = 0;
+ ENTRY;
LASSERT(exp != NULL);
LASSERT(count > 0);
* Lock must not have any readers or writers by this time.
*/
int ldlm_cli_cancel(struct lustre_handle *lockh,
- ldlm_cancel_flags_t cancel_flags)
+ enum ldlm_cancel_flags cancel_flags)
{
struct obd_export *exp;
- int avail, flags, count = 1;
+ enum ldlm_lru_flags lru_flags;
+ int avail, count = 1;
__u64 rc = 0;
struct ldlm_namespace *ns;
struct ldlm_lock *lock;
struct list_head cancels = LIST_HEAD_INIT(cancels);
ENTRY;
- /* concurrent cancels on the same handle can happen */
- lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
+ /* concurrent cancels on the same handle can happen */
+ lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
if (lock == NULL) {
LDLM_DEBUG_NOLOCK("lock is already being destroyed");
RETURN(0);
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, &cancels);
- exp = lock->l_conn_export;
- if (exp_connect_cancelset(exp)) {
- avail = ldlm_format_handles_avail(class_exp2cliimp(exp),
- &RQF_LDLM_CANCEL,
- RCL_CLIENT, 0);
- LASSERT(avail > 0);
-
- ns = ldlm_lock_to_ns(lock);
- flags = ns_connect_lru_resize(ns) ?
- LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
- count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
- LCF_BL_AST, flags);
- }
+ exp = lock->l_conn_export;
+ if (exp_connect_cancelset(exp)) {
+ avail = ldlm_format_handles_avail(class_exp2cliimp(exp),
+ &RQF_LDLM_CANCEL,
+ RCL_CLIENT, 0);
+ LASSERT(avail > 0);
+
+ ns = ldlm_lock_to_ns(lock);
+ lru_flags = ns_connect_lru_resize(ns) ?
+ LDLM_LRU_FLAG_LRUR : LDLM_LRU_FLAG_AGED;
+ count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
+ LCF_BL_AST, lru_flags);
+ }
ldlm_cli_cancel_list(&cancels, count, NULL, cancel_flags);
- RETURN(0);
+ RETURN(0);
}
EXPORT_SYMBOL(ldlm_cli_cancel);
* Return the number of cancelled locks.
*/
int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
- ldlm_cancel_flags_t flags)
+ enum ldlm_cancel_flags cancel_flags)
{
struct list_head head = LIST_HEAD_INIT(head);
struct ldlm_lock *lock, *next;
int left = 0, bl_ast = 0;
__u64 rc;
- left = count;
+ left = count;
list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
- if (left-- == 0)
- break;
+ if (left-- == 0)
+ break;
- if (flags & LCF_LOCAL) {
- rc = LDLM_FL_LOCAL_ONLY;
- ldlm_lock_cancel(lock);
- } else {
- rc = ldlm_cli_cancel_local(lock);
- }
+ if (cancel_flags & LCF_LOCAL) {
+ rc = LDLM_FL_LOCAL_ONLY;
+ ldlm_lock_cancel(lock);
+ } else {
+ rc = ldlm_cli_cancel_local(lock);
+ }
/* Until we have compound requests and can send LDLM_CANCEL
* requests batched with generic RPCs, we need to send cancels
* with the LDLM_FL_BL_AST flag in a separate RPC from
* the one being generated now. */
- if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
+ if (!(cancel_flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
LDLM_DEBUG(lock, "Cancel lock separately");
list_del_init(&lock->l_bl_ast);
list_add(&lock->l_bl_ast, &head);
bl_ast++;
- continue;
- }
- if (rc == LDLM_FL_LOCAL_ONLY) {
- /* CANCEL RPC should not be sent to server. */
+ continue;
+ }
+ if (rc == LDLM_FL_LOCAL_ONLY) {
+ /* CANCEL RPC should not be sent to server. */
list_del_init(&lock->l_bl_ast);
LDLM_LOCK_RELEASE(lock);
count--;
* dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g.
* readahead requests, ...)
*/
-static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res
+ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
+ int unused, int added, int count)
{
- ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
+ enum ldlm_policy_res result = LDLM_POLICY_CANCEL_LOCK;
/* don't check added & count since we want to process all locks
* from unused list.
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
cfs_time_t cur = cfs_time_current();
struct ldlm_pool *pl = &ns->ns_pool;
return LDLM_POLICY_CANCEL_LOCK;
}
-static ldlm_policy_res_t
+static enum ldlm_policy_res
ldlm_cancel_lrur_no_wait_policy(struct ldlm_namespace *ns,
struct ldlm_lock *lock,
int unused, int added,
int count)
{
- ldlm_policy_res_t result;
+ enum ldlm_policy_res result;
result = ldlm_cancel_lrur_policy(ns, lock, unused, added, count);
if (result == LDLM_POLICY_KEEP_LOCK)
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
/* Stop LRU processing when we reach past @count or have checked all
* locks in LRU. */
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
if ((added >= count) &&
cfs_time_before(cfs_time_current(),
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static
+enum ldlm_policy_res ldlm_cancel_default_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
/* Stop LRU processing when we reach past count or have checked all
* locks in LRU. */
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
-typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
- struct ldlm_lock *, int,
- int, int);
+typedef enum ldlm_policy_res
+(*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *ns, struct ldlm_lock *lock,
+ int unused, int added, int count);
static ldlm_cancel_lru_policy_t
-ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
+ldlm_cancel_lru_policy(struct ldlm_namespace *ns, enum ldlm_lru_flags lru_flags)
{
- if (flags & LDLM_CANCEL_NO_WAIT)
- return ldlm_cancel_no_wait_policy;
-
- if (ns_connect_lru_resize(ns)) {
- if (flags & LDLM_CANCEL_SHRINK)
- /* We kill passed number of old locks. */
- return ldlm_cancel_passed_policy;
- else if (flags & LDLM_CANCEL_LRUR)
- return ldlm_cancel_lrur_policy;
- else if (flags & LDLM_CANCEL_PASSED)
- return ldlm_cancel_passed_policy;
- else if (flags & LDLM_CANCEL_LRUR_NO_WAIT)
+ if (lru_flags & LDLM_LRU_FLAG_NO_WAIT)
+ return ldlm_cancel_no_wait_policy;
+
+ if (ns_connect_lru_resize(ns)) {
+ if (lru_flags & LDLM_LRU_FLAG_SHRINK)
+ /* We kill passed number of old locks. */
+ return ldlm_cancel_passed_policy;
+ if (lru_flags & LDLM_LRU_FLAG_LRUR)
+ return ldlm_cancel_lrur_policy;
+ if (lru_flags & LDLM_LRU_FLAG_PASSED)
+ return ldlm_cancel_passed_policy;
+ else if (lru_flags & LDLM_LRU_FLAG_LRUR_NO_WAIT)
return ldlm_cancel_lrur_no_wait_policy;
- } else {
- if (flags & LDLM_CANCEL_AGED)
- return ldlm_cancel_aged_policy;
- }
+ } else {
+ if (lru_flags & LDLM_LRU_FLAG_AGED)
+ return ldlm_cancel_aged_policy;
+ }
- return ldlm_cancel_default_policy;
+ return ldlm_cancel_default_policy;
}
/**
*
* Calling policies for enabled LRU resize:
* ----------------------------------------
- * flags & LDLM_CANCEL_LRUR - use LRU resize policy (SLV from server) to
- * cancel not more than \a count locks;
+ * flags & LDLM_LRU_FLAG_LRUR - use LRU resize policy (SLV from server) to
+ * cancel not more than \a count locks;
*
- * flags & LDLM_CANCEL_PASSED - cancel \a count number of old locks (located at
- * the beginning of LRU list);
+ * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located
+ * at the beginning of LRU list);
*
- * flags & LDLM_CANCEL_SHRINK - cancel not more than \a count locks according to
- * memory pressre policy function;
+ * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according
+ * to memory pressure policy function;
*
- * flags & LDLM_CANCEL_AGED - cancel \a count locks according to "aged policy".
+ * flags & LDLM_LRU_FLAG_AGED - cancel \a count locks according to "aged policy"
*
- * flags & LDLM_CANCEL_NO_WAIT - cancel as many unused locks as possible
- * (typically before replaying locks) w/o
- * sending any RPCs or waiting for any
- * outstanding RPC to complete.
+ * flags & LDLM_LRU_FLAG_NO_WAIT - cancel as many unused locks as possible
+ * (typically before replaying locks) w/o
+ * sending any RPCs or waiting for any
+ * outstanding RPC to complete.
*/
static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
struct list_head *cancels, int count, int max,
- int flags)
+ enum ldlm_lru_flags lru_flags)
{
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
int added = 0, unused, remained;
- int no_wait = flags & (LDLM_CANCEL_NO_WAIT | LDLM_CANCEL_LRUR_NO_WAIT);
+ int no_wait = lru_flags & (LDLM_LRU_FLAG_NO_WAIT |
+ LDLM_LRU_FLAG_LRUR_NO_WAIT);
ENTRY;
spin_lock(&ns->ns_lock);
- unused = ns->ns_nr_unused;
- remained = unused;
+ unused = ns->ns_nr_unused;
+ remained = unused;
- if (!ns_connect_lru_resize(ns))
- count += unused - ns->ns_max_unused;
+ if (!ns_connect_lru_resize(ns))
+ count += unused - ns->ns_max_unused;
- pf = ldlm_cancel_lru_policy(ns, flags);
- LASSERT(pf != NULL);
+ pf = ldlm_cancel_lru_policy(ns, lru_flags);
+ LASSERT(pf != NULL);
while (!list_empty(&ns->ns_unused_list)) {
- ldlm_policy_res_t result;
+ enum ldlm_policy_res result;
cfs_time_t last_use = 0;
- /* all unused locks */
- if (remained-- <= 0)
- break;
+ /* all unused locks */
+ if (remained-- <= 0)
+ break;
- /* For any flags, stop scanning if @max is reached. */
- if (max && added >= max)
- break;
+ /* For any flags, stop scanning if @max is reached. */
+ if (max && added >= max)
+ break;
list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
- l_lru) {
- /* No locks which got blocking requests. */
+ l_lru) {
+ /* No locks which got blocking requests. */
LASSERT(!ldlm_is_bl_ast(lock));
if (no_wait && ldlm_is_skipped(lock))
}
int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
- int count, int max, ldlm_cancel_flags_t cancel_flags,
- int flags)
+ int count, int max,
+ enum ldlm_cancel_flags cancel_flags,
+ enum ldlm_lru_flags lru_flags)
{
- int added;
- added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
- if (added <= 0)
- return added;
- return ldlm_cli_cancel_list_local(cancels, added, cancel_flags);
+ int added;
+
+ added = ldlm_prepare_lru_list(ns, cancels, count, max, lru_flags);
+ if (added <= 0)
+ return added;
+
+ return ldlm_cli_cancel_list_local(cancels, added, cancel_flags);
}
/**
* callback will be performed in this function.
*/
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
- ldlm_cancel_flags_t cancel_flags,
- int flags)
+ enum ldlm_cancel_flags cancel_flags,
+ enum ldlm_lru_flags lru_flags)
{
struct list_head cancels = LIST_HEAD_INIT(cancels);
int count, rc;
/* Just prepare the list of locks, do not actually cancel them yet.
* Locks are cancelled later in a separate thread. */
- count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
+ count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, lru_flags);
rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
if (rc == 0)
RETURN(count);
*/
int ldlm_cancel_resource_local(struct ldlm_resource *res,
struct list_head *cancels,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode, __u64 lock_flags,
- ldlm_cancel_flags_t cancel_flags, void *opaque)
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode, __u64 lock_flags,
+ enum ldlm_cancel_flags cancel_flags,
+ void *opaque)
{
- struct ldlm_lock *lock;
- int count = 0;
- ENTRY;
+ struct ldlm_lock *lock;
+ int count = 0;
+ ENTRY;
- lock_res(res);
+ lock_res(res);
list_for_each_entry(lock, &res->lr_granted, l_res_link) {
if (opaque != NULL && lock->l_ast_data != opaque) {
LDLM_ERROR(lock, "data %p doesn't match opaque %p",
* Destroy \a cancels at the end.
*/
int ldlm_cli_cancel_list(struct list_head *cancels, int count,
- struct ptlrpc_request *req, ldlm_cancel_flags_t flags)
+ struct ptlrpc_request *req,
+ enum ldlm_cancel_flags flags)
{
- struct ldlm_lock *lock;
- int res = 0;
- ENTRY;
+ struct ldlm_lock *lock;
+ int res = 0;
+ ENTRY;
if (list_empty(cancels) || count == 0)
RETURN(0);
* to notify the server. */
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
- void *opaque)
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode,
+ enum ldlm_cancel_flags flags, void *opaque)
{
struct ldlm_resource *res;
struct list_head cancels = LIST_HEAD_INIT(cancels);
* If flags & LCF_LOCAL, throw the locks away without trying
* to notify the server. */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_cancel_flags_t flags, void *opaque)
+ const struct ldlm_res_id *res_id,
+ enum ldlm_cancel_flags flags, void *opaque)
{
struct ldlm_cli_cancel_arg arg = {
.lc_flags = flags,
ldlm_ns_name(ns), ns->ns_nr_unused);
/* We don't need to care whether or not LRU resize is enabled
- * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
+ * because the LDLM_LRU_FLAG_NO_WAIT policy doesn't use the
* count parameter */
canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
- LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
+ LCF_LOCAL, LDLM_LRU_FLAG_NO_WAIT);
CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
canceled, ldlm_ns_name(ns));
if (ns_connect_lru_resize(ns)) {
int canceled, unused = ns->ns_nr_unused;
- /* Try to cancel all @ns_nr_unused locks. */
+ /* Try to cancel all @ns_nr_unused locks. */
canceled = ldlm_cancel_lru(ns, unused, 0,
- LDLM_CANCEL_PASSED);
- if (canceled < unused) {
- CDEBUG(D_DLMTRACE,
- "not all requested locks are canceled, "
- "requested: %d, canceled: %d\n", unused,
- canceled);
- return -EINVAL;
- }
- } else {
- tmp = ns->ns_max_unused;
- ns->ns_max_unused = 0;
- ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
- ns->ns_max_unused = tmp;
- }
- return count;
- }
+ LDLM_LRU_FLAG_PASSED);
+ if (canceled < unused) {
+ CDEBUG(D_DLMTRACE,
+ "not all requested locks are canceled, "
+ "requested: %d, canceled: %d\n", unused,
+ canceled);
+ return -EINVAL;
+ }
+ } else {
+ tmp = ns->ns_max_unused;
+ ns->ns_max_unused = 0;
+ ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED);
+ ns->ns_max_unused = tmp;
+ }
+ return count;
+ }
tmp = simple_strtoul(dummy, &end, 0);
if (dummy == end) {
}
lru_resize = (tmp == 0);
- if (ns_connect_lru_resize(ns)) {
- if (!lru_resize)
- ns->ns_max_unused = (unsigned int)tmp;
+ if (ns_connect_lru_resize(ns)) {
+ if (!lru_resize)
+ ns->ns_max_unused = tmp;
- if (tmp > ns->ns_nr_unused)
- tmp = ns->ns_nr_unused;
- tmp = ns->ns_nr_unused - tmp;
+ if (tmp > ns->ns_nr_unused)
+ tmp = ns->ns_nr_unused;
+ tmp = ns->ns_nr_unused - tmp;
- CDEBUG(D_DLMTRACE,
- "changing namespace %s unused locks from %u to %u\n",
- ldlm_ns_name(ns), ns->ns_nr_unused,
- (unsigned int)tmp);
- ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
-
- if (!lru_resize) {
- CDEBUG(D_DLMTRACE,
- "disable lru_resize for namespace %s\n",
- ldlm_ns_name(ns));
- ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
- }
+ CDEBUG(D_DLMTRACE,
+ "changing namespace %s unused locks from %u to %u\n",
+ ldlm_ns_name(ns), ns->ns_nr_unused,
+ (unsigned int)tmp);
+ ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
+
+ if (!lru_resize) {
+ CDEBUG(D_DLMTRACE,
+ "disable lru_resize for namespace %s\n",
+ ldlm_ns_name(ns));
+ ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
+ }
} else {
- CDEBUG(D_DLMTRACE,
- "changing namespace %s max_unused from %u to %u\n",
- ldlm_ns_name(ns), ns->ns_max_unused,
- (unsigned int)tmp);
- ns->ns_max_unused = (unsigned int)tmp;
- ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
+ CDEBUG(D_DLMTRACE,
+ "changing namespace %s max_unused from %u to %u\n",
+ ldlm_ns_name(ns), ns->ns_max_unused,
+ (unsigned int)tmp);
+ ns->ns_max_unused = (unsigned int)tmp;
+ ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
/* Make sure that LRU resize was originally supported before
* turning it on here. */
.hs_put = ldlm_res_hop_put
};
-typedef struct {
- ldlm_ns_type_t nsd_type;
- /** hash bucket bits */
- unsigned nsd_bkt_bits;
- /** hash bits */
- unsigned nsd_all_bits;
- /** hash operations */
+typedef struct ldlm_ns_hash_def {
+ enum ldlm_ns_type nsd_type;
+ /** hash bucket bits */
+ unsigned nsd_bkt_bits;
+ /** hash bits */
+ unsigned nsd_all_bits;
+ /** hash operations */
struct cfs_hash_ops *nsd_hops;
} ldlm_ns_hash_def_t;
-static ldlm_ns_hash_def_t ldlm_ns_hash_defs[] =
+static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] =
{
{
.nsd_type = LDLM_NS_TYPE_MDC,
* Create and initialize new empty namespace.
*/
struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
- ldlm_side_t client,
- ldlm_appetite_t apt,
- ldlm_ns_type_t ns_type)
+ enum ldlm_side client,
+ enum ldlm_appetite apt,
+ enum ldlm_ns_type ns_type)
{
- struct ldlm_namespace *ns = NULL;
- struct ldlm_ns_bucket *nsb;
- ldlm_ns_hash_def_t *nsd;
- struct cfs_hash_bd bd;
- int idx;
- int rc;
- ENTRY;
+ struct ldlm_namespace *ns = NULL;
+ struct ldlm_ns_bucket *nsb;
+ struct ldlm_ns_hash_def *nsd;
+ struct cfs_hash_bd bd;
+ int idx;
+ int rc;
+ ENTRY;
LASSERT(obd != NULL);
}
/** Register \a ns in the list of namespaces */
-void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
+void ldlm_namespace_register(struct ldlm_namespace *ns, enum ldlm_side client)
{
mutex_lock(ldlm_namespace_lock(client));
LASSERT(list_empty(&ns->ns_list_chain));
}
/** Unregister \a ns from the list of namespaces. */
-void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
+void ldlm_namespace_unregister(struct ldlm_namespace *ns, enum ldlm_side client)
{
mutex_lock(ldlm_namespace_lock(client));
LASSERT(!list_empty(&ns->ns_list_chain));
/** Should be called with ldlm_namespace_lock(client) taken. */
void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
- ldlm_side_t client)
+ enum ldlm_side client)
{
LASSERT(!list_empty(&ns->ns_list_chain));
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
/** Should be called with ldlm_namespace_lock(client) taken. */
void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
- ldlm_side_t client)
+ enum ldlm_side client)
{
LASSERT(!list_empty(&ns->ns_list_chain));
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
}
/** Should be called with ldlm_namespace_lock(client) taken. */
-struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
+struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
{
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
LASSERT(!list_empty(ldlm_namespace_list(client)));
}
/** Create and initialize new resource. */
-static struct ldlm_resource *ldlm_resource_new(ldlm_type_t type)
+static struct ldlm_resource *ldlm_resource_new(enum ldlm_type ldlm_type)
{
struct ldlm_resource *res;
int idx;
if (res == NULL)
return NULL;
- if (type == LDLM_EXTENT) {
+ if (ldlm_type == LDLM_EXTENT) {
OBD_SLAB_ALLOC(res->lr_itree, ldlm_interval_tree_slab,
sizeof(*res->lr_itree) * LCK_MODE_NUM);
if (res->lr_itree == NULL) {
*/
struct ldlm_resource *
ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
- const struct ldlm_res_id *name, ldlm_type_t type, int create)
+ const struct ldlm_res_id *name, enum ldlm_type type,
+ int create)
{
struct hlist_node *hnode;
struct ldlm_resource *res = NULL;
if (res == NULL)
return ERR_PTR(-ENOMEM);
- res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
- res->lr_name = *name;
- res->lr_type = type;
+ res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
+ res->lr_name = *name;
+ res->lr_type = type;
cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
* Print information about all locks in all namespaces on this node to debug
* log.
*/
-void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
+void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
{
struct list_head *tmp;
- if (!((libcfs_debug | D_ERROR) & level))
- return;
+ if (!((libcfs_debug | D_ERROR) & level))
+ return;
mutex_lock(ldlm_namespace_lock(client));
list_for_each(tmp, ldlm_namespace_list(client)) {
- struct ldlm_namespace *ns;
+ struct ldlm_namespace *ns;
+
ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
- ldlm_namespace_dump(level, ns);
- }
+ ldlm_namespace_dump(level, ns);
+ }
mutex_unlock(ldlm_namespace_lock(client));
}
struct lfsck_lock_handle {
struct lustre_handle llh_pdo_lh;
struct lustre_handle llh_reg_lh;
- ldlm_mode_t llh_pdo_mode;
- ldlm_mode_t llh_reg_mode;
+ enum ldlm_mode llh_pdo_mode;
+ enum ldlm_mode llh_reg_mode;
};
struct lfsck_thread_info {
struct lfsck_async_interpret_args lti_laia2;
struct lfsck_start lti_start;
struct lfsck_stop lti_stop;
- ldlm_policy_data_t lti_policy;
+ union ldlm_policy_data lti_policy;
struct ldlm_enqueue_info lti_einfo;
struct ldlm_res_id lti_resid;
union {
struct lu_fid *fid, bool locked);
int lfsck_ibits_lock(const struct lu_env *env, struct lfsck_instance *lfsck,
struct dt_object *obj, struct lustre_handle *lh,
- __u64 bits, ldlm_mode_t mode);
-void lfsck_ibits_unlock(struct lustre_handle *lh, ldlm_mode_t mode);
+ __u64 bits, enum ldlm_mode mode);
+void lfsck_ibits_unlock(struct lustre_handle *lh, enum ldlm_mode mode);
int lfsck_lock(const struct lu_env *env, struct lfsck_instance *lfsck,
struct dt_object *obj, const char *name,
- struct lfsck_lock_handle *llh, __u64 bits, ldlm_mode_t mode);
+ struct lfsck_lock_handle *llh, __u64 bits, enum ldlm_mode mode);
void lfsck_unlock(struct lfsck_lock_handle *llh);
int lfsck_find_mdt_idx_by_fid(const struct lu_env *env,
struct lfsck_instance *lfsck,
{
struct lfsck_thread_info *info = lfsck_env_info(env);
struct lu_attr *la = &info->lti_la;
- ldlm_policy_data_t *policy = &info->lti_policy;
+ union ldlm_policy_data *policy = &info->lti_policy;
struct ldlm_res_id *resid = &info->lti_resid;
struct lfsck_instance *lfsck = com->lc_lfsck;
struct dt_device *dev = lfsck->li_bottom;
static int __lfsck_ibits_lock(const struct lu_env *env,
struct lfsck_instance *lfsck,
- struct dt_object *obj,
- struct ldlm_res_id *resid,
- struct lustre_handle *lh,
- __u64 bits, ldlm_mode_t mode)
+ struct dt_object *obj, struct ldlm_res_id *resid,
+ struct lustre_handle *lh, __u64 bits,
+ enum ldlm_mode mode)
{
struct lfsck_thread_info *info = lfsck_env_info(env);
- ldlm_policy_data_t *policy = &info->lti_policy;
+ union ldlm_policy_data *policy = &info->lti_policy;
__u64 flags = LDLM_FL_ATOMIC_CB;
int rc;
*/
int lfsck_ibits_lock(const struct lu_env *env, struct lfsck_instance *lfsck,
struct dt_object *obj, struct lustre_handle *lh,
- __u64 bits, ldlm_mode_t mode)
+ __u64 bits, enum ldlm_mode mode)
{
struct ldlm_res_id *resid = &lfsck_env_info(env)->lti_resid;
* \param[in] lh pointer to the lock handle
* \param[in] mode the mode for the ldlm lock to be released
*/
-void lfsck_ibits_unlock(struct lustre_handle *lh, ldlm_mode_t mode)
+void lfsck_ibits_unlock(struct lustre_handle *lh, enum ldlm_mode mode)
{
if (lustre_handle_is_used(lh)) {
ldlm_lock_decref(lh, mode);
* \retval negative error number on failure
*/
int lfsck_lock(const struct lu_env *env, struct lfsck_instance *lfsck,
- struct dt_object *obj, const char *name,
- struct lfsck_lock_handle *llh, __u64 bits, ldlm_mode_t mode)
+ struct dt_object *obj, const char *name,
+ struct lfsck_lock_handle *llh, __u64 bits, enum ldlm_mode mode)
{
struct ldlm_res_id *resid = &lfsck_env_info(env)->lti_resid;
int rc;
static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
struct file *file)
{
- ldlm_policy_data_t policy = {
+ union ldlm_policy_data policy = {
.l_inodebits = { MDS_INODELOCK_OPEN },
};
__u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct ll_inode_info *lli = ll_i2info(inode);
struct lustre_handle lockh;
- int lockmode;
+ enum ldlm_mode lockmode;
int rc = 0;
ENTRY;
.ei_cbdata = file_lock,
};
struct md_op_data *op_data;
- struct lustre_handle lockh = {0};
- ldlm_policy_data_t flock = {{0}};
+ struct lustre_handle lockh = { 0 };
+ union ldlm_policy_data flock = { { 0 } };
int fl_type = file_lock->fl_type;
__u64 flags = 0;
int rc;
* \param l_req_mode [IN] searched lock mode
* \retval boolean, true iff all bits are found
*/
-int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode)
+int ll_have_md_lock(struct inode *inode, __u64 *bits, enum ldlm_mode l_req_mode)
{
- struct lustre_handle lockh;
- ldlm_policy_data_t policy;
- ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ?
- (LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
- struct lu_fid *fid;
+ struct lustre_handle lockh;
+ union ldlm_policy_data policy;
+ enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
+ (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
+ struct lu_fid *fid;
__u64 flags;
- int i;
- ENTRY;
+ int i;
+ ENTRY;
if (!inode)
RETURN(0);
RETURN(*bits == 0);
}
-ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh, __u64 flags,
- ldlm_mode_t mode)
+enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
+ struct lustre_handle *lockh, __u64 flags,
+ enum ldlm_mode mode)
{
- ldlm_policy_data_t policy = { .l_inodebits = {bits}};
- struct lu_fid *fid;
- ldlm_mode_t rc;
- ENTRY;
+ union ldlm_policy_data policy = { .l_inodebits = { bits } };
+ struct lu_fid *fid;
+ enum ldlm_mode rc;
+ ENTRY;
- fid = &ll_i2info(inode)->lli_fid;
- CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
+ fid = &ll_i2info(inode)->lli_fid;
+ CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
rc = md_lock_match(ll_i2mdexp(inode), LDLM_FL_BLOCK_GRANTED|flags,
fid, LDLM_IBITS, &policy, mode, lockh);
* Apply the layout to the inode. Layout lock is held and will be released
* in this function.
*/
-static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
+static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct md_op_data *op_data;
- struct lookup_intent it;
- struct lustre_handle lockh;
- ldlm_mode_t mode;
+ struct lookup_intent it;
+ struct lustre_handle lockh;
+ enum ldlm_mode mode;
struct ldlm_enqueue_info einfo = {
.ei_type = LDLM_IBITS,
.ei_mode = LCK_CR,
extern struct file_operations ll_file_operations_noflock;
extern struct inode_operations ll_file_inode_operations;
extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
- ldlm_mode_t l_req_mode);
-extern ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh, __u64 flags,
- ldlm_mode_t mode);
+ enum ldlm_mode l_req_mode);
+extern enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
+ struct lustre_handle *lockh, __u64 flags,
+ enum ldlm_mode mode);
int ll_file_open(struct inode *inode, struct file *file);
int ll_file_release(struct inode *inode, struct file *file);
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
int ll_file_mmap(struct file * file, struct vm_area_struct * vma);
-void policy_from_vma(ldlm_policy_data_t *policy,
- struct vm_area_struct *vma, unsigned long addr, size_t count);
+void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
+ unsigned long addr, size_t count);
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
size_t count);
static const struct vm_operations_struct ll_file_vm_ops;
-void policy_from_vma(ldlm_policy_data_t *policy,
- struct vm_area_struct *vma, unsigned long addr,
- size_t count)
+void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
+ unsigned long addr, size_t count)
{
policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
(vma->vm_pgoff << PAGE_CACHE_SHIFT);
- policy->l_extent.end = (policy->l_extent.start + count - 1) |
+ policy->l_extent.end = (policy->l_extent.start + count - 1) |
~PAGE_MASK;
}
struct vvp_io *vio, struct cl_io *io)
{
struct vvp_thread_info *vti = vvp_env_info(env);
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- struct cl_lock_descr *descr = &vti->vti_descr;
- ldlm_policy_data_t policy;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct cl_lock_descr *descr = &vti->vti_descr;
+ union ldlm_policy_data policy;
#ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
struct iovec iov;
struct iov_iter i;
int result = 0;
ENTRY;
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- if (!cl_is_normalio(env, io))
- RETURN(0);
+ if (!cl_is_normalio(env, io))
+ RETURN(0);
/* nfs or loop back device write */
if (vio->vui_iter == NULL)
struct lookup_intent *oit,
struct ptlrpc_request **req)
{
- ldlm_mode_t mode;
+ enum ldlm_mode mode;
struct lustre_handle lockh = { 0 };
struct md_op_data *op_data;
struct ll_inode_info *lli = ll_i2info(inode);
NULL)
static int lmv_early_cancel(struct obd_export *exp, struct lmv_tgt_desc *tgt,
- struct md_op_data *op_data,
- __u32 op_tgt, ldlm_mode_t mode, int bits, int flag)
-{
- struct lu_fid *fid = md_op_data_fid(op_data, flag);
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- ldlm_policy_data_t policy = {{ 0 }};
- int rc = 0;
+ struct md_op_data *op_data, __u32 op_tgt,
+ enum ldlm_mode mode, int bits, int flag)
+{
+ struct lu_fid *fid = md_op_data_fid(op_data, flag);
+ struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+ union ldlm_policy_data policy = { { 0 } };
+ int rc = 0;
ENTRY;
if (!fid_is_sane(fid))
EXPORT_SYMBOL(lmv_free_memmd);
static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- ldlm_cancel_flags_t flags, void *opaque)
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode, enum ldlm_cancel_flags flags,
+ void *opaque)
{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- int rc = 0;
- int err;
- __u32 i;
- ENTRY;
+ struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+ int rc = 0;
+ __u32 i;
+ ENTRY;
- LASSERT(fid != NULL);
+ LASSERT(fid != NULL);
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
struct lmv_tgt_desc *tgt = lmv->tgts[i];
+ int err;
if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active)
continue;
RETURN(rc);
}
-ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh)
+enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
+ const struct lu_fid *fid, enum ldlm_type type,
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode, struct lustre_handle *lockh)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
- ldlm_mode_t rc;
+ enum ldlm_mode rc;
int tgt;
int i;
ENTRY;
static int lod_object_unlock_internal(const struct lu_env *env,
struct dt_object *dt,
struct ldlm_enqueue_info *einfo,
- ldlm_policy_data_t *policy)
+ union ldlm_policy_data *policy)
{
struct lod_slave_locks *slave_locks = einfo->ei_cbdata;
int rc = 0;
struct lustre_handle *lockh, __u64 extra_lock_flags);
int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
- struct list_head *cancels, ldlm_mode_t mode,
+ struct list_head *cancels, enum ldlm_mode mode,
__u64 bits);
/* mdc/mdc_request.c */
int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp,
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
void *ea, size_t ealen, struct ptlrpc_request **request);
int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request);
+ struct ptlrpc_request **request);
int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- ldlm_cancel_flags_t flags, void *opaque);
+ union ldlm_policy_data *policy, enum ldlm_mode mode,
+ enum ldlm_cancel_flags flags, void *opaque);
int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
struct lu_fid *fid, __u64 *bits);
struct md_enqueue_info *minfo,
struct ldlm_enqueue_info *einfo);
-ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh);
+enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
+ const struct lu_fid *fid, enum ldlm_type type,
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode, struct lustre_handle *lockh);
static inline int mdc_prep_elc_req(struct obd_export *exp,
struct ptlrpc_request *req, int opc,
RETURN(0);
}
-ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh)
+enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
+ const struct lu_fid *fid, enum ldlm_type type,
+ union ldlm_policy_data *policy,
+ enum ldlm_mode mode, struct lustre_handle *lockh)
{
struct ldlm_res_id res_id;
- ldlm_mode_t rc;
+ enum ldlm_mode rc;
ENTRY;
fid_build_reg_res_name(fid, &res_id);
RETURN(rc);
}
-int mdc_cancel_unused(struct obd_export *exp,
- const struct lu_fid *fid,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
- void *opaque)
+int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
+ union ldlm_policy_data *policy, enum ldlm_mode mode,
+ enum ldlm_cancel_flags flags, void *opaque)
{
- struct ldlm_res_id res_id;
- struct obd_device *obd = class_exp2obd(exp);
- int rc;
+ struct obd_device *obd = class_exp2obd(exp);
+ struct ldlm_res_id res_id;
+ int rc;
- ENTRY;
+ ENTRY;
- fid_build_reg_res_name(fid, &res_id);
- rc = ldlm_cli_cancel_unused_resource(obd->obd_namespace, &res_id,
- policy, mode, flags, opaque);
- RETURN(rc);
+ fid_build_reg_res_name(fid, &res_id);
+ rc = ldlm_cli_cancel_unused_resource(obd->obd_namespace, &res_id,
+ policy, mode, flags, opaque);
+ RETURN(rc);
}
int mdc_null_inode(struct obd_export *exp,
__u32 lmmsize = op_data->op_data_size;
struct list_head cancels = LIST_HEAD_INIT(cancels);
int count = 0;
- int mode;
+ enum ldlm_mode mode;
int rc;
- ENTRY;
+ ENTRY;
- it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG;
+ it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG;
- /* XXX: openlock is not cancelled for cross-refs. */
- /* If inode is known, cancel conflicting OPEN locks. */
+ /* XXX: openlock is not cancelled for cross-refs. */
+ /* If inode is known, cancel conflicting OPEN locks. */
if (fid_is_sane(&op_data->op_fid2)) {
if (it->it_flags & MDS_OPEN_LEASE) { /* try to get lease */
if (it->it_flags & FMODE_WRITE)
struct lookup_intent *it, struct md_op_data *op_data,
struct lustre_handle *lockh, __u64 extra_lock_flags)
{
- struct obd_device *obddev = class_exp2obd(exp);
- struct ptlrpc_request *req = NULL;
- __u64 flags, saved_flags = extra_lock_flags;
- int rc;
- struct ldlm_res_id res_id;
- static const ldlm_policy_data_t lookup_policy =
- { .l_inodebits = { MDS_INODELOCK_LOOKUP } };
- static const ldlm_policy_data_t update_policy =
- { .l_inodebits = { MDS_INODELOCK_UPDATE } };
- static const ldlm_policy_data_t layout_policy =
- { .l_inodebits = { MDS_INODELOCK_LAYOUT } };
- static const ldlm_policy_data_t getxattr_policy = {
- .l_inodebits = { MDS_INODELOCK_XATTR } };
- int generation, resends = 0;
- struct ldlm_reply *lockrep;
- enum lvb_type lvb_type = 0;
- ENTRY;
+ struct obd_device *obddev = class_exp2obd(exp);
+ struct ptlrpc_request *req = NULL;
+ __u64 flags, saved_flags = extra_lock_flags;
+ struct ldlm_res_id res_id;
+ static const union ldlm_policy_data lookup_policy = {
+ .l_inodebits = { MDS_INODELOCK_LOOKUP } };
+ static const union ldlm_policy_data update_policy = {
+ .l_inodebits = { MDS_INODELOCK_UPDATE } };
+ static const union ldlm_policy_data layout_policy = {
+ .l_inodebits = { MDS_INODELOCK_LAYOUT } };
+ static const union ldlm_policy_data getxattr_policy = {
+ .l_inodebits = { MDS_INODELOCK_XATTR } };
+ int generation, resends = 0;
+ struct ldlm_reply *lockrep;
+ enum lvb_type lvb_type = 0;
+ int rc;
+ ENTRY;
- LASSERTF(!it || einfo->ei_type == LDLM_IBITS, "lock type %d\n",
- einfo->ei_type);
- fid_build_reg_res_name(&op_data->op_fid1, &res_id);
+ LASSERTF(!it || einfo->ei_type == LDLM_IBITS, "lock type %d\n",
+ einfo->ei_type);
+ fid_build_reg_res_name(&op_data->op_fid1, &res_id);
if (it != NULL) {
LASSERT(policy == NULL);
LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP | IT_LAYOUT));
}
- /* If we already have a matching lock, then cancel the new
- * one. We have to set the data here instead of in
- * mdc_enqueue, because we need to use the child's inode as
- * the l_ast_data to match, and that's not available until
- * intent_finish has performed the iget().) */
- lock = ldlm_handle2lock(lockh);
- if (lock) {
- ldlm_policy_data_t policy = lock->l_policy_data;
- LDLM_DEBUG(lock, "matching against this");
+ /* If we already have a matching lock, then cancel the new
+ * one. We have to set the data here instead of in
+ * mdc_enqueue, because we need to use the child's inode as
+ * the l_ast_data to match, and that's not available until
+ * intent_finish has performed the iget(). */
+ lock = ldlm_handle2lock(lockh);
+ if (lock) {
+ union ldlm_policy_data policy = lock->l_policy_data;
+ LDLM_DEBUG(lock, "matching against this");
LASSERTF(fid_res_name_eq(&mdt_body->mbo_fid1,
&lock->l_resource->lr_name),
}
int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
- struct lu_fid *fid, __u64 *bits)
+ struct lu_fid *fid, __u64 *bits)
{
- /* We could just return 1 immediately, but since we should only
- * be called in revalidate_it if we already have a lock, let's
- * verify that. */
- struct ldlm_res_id res_id;
- struct lustre_handle lockh;
- ldlm_policy_data_t policy;
- ldlm_mode_t mode;
- ENTRY;
+ /* We could just return 1 immediately, but since we should only
+ * be called in revalidate_it if we already have a lock, let's
+ * verify that. */
+ struct ldlm_res_id res_id;
+ struct lustre_handle lockh;
+ union ldlm_policy_data policy;
+ enum ldlm_mode mode;
+ ENTRY;
if (it->d.lustre.it_lock_handle) {
lockh.cookie = it->d.lustre.it_lock_handle;
/*XXX: Both MDS_INODELOCK_LOOKUP and MDS_INODELOCK_UPDATE are needed
* for statahead currently. Consider CMD in future, such two bits
* maybe managed by different MDS, should be adjusted then. */
- ldlm_policy_data_t policy = {
- .l_inodebits = { MDS_INODELOCK_LOOKUP |
- MDS_INODELOCK_UPDATE }
- };
+ union ldlm_policy_data policy = {
+ .l_inodebits = { MDS_INODELOCK_LOOKUP |
+ MDS_INODELOCK_UPDATE } };
int rc = 0;
__u64 flags = LDLM_FL_HAS_INTENT;
ENTRY;
* found by @fid. Found locks are added into @cancel list. Returns the amount of
* locks added to @cancels list. */
int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
- struct list_head *cancels, ldlm_mode_t mode,
- __u64 bits)
+ struct list_head *cancels, enum ldlm_mode mode,
+ __u64 bits)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- ldlm_policy_data_t policy = { {0} };
+ union ldlm_policy_data policy = { {0} };
struct ldlm_res_id res_id;
struct ldlm_resource *res;
int count;
}
int mdd_local_file_create(const struct lu_env *env, struct mdd_device *mdd,
- const struct lu_fid *pfid, const char *name, __u32 mode,
- struct lu_fid *fid)
+ const struct lu_fid *pfid, const char *name,
+ __u32 mode, struct lu_fid *fid)
{
- struct dt_object *parent, *dto;
- int rc;
+ struct dt_object *parent, *dto;
+ int rc;
ENTRY;
struct md_object *obj,
struct lustre_handle *lh,
struct ldlm_enqueue_info *einfo,
- ldlm_policy_data_t *policy)
+ union ldlm_policy_data *policy)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
return dt_object_lock(env, mdd_object_child(mdd_obj), lh,
static int mdd_object_unlock(const struct lu_env *env,
struct md_object *obj,
struct ldlm_enqueue_info *einfo,
- ldlm_policy_data_t *policy)
+ union ldlm_policy_data *policy)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
return dt_object_unlock(env, mdd_object_child(mdd_obj), einfo, policy);
[LCK_GROUP] = MDL_GROUP
};
-ldlm_mode_t mdt_dlm_lock_modes[] = {
- [MDL_MINMODE] = LCK_MINMODE,
- [MDL_EX] = LCK_EX,
- [MDL_PW] = LCK_PW,
- [MDL_PR] = LCK_PR,
- [MDL_CW] = LCK_CW,
- [MDL_CR] = LCK_CR,
- [MDL_NL] = LCK_NL,
- [MDL_GROUP] = LCK_GROUP
+enum ldlm_mode mdt_dlm_lock_modes[] = {
+ [MDL_MINMODE] = LCK_MINMODE,
+ [MDL_EX] = LCK_EX,
+ [MDL_PW] = LCK_PW,
+ [MDL_PR] = LCK_PR,
+ [MDL_CW] = LCK_CW,
+ [MDL_CR] = LCK_CR,
+ [MDL_NL] = LCK_NL,
+ [MDL_GROUP] = LCK_GROUP
};
static struct mdt_device *mdt_dev(struct lu_device *d);
rep->lock_policy_res1 |= op_flag;
}
-void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
+void mdt_lock_reg_init(struct mdt_lock_handle *lh, enum ldlm_mode lm)
{
- lh->mlh_pdo_hash = 0;
- lh->mlh_reg_mode = lm;
+ lh->mlh_pdo_hash = 0;
+ lh->mlh_reg_mode = lm;
lh->mlh_rreg_mode = lm;
- lh->mlh_type = MDT_REG_LOCK;
+ lh->mlh_type = MDT_REG_LOCK;
}
-void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lock_mode,
+void mdt_lock_pdo_init(struct mdt_lock_handle *lh, enum ldlm_mode lock_mode,
const struct lu_name *lname)
{
lh->mlh_reg_mode = lock_mode;
return 1;
}
-int mdt_remote_object_lock(struct mdt_thread_info *mti,
- struct mdt_object *o, const struct lu_fid *fid,
- struct lustre_handle *lh, ldlm_mode_t mode,
- __u64 ibits, bool nonblock)
+int mdt_remote_object_lock(struct mdt_thread_info *mti, struct mdt_object *o,
+ const struct lu_fid *fid, struct lustre_handle *lh,
+ enum ldlm_mode mode, __u64 ibits, bool nonblock)
{
struct ldlm_enqueue_info *einfo = &mti->mti_einfo;
- ldlm_policy_data_t *policy = &mti->mti_policy;
+ union ldlm_policy_data *policy = &mti->mti_policy;
struct ldlm_res_id *res_id = &mti->mti_res_id;
int rc = 0;
ENTRY;
struct mdt_lock_handle *lh, __u64 ibits,
bool nonblock)
{
- struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
- ldlm_policy_data_t *policy = &info->mti_policy;
- struct ldlm_res_id *res_id = &info->mti_res_id;
+ struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
+ union ldlm_policy_data *policy = &info->mti_policy;
+ struct ldlm_res_id *res_id = &info->mti_res_id;
__u64 dlmflags;
- int rc;
- ENTRY;
+ int rc;
+ ENTRY;
LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
* \param mode lock mode
* \param decref force immediate lock releasing
*/
-static
-void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
- ldlm_mode_t mode, int decref)
+static void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
+ enum ldlm_mode mode, int decref)
{
- ENTRY;
+ ENTRY;
- if (lustre_handle_is_used(h)) {
- if (decref || !info->mti_has_trans ||
- !(mode & (LCK_PW | LCK_EX))){
- mdt_fid_unlock(h, mode);
- } else {
- struct mdt_device *mdt = info->mti_mdt;
- struct ldlm_lock *lock = ldlm_handle2lock(h);
- struct ptlrpc_request *req = mdt_info_req(info);
- int no_ack = 0;
+ if (lustre_handle_is_used(h)) {
+ if (decref || !info->mti_has_trans ||
+ !(mode & (LCK_PW | LCK_EX))) {
+ mdt_fid_unlock(h, mode);
+ } else {
+ struct mdt_device *mdt = info->mti_mdt;
+ struct ldlm_lock *lock = ldlm_handle2lock(h);
+ struct ptlrpc_request *req = mdt_info_req(info);
+ int no_ack = 0;
- LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
- h->cookie);
+ LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
+ h->cookie);
/* there is no request if mdt_object_unlock() is called
* from mdt_export_cleanup()->mdt_add_dirty_flag() */
if (likely(req != NULL)) {
}
static int mdt_intent_policy(struct ldlm_namespace *ns,
- struct ldlm_lock **lockp, void *req_cookie,
- ldlm_mode_t mode, __u64 flags, void *data)
+ struct ldlm_lock **lockp, void *req_cookie,
+ enum ldlm_mode mode, __u64 flags, void *data)
{
struct tgt_session_info *tsi;
struct mdt_thread_info *info;
};
struct mdt_lock_handle {
- /* Lock type, reg for cross-ref use or pdo lock. */
- mdl_type_t mlh_type;
+ /* Lock type, reg for cross-ref use or pdo lock. */
+ mdl_type_t mlh_type;
- /* Regular lock */
- struct lustre_handle mlh_reg_lh;
- ldlm_mode_t mlh_reg_mode;
+ /* Regular lock */
+ struct lustre_handle mlh_reg_lh;
+ enum ldlm_mode mlh_reg_mode;
- /* Pdirops lock */
- struct lustre_handle mlh_pdo_lh;
- ldlm_mode_t mlh_pdo_mode;
- unsigned int mlh_pdo_hash;
+ /* Pdirops lock */
+ struct lustre_handle mlh_pdo_lh;
+ enum ldlm_mode mlh_pdo_mode;
+ unsigned int mlh_pdo_hash;
/* Remote regular lock */
- struct lustre_handle mlh_rreg_lh;
- ldlm_mode_t mlh_rreg_mode;
+ struct lustre_handle mlh_rreg_lh;
+ enum ldlm_mode mlh_rreg_mode;
};
enum {
* They should be initialized explicitly by the user themselves.
*/
- /* XXX: If something is in a union, make sure they do not conflict */
-
- struct lu_fid mti_tmp_fid1;
- struct lu_fid mti_tmp_fid2;
- ldlm_policy_data_t mti_policy; /* for mdt_object_lock() and
- * mdt_rename_lock() */
- struct ldlm_res_id mti_res_id; /* for mdt_object_lock() and
- mdt_rename_lock() */
- union {
- struct obd_uuid uuid[2]; /* for mdt_seq_init_cli() */
- char ns_name[48]; /* for mdt_init0() */
- struct lustre_cfg_bufs bufs; /* for mdt_stack_fini() */
- struct obd_statfs osfs; /* for mdt_statfs() */
+ /* XXX: If something is in a union, make sure they do not conflict */
+ struct lu_fid mti_tmp_fid1;
+ struct lu_fid mti_tmp_fid2;
+ union ldlm_policy_data mti_policy; /* for mdt_object_lock() */
+ struct ldlm_res_id mti_res_id; /* and mdt_rename_lock() */
+ union {
+ struct obd_uuid uuid[2]; /* for mdt_seq_init_cli() */
+ char ns_name[48];/* for mdt_init0() */
+ struct lustre_cfg_bufs bufs; /* for mdt_stack_fini() */
+ struct obd_statfs osfs; /* for mdt_statfs() */
struct {
/* for mdt_readpage() */
struct lu_rdpg mti_rdpg;
void mdt_clear_disposition(struct mdt_thread_info *info,
struct ldlm_reply *rep, __u64 op_flag);
-void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lock_mode,
+void mdt_lock_pdo_init(struct mdt_lock_handle *lh, enum ldlm_mode lock_mode,
const struct lu_name *lname);
-void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm);
+void mdt_lock_reg_init(struct mdt_lock_handle *lh, enum ldlm_mode lm);
int mdt_lock_setup(struct mdt_thread_info *info, struct mdt_object *mo,
struct mdt_lock_handle *lh);
int mdt_remote_object_lock(struct mdt_thread_info *mti,
struct mdt_object *o, const struct lu_fid *fid,
struct lustre_handle *lh,
- ldlm_mode_t mode, __u64 ibits, bool nonblock);
+ enum ldlm_mode mode, __u64 ibits, bool nonblock);
enum mdt_name_flags {
MNF_FIX_ANON = 1,
/* Issues dlm lock on passed @ns, @f stores it lock handle into @lh. */
static inline int mdt_fid_lock(struct ldlm_namespace *ns,
- struct lustre_handle *lh,
- ldlm_mode_t mode,
- ldlm_policy_data_t *policy,
- const struct ldlm_res_id *res_id,
+ struct lustre_handle *lh, enum ldlm_mode mode,
+ union ldlm_policy_data *policy,
+ const struct ldlm_res_id *res_id,
__u64 flags, const __u64 *client_cookie)
{
- int rc;
+ int rc;
- LASSERT(ns != NULL);
- LASSERT(lh != NULL);
+ LASSERT(ns != NULL);
+ LASSERT(lh != NULL);
- rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_IBITS, policy,
- mode, &flags, mdt_blocking_ast,
- ldlm_completion_ast, NULL, NULL, 0,
+ rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_IBITS, policy,
+ mode, &flags, mdt_blocking_ast,
+ ldlm_completion_ast, NULL, NULL, 0,
LVB_T_NONE, client_cookie, lh);
- return rc == ELDLM_OK ? 0 : -EIO;
+ return rc == ELDLM_OK ? 0 : -EIO;
}
-static inline void mdt_fid_unlock(struct lustre_handle *lh,
- ldlm_mode_t mode)
+static inline void mdt_fid_unlock(struct lustre_handle *lh, enum ldlm_mode mode)
{
- ldlm_lock_decref(lh, mode);
+ ldlm_lock_decref(lh, mode);
}
extern mdl_mode_t mdt_mdl_lock_modes[];
-extern ldlm_mode_t mdt_dlm_lock_modes[];
+extern enum ldlm_mode mdt_dlm_lock_modes[];
-static inline mdl_mode_t mdt_dlm_mode2mdl_mode(ldlm_mode_t mode)
+static inline mdl_mode_t mdt_dlm_mode2mdl_mode(enum ldlm_mode mode)
{
- LASSERT(IS_PO2(mode));
- return mdt_mdl_lock_modes[mode];
+ LASSERT(IS_PO2(mode));
+ return mdt_mdl_lock_modes[mode];
}
-static inline ldlm_mode_t mdt_mdl_mode2dlm_mode(mdl_mode_t mode)
+static inline enum ldlm_mode mdt_mdl_mode2dlm_mode(mdl_mode_t mode)
{
- LASSERT(IS_PO2(mode));
- return mdt_dlm_lock_modes[mode];
+ LASSERT(IS_PO2(mode));
+ return mdt_dlm_lock_modes[mode];
}
/* mdt_lvb.c */
struct mdt_lock_handle *lhc,
__u64 *ibits)
{
- struct md_attr *ma = &info->mti_attr;
- __u64 open_flags = info->mti_spec.sp_cr_flags;
- ldlm_mode_t lm = LCK_CR;
- bool acq_lease = !!(open_flags & MDS_OPEN_LEASE);
- bool try_layout = false;
- bool create_layout = false;
- int rc = 0;
+ struct md_attr *ma = &info->mti_attr;
+ __u64 open_flags = info->mti_spec.sp_cr_flags;
+ enum ldlm_mode lm = LCK_CR;
+ bool acq_lease = !!(open_flags & MDS_OPEN_LEASE);
+ bool try_layout = false;
+ bool create_layout = false;
+ int rc = 0;
ENTRY;
*ibits = 0;
struct mdt_object *s0_obj,
struct ldlm_enqueue_info *einfo)
{
- ldlm_policy_data_t *policy = &mti->mti_policy;
- int rc;
+ union ldlm_policy_data *policy = &mti->mti_policy;
+ int rc;
ENTRY;
if (!S_ISDIR(obj->mot_header.loh_attr))
* will be stored in einfo->ei_cbdata.
**/
static int mdt_lock_slaves(struct mdt_thread_info *mti, struct mdt_object *obj,
- ldlm_mode_t mode, __u64 ibits,
+ enum ldlm_mode mode, __u64 ibits,
struct mdt_lock_handle *s0_lh,
struct mdt_object **s0_objp,
struct ldlm_enqueue_info *einfo)
{
- ldlm_policy_data_t *policy = &mti->mti_policy;
- struct lu_buf *buf = &mti->mti_buf;
- struct lmv_mds_md_v1 *lmv;
- struct lu_fid *fid = &mti->mti_tmp_fid1;
+ union ldlm_policy_data *policy = &mti->mti_policy;
+ struct lu_buf *buf = &mti->mti_buf;
+ struct lmv_mds_md_v1 *lmv;
+ struct lu_fid *fid = &mti->mti_tmp_fid1;
int rc;
ENTRY;
{
struct ldlm_res_id *res = &info->mti_res_id;
struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
- ldlm_policy_data_t *policy = &info->mti_policy;
+ union ldlm_policy_data *policy = &info->mti_policy;
int rc;
/*
MDS_INODELOCK_UPDATE, false);
mdt_object_put(info->mti_env, obj);
} else {
- struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
- ldlm_policy_data_t *policy = &info->mti_policy;
- struct ldlm_res_id *res_id = &info->mti_res_id;
- __u64 flags = 0;
+ struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
+ union ldlm_policy_data *policy = &info->mti_policy;
+ struct ldlm_res_id *res_id = &info->mti_res_id;
+ __u64 flags = 0;
fid_build_reg_res_name(&LUSTRE_BFL_FID, res_id);
memset(policy, 0, sizeof *policy);
}
/* Take a config lock so we can get cancel notifications */
-static int mgc_enqueue(struct obd_export *exp, __u32 type,
- ldlm_policy_data_t *policy, __u32 mode,
+static int mgc_enqueue(struct obd_export *exp, enum ldlm_type type,
+ union ldlm_policy_data *policy, enum ldlm_mode mode,
__u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb,
void *data, __u32 lvb_len, void *lvb_swabber,
struct lustre_handle *lockh)
RETURN(rc);
}
-static int mgc_cancel(struct obd_export *exp, ldlm_mode_t mode,
+static int mgc_cancel(struct obd_export *exp, enum ldlm_mode mode,
struct lustre_handle *lockh)
{
- ENTRY;
+ ENTRY;
- ldlm_lock_decref(lockh, mode);
+ ldlm_lock_decref(lockh, mode);
- RETURN(0);
+ RETURN(0);
}
static void mgc_notify_active(struct obd_device *unused)
struct lustre_handle lh = { 0 };
struct ofd_object *fo;
__u64 flags = 0;
- ldlm_mode_t lock_mode = LCK_PR;
+ enum ldlm_mode lock_mode = LCK_PR;
bool srvlock;
int rc;
ENTRY;
static int ofd_rw_hpreq_lock_match(struct ptlrpc_request *req,
struct ldlm_lock *lock)
{
- struct niobuf_remote *rnb;
- struct obd_ioobj *ioo;
- ldlm_mode_t mode;
- struct ldlm_extent ext;
- __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
+ struct niobuf_remote *rnb;
+ struct obd_ioobj *ioo;
+ enum ldlm_mode mode;
+ struct ldlm_extent ext;
+ __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
ENTRY;
* \retval negative value on error
*/
int ofd_intent_policy(struct ldlm_namespace *ns, struct ldlm_lock **lockp,
- void *req_cookie, ldlm_mode_t mode, __u64 flags,
+ void *req_cookie, enum ldlm_mode mode, __u64 flags,
void *data)
{
- struct ptlrpc_request *req = req_cookie;
- struct ldlm_lock *lock = *lockp, *l = NULL;
- struct ldlm_resource *res = lock->l_resource;
- ldlm_processing_policy policy;
- struct ost_lvb *res_lvb, *reply_lvb;
- struct ldlm_reply *rep;
- ldlm_error_t err;
- int idx, rc, only_liblustre = 1;
- struct ldlm_interval_tree *tree;
- struct ofd_intent_args arg;
- __u32 repsize[3] = {
+ struct ptlrpc_request *req = req_cookie;
+ struct ldlm_lock *lock = *lockp, *l = NULL;
+ struct ldlm_resource *res = lock->l_resource;
+ ldlm_processing_policy policy;
+ struct ost_lvb *res_lvb, *reply_lvb;
+ struct ldlm_reply *rep;
+ enum ldlm_error err;
+ int idx, rc, only_liblustre = 1;
+ struct ldlm_interval_tree *tree;
+ struct ofd_intent_args arg;
+ __u32 repsize[3] = {
[MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
[DLM_LOCKREPLY_OFF] = sizeof(*rep),
[DLM_REPLY_REC_OFF] = sizeof(*reply_lvb)
};
- struct ldlm_glimpse_work gl_work;
- struct list_head gl_list;
+ struct ldlm_glimpse_work gl_work;
+ struct list_head gl_list;
ENTRY;
INIT_LIST_HEAD(&gl_list);
/* ofd_dlm.c */
int ofd_intent_policy(struct ldlm_namespace *ns, struct ldlm_lock **lockp,
- void *req_cookie, ldlm_mode_t mode, __u64 flags,
+ void *req_cookie, enum ldlm_mode mode, __u64 flags,
void *data);
static inline struct ofd_thread_info *ofd_info(const struct lu_env *env)
int ofd_destroy_by_fid(const struct lu_env *env, struct ofd_device *ofd,
const struct lu_fid *fid, int orphan)
{
- struct ofd_thread_info *info = ofd_info(env);
- struct lustre_handle lockh;
- __u64 flags = LDLM_FL_AST_DISCARD_DATA;
- __u64 rc = 0;
- ldlm_policy_data_t policy = {
- .l_extent = { 0, OBD_OBJECT_EOF }
- };
- struct ofd_object *fo;
+ struct ofd_thread_info *info = ofd_info(env);
+ struct lustre_handle lockh;
+ union ldlm_policy_data policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
+ struct ofd_object *fo;
+ __u64 flags = LDLM_FL_AST_DISCARD_DATA;
+ __u64 rc = 0;
ENTRY;
#define OTI_PVEC_SIZE 256
struct osc_thread_info {
- struct ldlm_res_id oti_resname;
- ldlm_policy_data_t oti_policy;
- struct cl_lock_descr oti_descr;
- struct cl_attr oti_attr;
- struct lustre_handle oti_handle;
- struct cl_page_list oti_plist;
+ struct ldlm_res_id oti_resname;
+ union ldlm_policy_data oti_policy;
+ struct cl_lock_descr oti_descr;
+ struct cl_attr oti_attr;
+ struct lustre_handle oti_handle;
+ struct cl_page_list oti_plist;
struct cl_io oti_io;
void *oti_pvec[OTI_PVEC_SIZE];
/**
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t ind);
-void osc_index2policy (ldlm_policy_data_t *policy, const struct cl_object *obj,
- pgoff_t start, pgoff_t end);
-int osc_lvb_print (const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct ost_lvb *lvb);
+void osc_index2policy(union ldlm_policy_data *policy,
+ const struct cl_object *obj, pgoff_t start, pgoff_t end);
+int osc_lvb_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct ost_lvb *lvb);
void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
return (struct cl_object *)&obj->oo_cl;
}
-static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode)
+static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode)
{
- LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP);
- if (mode == CLM_READ)
- return LCK_PR;
- else if (mode == CLM_WRITE)
- return LCK_PW;
- else
- return LCK_GROUP;
+ LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP);
+ if (mode == CLM_READ)
+ return LCK_PR;
+ if (mode == CLM_WRITE)
+ return LCK_PW;
+ return LCK_GROUP;
}
-static inline enum cl_lock_mode osc_ldlm2cl_lock(ldlm_mode_t mode)
+static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode)
{
- LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP);
- if (mode == LCK_PR)
- return CLM_READ;
- else if (mode == LCK_PW)
- return CLM_WRITE;
- else
- return CLM_GROUP;
+ LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP);
+ if (mode == LCK_PR)
+ return CLM_READ;
+ if (mode == LCK_PW)
+ return CLM_WRITE;
+ return CLM_GROUP;
}
static inline struct osc_page *cl2osc_page(const struct cl_page_slice *slice)
int rc);
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u64 *flags, ldlm_policy_data_t *policy,
+ __u64 *flags, union ldlm_policy_data *policy,
struct ost_lvb *lvb, int kms_valid,
osc_enqueue_upcall_f upcall,
void *cookie, struct ldlm_enqueue_info *einfo,
struct ptlrpc_request_set *rqset, int async, int agl);
int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
-		   __u32 type, ldlm_policy_data_t *policy, __u32 mode,
-		   __u64 *flags, void *data, struct lustre_handle *lockh,
-		   int unref);
+		   enum ldlm_type type, union ldlm_policy_data *policy,
+		   enum ldlm_mode mode, __u64 *flags, void *data,
+		   struct lustre_handle *lockh, int unref);
}
static void osc_lock_build_policy(const struct lu_env *env,
- const struct cl_lock *lock,
- ldlm_policy_data_t *policy)
+ const struct cl_lock *lock,
+ union ldlm_policy_data *policy)
{
- const struct cl_lock_descr *d = &lock->cll_descr;
+ const struct cl_lock_descr *d = &lock->cll_descr;
- osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
- policy->l_extent.gid = d->cld_gid;
+ osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
+ policy->l_extent.gid = d->cld_gid;
}
static __u64 osc_enq2ldlm_flags(__u32 enqflags)
struct osc_lock *oscl = cl2osc_lock(slice);
struct cl_lock *lock = slice->cls_lock;
struct ldlm_res_id *resname = &info->oti_resname;
- ldlm_policy_data_t *policy = &info->oti_policy;
+ union ldlm_policy_data *policy = &info->oti_policy;
osc_enqueue_upcall_f upcall = osc_lock_upcall;
void *cookie = oscl;
bool async = false;
enum osc_dap_flags dap_flags)
{
struct osc_thread_info *info = osc_env_info(env);
- struct ldlm_res_id *resname = &info->oti_resname;
- ldlm_policy_data_t *policy = &info->oti_policy;
- struct lustre_handle lockh;
- struct ldlm_lock *lock = NULL;
- ldlm_mode_t mode;
- __u64 flags;
+ struct ldlm_res_id *resname = &info->oti_resname;
+ union ldlm_policy_data *policy = &info->oti_policy;
+ struct lustre_handle lockh;
+ struct ldlm_lock *lock = NULL;
+ enum ldlm_mode mode;
+ __u64 flags;
ENTRY;
struct ll_fiemap_info_key *fmkey,
struct fiemap *fiemap, size_t *buflen)
{
- struct obd_export *exp = osc_export(cl2osc(obj));
- struct ldlm_res_id resid;
- ldlm_policy_data_t policy;
- struct lustre_handle lockh;
- ldlm_mode_t mode = 0;
- struct ptlrpc_request *req;
- struct fiemap *reply;
- char *tmp;
- int rc;
+ struct obd_export *exp = osc_export(cl2osc(obj));
+ struct ldlm_res_id resid;
+ union ldlm_policy_data policy;
+ struct lustre_handle lockh;
+ enum ldlm_mode mode = LCK_MINMODE;
+ struct ptlrpc_request *req;
+ struct fiemap *reply;
+ char *tmp;
+ int rc;
ENTRY;
fmkey->lfik_oa.o_oi = cl2osc(obj)->oo_oinfo->loi_oi;
struct osc_thread_info *info;
struct ldlm_res_id *resname;
struct lustre_handle *lockh;
- ldlm_policy_data_t *policy;
- ldlm_mode_t dlmmode;
+ union ldlm_policy_data *policy;
+ enum ldlm_mode dlmmode;
__u64 flags;
might_sleep();
RETURN(result);
}
-void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
- pgoff_t start, pgoff_t end)
+void osc_index2policy(union ldlm_policy_data *policy,
+ const struct cl_object *obj, pgoff_t start, pgoff_t end)
{
- memset(policy, 0, sizeof *policy);
- policy->l_extent.start = cl_offset(obj, start);
- policy->l_extent.end = cl_offset(obj, end + 1) - 1;
+	memset(policy, 0, sizeof(*policy));
+ policy->l_extent.start = cl_offset(obj, start);
+ policy->l_extent.end = cl_offset(obj, end + 1) - 1;
}
static const char *osc_list(struct list_head *head)
struct osc_enqueue_args {
struct obd_export *oa_exp;
- ldlm_type_t oa_type;
- ldlm_mode_t oa_mode;
+ enum ldlm_type oa_type;
+ enum ldlm_mode oa_mode;
__u64 *oa_flags;
osc_enqueue_upcall_f oa_upcall;
void *oa_cookie;
* locks added to @cancels list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
struct list_head *cancels,
- ldlm_mode_t mode, __u64 lock_flags)
+ enum ldlm_mode mode, __u64 lock_flags)
{
- struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- struct ldlm_res_id res_id;
- struct ldlm_resource *res;
- int count;
- ENTRY;
+ struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
+ struct ldlm_res_id res_id;
+ struct ldlm_resource *res;
+ int count;
+ ENTRY;
/* Return, i.e. cancel nothing, only if ELC is supported (flag in
* export) but disabled through procfs (flag in NS).
static int osc_enqueue_fini(struct ptlrpc_request *req,
osc_enqueue_upcall_f upcall, void *cookie,
- struct lustre_handle *lockh, ldlm_mode_t mode,
+ struct lustre_handle *lockh, enum ldlm_mode mode,
__u64 *flags, int agl, int errcode)
{
bool intent = *flags & LDLM_FL_HAS_INTENT;
}
static int osc_enqueue_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct osc_enqueue_args *aa, int rc)
+ struct ptlrpc_request *req,
+ struct osc_enqueue_args *aa, int rc)
{
struct ldlm_lock *lock;
struct lustre_handle *lockh = &aa->oa_lockh;
- ldlm_mode_t mode = aa->oa_mode;
+ enum ldlm_mode mode = aa->oa_mode;
struct ost_lvb *lvb = aa->oa_lvb;
__u32 lvb_len = sizeof(*lvb);
__u64 flags = 0;
* is evicted from the cluster -- such scenarious make the life difficult, so
* release locks just after they are obtained. */
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u64 *flags, ldlm_policy_data_t *policy,
+ __u64 *flags, union ldlm_policy_data *policy,
struct ost_lvb *lvb, int kms_valid,
osc_enqueue_upcall_f upcall, void *cookie,
struct ldlm_enqueue_info *einfo,
struct ptlrpc_request *req = NULL;
int intent = *flags & LDLM_FL_HAS_INTENT;
__u64 match_lvb = agl ? 0 : LDLM_FL_LVB_READY;
- ldlm_mode_t mode;
+ enum ldlm_mode mode;
int rc;
ENTRY;
}
int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- __u32 type, ldlm_policy_data_t *policy, __u32 mode,
- __u64 *flags, void *data, struct lustre_handle *lockh,
- int unref)
+ enum ldlm_type type, union ldlm_policy_data *policy,
+ enum ldlm_mode mode, __u64 *flags, void *data,
+ struct lustre_handle *lockh, int unref)
{
struct obd_device *obd = exp->exp_obd;
__u64 lflags = *flags;
- ldlm_mode_t rc;
+ enum ldlm_mode rc;
ENTRY;
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
- RETURN(-EIO);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
+ RETURN(-EIO);
- /* Filesystem lock extents are extended to page boundaries so that
- * dealing with the page cache is a little smoother */
+ /* Filesystem lock extents are extended to page boundaries so that
+ * dealing with the page cache is a little smoother */
policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
policy->l_extent.end |= ~PAGE_MASK;
struct dt_object *dt,
struct lustre_handle *lh,
struct ldlm_enqueue_info *einfo,
- ldlm_policy_data_t *policy)
+ union ldlm_policy_data *policy)
{
struct ldlm_res_id *res_id;
struct dt_device *dt_dev = lu2dt_dev(dt->do_lu.lo_dev);
struct ptlrpc_request *req;
int rc = 0;
__u64 flags = 0;
- ldlm_mode_t mode;
+ enum ldlm_mode mode;
res_id = einfo->ei_res_id;
LASSERT(res_id != NULL);
RETURN(PTR_ERR(req));
rc = ldlm_cli_enqueue(osp->opd_exp, &req, einfo, res_id,
- (const ldlm_policy_data_t *)policy,
+ (const union ldlm_policy_data *)policy,
&flags, NULL, 0, LVB_T_NONE, lh, 0);
ptlrpc_req_finished(req);
static int osp_md_object_unlock(const struct lu_env *env,
struct dt_object *dt,
struct ldlm_enqueue_info *einfo,
- ldlm_policy_data_t *policy)
+ union ldlm_policy_data *policy)
{
struct lustre_handle *lockh = einfo->ei_cbdata;
__swab64s (&id->name[i]);
}
-void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d)
+void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d)
{
- /* the lock data is a union and the first two fields are always an
- * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
- * data the same way. */
- __swab64s(&d->l_extent.start);
- __swab64s(&d->l_extent.end);
- __swab64s(&d->l_extent.gid);
- __swab64s(&d->l_flock.lfw_owner);
- __swab32s(&d->l_flock.lfw_pid);
+ /* the lock data is a union and the first two fields are always an
+ * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
+ * data the same way. */
+ __swab64s(&d->l_extent.start);
+ __swab64s(&d->l_extent.end);
+ __swab64s(&d->l_extent.gid);
+ __swab64s(&d->l_flock.lfw_owner);
+ __swab32s(&d->l_flock.lfw_pid);
}
void lustre_swab_ldlm_intent (struct ldlm_intent *i)
__swab64s (&i->opc);
}
-void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r)
+void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r)
{
- __swab32s (&r->lr_type);
- CLASSERT(offsetof(typeof(*r), lr_padding) != 0);
- lustre_swab_ldlm_res_id (&r->lr_name);
+ __swab32s(&r->lr_type);
+ CLASSERT(offsetof(typeof(*r), lr_pad) != 0);
+ lustre_swab_ldlm_res_id(&r->lr_name);
}
void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l)
(long long)(int)offsetof(struct ldlm_resource_desc, lr_type));
LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_type) == 4, "found %lld\n",
(long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_type));
- LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_padding) == 4, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_resource_desc, lr_padding));
- LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_padding));
+ LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_pad) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_resource_desc, lr_pad));
+ LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_pad) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_pad));
LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_name) == 8, "found %lld\n",
(long long)(int)offsetof(struct ldlm_resource_desc, lr_name));
LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_name) == 32, "found %lld\n",
__u64 start, __u64 end, struct lustre_handle *lh,
int mode, __u64 *flags)
{
- ldlm_policy_data_t policy;
- int rc;
+ union ldlm_policy_data policy;
+ int rc;
ENTRY;
}
EXPORT_SYMBOL(tgt_extent_lock);
-void tgt_extent_unlock(struct lustre_handle *lh, ldlm_mode_t mode)
+void tgt_extent_unlock(struct lustre_handle *lh, enum ldlm_mode mode)
{
LASSERT(lustre_handle_is_used(lh));
ldlm_lock_decref(lh, mode);
int tgt_brw_lock(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
struct obd_ioobj *obj, struct niobuf_remote *nb,
- struct lustre_handle *lh, int mode)
+ struct lustre_handle *lh, enum ldlm_mode mode)
{
__u64 flags = 0;
int nrbufs = obj->ioo_bufcnt;
}
void tgt_brw_unlock(struct obd_ioobj *obj, struct niobuf_remote *niob,
- struct lustre_handle *lh, int mode)
+ struct lustre_handle *lh, enum ldlm_mode mode)
{
ENTRY;
BLANK_LINE();
CHECK_STRUCT(ldlm_resource_desc);
CHECK_MEMBER(ldlm_resource_desc, lr_type);
- CHECK_MEMBER(ldlm_resource_desc, lr_padding);
+ CHECK_MEMBER(ldlm_resource_desc, lr_pad);
CHECK_MEMBER(ldlm_resource_desc, lr_name);
}
(long long)(int)offsetof(struct ldlm_resource_desc, lr_type));
LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_type) == 4, "found %lld\n",
(long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_type));
- LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_padding) == 4, "found %lld\n",
- (long long)(int)offsetof(struct ldlm_resource_desc, lr_padding));
- LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_padding));
+ LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_pad) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_resource_desc, lr_pad));
+ LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_pad) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_pad));
LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_name) == 8, "found %lld\n",
(long long)(int)offsetof(struct ldlm_resource_desc, lr_name));
LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_name) == 32, "found %lld\n",