void *req_cookie, ldlm_mode_t mode, __u64 flags,
void *data);
-typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);
+typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock);
/**
 * LVB operations.
 */
/**
* Position in global namespace list linking all namespaces on
* the node.
*/
- cfs_list_t ns_list_chain;
+ struct list_head ns_list_chain;
/**
 * List of unused locks for this namespace. This list is also called
 * the LRU lock list; locks are released from the head of this list.
* Locks are linked via l_lru field in \see struct ldlm_lock.
*/
- cfs_list_t ns_unused_list;
+ struct list_head ns_unused_list;
/** Number of locks in the LRU list above */
int ns_nr_unused;
/** Limit of parallel AST RPC count. */
unsigned ns_max_parallel_ast;
- /** Callback to cancel locks before replaying it during recovery. */
- ldlm_cancel_for_recovery ns_cancel_for_recovery;
+ /**
+ * Callback to check if a lock is good to be canceled by ELC or
+ * during recovery.
+ */
+ ldlm_cancel_cbt ns_cancel;
/** LDLM lock stats */
struct lprocfs_stats *ns_stats;
};
static inline void ns_register_cancel(struct ldlm_namespace *ns,
- ldlm_cancel_for_recovery arg)
+ ldlm_cancel_cbt arg)
{
- LASSERT(ns != NULL);
- ns->ns_cancel_for_recovery = arg;
+ LASSERT(ns != NULL);
+ ns->ns_cancel = arg;
}
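/*
 * Illustrative only (not part of this patch): a client registers its cancel
 * check at namespace setup time; the function names below are hypothetical.
 */
static int my_cancel_check(struct ldlm_lock *lock)
{
	/* Nonzero: the lock may be canceled by ELC or during recovery. */
	return 1;
}

static void my_namespace_setup(struct ldlm_namespace *ns)
{
	ns_register_cancel(ns, my_cancel_check);
}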
struct ldlm_lock;
/** Work list for sending GL ASTs to multiple locks. */
struct ldlm_glimpse_work {
struct ldlm_lock *gl_lock; /* lock to glimpse */
- cfs_list_t gl_list; /* linkage to other gl work structs */
+ struct list_head gl_list; /* linkage to other gl work structs */
__u32 gl_flags;/* see LDLM_GL_WORK_* below */
union ldlm_gl_desc *gl_desc; /* glimpse descriptor to be packed in
* glimpse callback request */
/** Interval node data for each LDLM_EXTENT lock. */
struct ldlm_interval {
struct interval_node li_node; /* node for tree management */
- cfs_list_t li_group; /* the locks which have the same
+ struct list_head li_group; /* the locks which have the same
* policy - group of the policy */
};
#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
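/*
 * Sketch, assuming 'node' came from an extent interval-tree traversal:
 * recover the enclosing ldlm_interval and visit the locks attached to its
 * policy group (linked via l_sl_policy).
 */
static void visit_policy_group(struct interval_node *node)
{
	struct ldlm_interval *ext = to_ldlm_interval(node);
	struct ldlm_lock *lock;

	list_for_each_entry(lock, &ext->li_group, l_sl_policy)
		LDLM_DEBUG(lock, "same-policy group member");
}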
__u32 pid;
};
-typedef union {
- struct ldlm_extent l_extent;
- struct ldlm_flock l_flock;
- struct ldlm_inodebits l_inodebits;
-} ldlm_policy_data_t;
+union ldlm_policy_data {
+ struct ldlm_extent l_extent;
+ struct ldlm_flock l_flock;
+ struct ldlm_inodebits l_inodebits;
+};
+
+typedef union ldlm_policy_data ldlm_policy_data_t;
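/*
 * Sketch: with the union now named, a policy can be declared without the
 * typedef; the values below (a whole-file extent) are illustrative.
 */
union ldlm_policy_data policy = {
	.l_extent = { .start = 0, .end = OBD_OBJECT_EOF },
};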
void ldlm_convert_policy_to_wire(ldlm_type_t type,
const ldlm_policy_data_t *lpolicy,
* List item for client side LRU list.
* Protected by ns_lock in struct ldlm_namespace.
*/
- cfs_list_t l_lru;
+ struct list_head l_lru;
/**
* Linkage to resource's lock queues according to current lock state.
* (could be granted, waiting or converting)
* Protected by lr_lock in struct ldlm_resource.
*/
- cfs_list_t l_res_link;
+ struct list_head l_res_link;
/**
* Tree node for ldlm_extent.
*/
* Per export hash of locks.
* Protected by per-bucket exp->exp_lock_hash locks.
*/
- cfs_hlist_node_t l_exp_hash;
+ struct hlist_node l_exp_hash;
/**
* Per export hash of flock locks.
* Protected by per-bucket exp->exp_flock_hash locks.
*/
- cfs_hlist_node_t l_exp_flock_hash;
+ struct hlist_node l_exp_flock_hash;
/**
* Requested mode.
* Protected by lr_lock.
* expired_lock_thread.elt_expired_locks for further processing.
* Protected by elt_lock.
*/
- cfs_list_t l_pending_chain;
+ struct list_head l_pending_chain;
/**
* Set when lock is sent a blocking AST. Time in seconds when timeout
*/
int l_bl_ast_run;
/** List item ldlm_add_ast_work_item() for case of blocking ASTs. */
- cfs_list_t l_bl_ast;
+ struct list_head l_bl_ast;
/** List item ldlm_add_ast_work_item() for case of completion ASTs. */
- cfs_list_t l_cp_ast;
+ struct list_head l_cp_ast;
/** For ldlm_add_ast_work_item() for "revoke" AST used in COS. */
- cfs_list_t l_rk_ast;
+ struct list_head l_rk_ast;
/**
* Pointer to a conflicting lock that caused blocking AST to be sent
* Protected by lr_lock, linkages to "skip lists".
* For more explanations of skip lists see ldlm/ldlm_inodebits.c
*/
- cfs_list_t l_sl_mode;
- cfs_list_t l_sl_policy;
+ struct list_head l_sl_mode;
+ struct list_head l_sl_policy;
/** Reference tracking structure to debug leaked locks. */
struct lu_ref l_reference;
/** number of export references taken */
int l_exp_refs_nr;
/** link all locks referencing one export */
- cfs_list_t l_exp_refs_link;
+ struct list_head l_exp_refs_link;
/** referenced export object */
struct obd_export *l_exp_refs_target;
#endif
 * Lock order of waiting_locks_spinlock, exp_bl_list_lock and res lock
 * is: res lock -> exp_bl_list_lock -> waiting_locks_spinlock.
*/
- cfs_list_t l_exp_list;
+ struct list_head l_exp_list;
};
/**
* List item for list in namespace hash.
* protected by ns_lock
*/
- cfs_hlist_node_t lr_hash;
+ struct hlist_node lr_hash;
/** Spinlock to protect locks under this resource. */
spinlock_t lr_lock;
* protected by lr_lock
* @{ */
/** List of locks in granted state */
- cfs_list_t lr_granted;
+ struct list_head lr_granted;
/** List of locks waiting to change their granted mode (converted) */
- cfs_list_t lr_converting;
+ struct list_head lr_converting;
/**
* List of locks that could not be granted due to conflicts and
* that are waiting for conflicts to go away */
- cfs_list_t lr_waiting;
+ struct list_head lr_waiting;
/** @} */
/* XXX No longer needed? Remove ASAP */
};
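/*
 * Illustrative only: walk the granted queue of a resource. lr_lock must be
 * held (see check_res_locked() below); locks are linked on lr_granted via
 * their l_res_link field.
 */
static void dump_granted(struct ldlm_resource *res)
{
	struct ldlm_lock *lock;

	check_res_locked(res);
	list_for_each_entry(lock, &res->lr_granted, l_res_link)
		LDLM_DEBUG(lock, "granted");
}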
struct ldlm_ast_work {
- struct ldlm_lock *w_lock;
- int w_blocking;
- struct ldlm_lock_desc w_desc;
- cfs_list_t w_list;
- int w_flags;
- void *w_data;
- int w_datalen;
+ struct ldlm_lock *w_lock;
+ int w_blocking;
+ struct ldlm_lock_desc w_desc;
+ struct list_head w_list;
+ int w_flags;
+ void *w_data;
+ int w_datalen;
};
/**
	void *ei_cb_cp;	 /**< lock completion callback */
	void *ei_cb_gl;	 /**< lock glimpse callback */
	void *ei_cbdata; /**< Data to be passed into callbacks. */
+	unsigned int ei_enq_slave:1; /* whether to enqueue slave stripes */
};
+#define ei_res_id ei_cb_gl
+
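/*
 * Illustrative initializer for struct ldlm_enqueue_info using the new bit;
 * the blocking callback name is a placeholder.
 */
struct ldlm_enqueue_info einfo = {
	.ei_type      = LDLM_IBITS,
	.ei_mode      = LCK_EX,
	.ei_cb_bl     = my_blocking_ast,     /* hypothetical callback */
	.ei_cb_cp     = ldlm_completion_ast,
	.ei_enq_slave = 1,                   /* also enqueue slave stripes */
};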
extern struct obd_ops ldlm_obd_ops;
extern char *ldlm_lockname[];
#endif
typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, ldlm_error_t *err,
- cfs_list_t *work_list);
+ int first_enq, ldlm_error_t *err,
+ struct list_head *work_list);
/**
* Return values for lock iterators.
void *data, int flag);
int ldlm_server_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
-int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list);
+int ldlm_glimpse_locks(struct ldlm_resource *res,
+ struct list_head *gl_work_list);
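/*
 * Hypothetical server-side sketch: queue a single glimpse work item for
 * 'lock' and send the GL AST batch for its resource.
 */
static int send_one_glimpse(struct ldlm_resource *res, struct ldlm_lock *lock)
{
	struct ldlm_glimpse_work *gl_work;
	LIST_HEAD(gl_list);

	OBD_ALLOC_PTR(gl_work);
	if (gl_work == NULL)
		return -ENOMEM;

	gl_work->gl_lock = LDLM_LOCK_GET(lock);
	gl_work->gl_flags = 0;
	list_add_tail(&gl_work->gl_list, &gl_list);

	/* ldlm_glimpse_locks() sends the ASTs and drains the list. */
	return ldlm_glimpse_locks(res, &gl_list);
}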
/** @} ldlm_srv_ast */
/** \defgroup ldlm_handlers Server LDLM handlers
lock; \
})
-#define ldlm_lock_list_put(head, member, count) \
-({ \
- struct ldlm_lock *_lock, *_next; \
- int c = count; \
- cfs_list_for_each_entry_safe(_lock, _next, head, member) { \
- if (c-- == 0) \
- break; \
- cfs_list_del_init(&_lock->member); \
- LDLM_LOCK_RELEASE(_lock); \
- } \
- LASSERT(c <= 0); \
+#define ldlm_lock_list_put(head, member, count) \
+({ \
+ struct ldlm_lock *_lock, *_next; \
+ int c = count; \
+ list_for_each_entry_safe(_lock, _next, head, member) { \
+ if (c-- == 0) \
+ break; \
+ list_del_init(&_lock->member); \
+ LDLM_LOCK_RELEASE(_lock); \
+ } \
+ LASSERT(c <= 0); \
})
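/*
 * Typical use (illustrative): drop up to 'count' locks gathered on a cancel
 * list via their l_bl_ast linkage, releasing a reference on each.
 */
ldlm_lock_list_put(&cancels, l_bl_ast, count);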
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
int ldlm_resource_putref(struct ldlm_resource *res);
void ldlm_resource_add_lock(struct ldlm_resource *res,
- cfs_list_t *head,
- struct ldlm_lock *lock);
+ struct list_head *head,
+ struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
struct lustre_handle *lockh, int async);
int ldlm_prep_enqueue_req(struct obd_export *exp,
- struct ptlrpc_request *req,
- cfs_list_t *cancels,
- int count);
-int ldlm_prep_elc_req(struct obd_export *exp,
- struct ptlrpc_request *req,
- int version, int opc, int canceloff,
- cfs_list_t *cancels, int count);
+ struct ptlrpc_request *req,
+ struct list_head *cancels,
+ int count);
+int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
+ int version, int opc, int canceloff,
+ struct list_head *cancels, int count);
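/*
 * For orientation: the enqueue variant can be a thin wrapper over the ELC
 * variant, matching the pattern used in ldlm_request.c.
 */
int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
			  struct list_head *cancels, int count)
{
	return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
				 LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
}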
struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp, int lvb_len);
int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
ldlm_mode_t mode,
ldlm_cancel_flags_t flags,
void *opaque);
-int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *head,
- int count, ldlm_cancel_flags_t flags);
+int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
+ int count, ldlm_cancel_flags_t flags);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
- cfs_list_t *cancels,
+ struct list_head *cancels,
ldlm_policy_data_t *policy,
ldlm_mode_t mode, __u64 lock_flags,
ldlm_cancel_flags_t cancel_flags, void *opaque);
-int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
+int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
ldlm_cancel_flags_t flags);
-int ldlm_cli_cancel_list(cfs_list_t *head, int count,
+int ldlm_cli_cancel_list(struct list_head *head, int count,
struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
/** @} ldlm_cli_api */
int intent_disposition(struct ldlm_reply *rep, int flag);
void intent_set_disposition(struct ldlm_reply *rep, int flag);
-
-/* ioctls for trying requests */
-#define IOC_LDLM_TYPE 'f'
-#define IOC_LDLM_MIN_NR 40
-
-#define IOC_LDLM_TEST _IOWR('f', 40, long)
-#define IOC_LDLM_DUMP _IOWR('f', 41, long)
-#define IOC_LDLM_REGRESS_START _IOWR('f', 42, long)
-#define IOC_LDLM_REGRESS_STOP _IOWR('f', 43, long)
-#define IOC_LDLM_MAX_NR 43
-
/**
* "Modes" of acquiring lock_res, necessary to tell lockdep that taking more
* than one lock_res is dead-lock safe.
/** Check if resource is already locked, assert if not. */
static inline void check_res_locked(struct ldlm_resource *res)
{
- LASSERT(spin_is_locked(&res->lr_lock));
+ assert_spin_locked(&res->lr_lock);
}
struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock);
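/*
 * Sketch of the intended pattern: lock_res_and_lock() takes lr_lock through
 * the lock, check_res_locked() asserts it, unlock_res_and_unlock() drops it.
 */
static void with_res_locked(struct ldlm_lock *lock)
{
	struct ldlm_resource *res = lock_res_and_lock(lock);

	check_res_locked(res);
	/* ... inspect or update lock state under lr_lock ... */
	unlock_res_and_unlock(lock);
}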