#define LDLM_FL_REPLAY 0x000100
#define LDLM_FL_INTENT_ONLY 0x000200 /* don't grant lock, just do intent */
-#define LDLM_FL_LOCAL_ONLY 0x000400 /* see ldlm_cli_cancel_unused */
+#define LDLM_FL_LOCAL_ONLY 0x000400
/* don't run the cancel callback under ldlm_cli_cancel_unused */
#define LDLM_FL_FAILED 0x000800
#define LDLM_FL_HAS_INTENT 0x001000 /* lock request has intent */
#define LDLM_FL_CANCELING 0x002000 /* lock cancel has already been sent */
#define LDLM_FL_LOCAL 0x004000 /* local lock (ie, no srv/cli split) */
-#define LDLM_FL_WARN 0x008000 /* see ldlm_cli_cancel_unused */
+/* was LDLM_FL_WARN until 2.0.0 0x008000 */
#define LDLM_FL_DISCARD_DATA 0x010000 /* discard (no writeback) on cancel */
#define LDLM_FL_NO_TIMEOUT 0x020000 /* Blocked by group lock - wait
* w/o involving separate thread. in order to decrease cs rate */
#define LDLM_FL_ATOMIC_CB 0x4000000
-/* Cancel lock asynchronously. See ldlm_cli_cancel_unused_resource. */
-#define LDLM_FL_ASYNC 0x8000000
+/* was LDLM_FL_ASYNC until 2.0.0 0x8000000 */
/* It may happen that a client initiate 2 operations, e.g. unlink and mkdir,
* such that server send blocking ast for conflict locks to this client for
#define LUSTRE_TRACKS_LOCK_EXP_REFS (1)
+/* Cancel flags. */
+typedef enum {
+ LCF_ASYNC = 0x1, /* Cancel locks asynchronously. */
+ LCF_LOCAL = 0x2, /* Cancel locks locally, not notifying server */
+ LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
+ * in the same RPC */
+} ldlm_cancel_flags_t;
+
struct ldlm_lock {
/**
* Must be first in the structure.
const struct ldlm_request *dlm_req);
int ldlm_cli_cancel(struct lustre_handle *lockh);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
- int flags, void *opaque);
+ ldlm_cancel_flags_t flags, void *opaque);
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
ldlm_policy_data_t *policy,
- ldlm_mode_t mode, int flags, void *opaque);
+ ldlm_mode_t mode,
+ ldlm_cancel_flags_t flags,
+ void *opaque);
int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *head,
- int count, int flags);
+ int count, ldlm_cancel_flags_t flags);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
cfs_list_t *cancels,
ldlm_policy_data_t *policy,
ldlm_mode_t mode, int lock_flags,
- int cancel_flags, void *opaque);
+ ldlm_cancel_flags_t cancel_flags, void *opaque);
int ldlm_cli_cancel_list(cfs_list_t *head, int count,
- struct ptlrpc_request *req, int flags);
+ struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
/* mds/handler.c */
/* This has to be here because recursive inclusion sucks. */
int (*o_cancel)(struct obd_export *, struct lov_stripe_md *md,
__u32 mode, struct lustre_handle *);
int (*o_cancel_unused)(struct obd_export *, struct lov_stripe_md *,
- int flags, void *opaque);
+ ldlm_cancel_flags_t flags, void *opaque);
int (*o_init_export)(struct obd_export *exp);
int (*o_destroy_export)(struct obd_export *exp);
int (*o_extent_calc)(struct obd_export *, struct lov_stripe_md *,
struct lustre_handle *);
int (*m_cancel_unused)(struct obd_export *, const struct lu_fid *,
- ldlm_policy_data_t *, ldlm_mode_t, int flags,
- void *opaque);
+ ldlm_policy_data_t *, ldlm_mode_t,
+ ldlm_cancel_flags_t flags, void *opaque);
int (*m_renew_capa)(struct obd_export *, struct obd_capa *oc,
renew_capa_cb_t cb);
int (*m_unpack_capa)(struct obd_export *, struct ptlrpc_request *,
static inline int obd_cancel_unused(struct obd_export *exp,
struct lov_stripe_md *ea,
- int flags, void *opaque)
+ ldlm_cancel_flags_t flags,
+ void *opaque)
{
int rc;
ENTRY;
static inline int md_cancel_unused(struct obd_export *exp,
const struct lu_fid *fid,
ldlm_policy_data_t *policy,
- ldlm_mode_t mode, int flags, void *opaque)
+ ldlm_mode_t mode,
+ ldlm_cancel_flags_t flags,
+ void *opaque)
{
int rc;
ENTRY;
int flags);
int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
cfs_list_t *cancels, int count, int max,
- int cancel_flags, int flags);
+ ldlm_cancel_flags_t cancel_flags, int flags);
extern int ldlm_enqueue_min;
int ldlm_get_enq_timeout(struct ldlm_lock *lock);
if (obd->obd_namespace != NULL) {
/* obd_force == local only */
ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
- obd->obd_force ? LDLM_FL_LOCAL_ONLY:0,
- NULL);
+ obd->obd_force ? LCF_LOCAL : 0, NULL);
ldlm_namespace_free_prior(obd->obd_namespace, imp, obd->obd_force);
}
/* Prepare and send a batched cancel rpc, it will include count lock handles
* of locks given in @head. */
int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *cancels,
- int count, int flags)
+ int count, ldlm_cancel_flags_t flags)
{
struct ptlrpc_request *req = NULL;
struct obd_import *imp;
ldlm_cancel_pack(req, cancels, count);
ptlrpc_request_set_replen(req);
- if (flags & LDLM_FL_ASYNC) {
+ if (flags & LCF_ASYNC) {
ptlrpcd_add_req(req, PSCOPE_OTHER);
sent = count;
GOTO(out, 0);
flags = ns_connect_lru_resize(ns) ?
LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
- LDLM_FL_BL_AST, flags);
+ LCF_BL_AST, flags);
}
ldlm_cli_cancel_list(&cancels, count, NULL, 0);
RETURN(0);
/* XXX until we will have compound requests and can cut cancels from generic rpc
* we need send cancels with LDLM_FL_BL_AST flag as separate rpc */
-static int ldlm_cancel_list(cfs_list_t *cancels, int count, int flags)
+static int ldlm_cancel_list(cfs_list_t *cancels, int count,
+ ldlm_cancel_flags_t flags)
{
CFS_LIST_HEAD(head);
struct ldlm_lock *lock, *next;
if (left-- == 0)
break;
- if (flags & LDLM_FL_LOCAL_ONLY) {
+ if (flags & LCF_LOCAL) {
rc = LDLM_FL_LOCAL_ONLY;
ldlm_lock_cancel(lock);
} else {
rc = ldlm_cli_cancel_local(lock);
}
- if (!(flags & LDLM_FL_BL_AST) && (rc == LDLM_FL_BL_AST)) {
+ if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
LDLM_DEBUG(lock, "Cancel lock separately");
cfs_list_del_init(&lock->l_bl_ast);
cfs_list_add(&lock->l_bl_ast, &head);
* flags & LDLM_CANCEL_AGED - cancel alocks according to "aged policy".
*/
int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
- int count, int max, int cancel_flags, int flags)
+ int count, int max, ldlm_cancel_flags_t cancel_flags,
+ int flags)
{
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
cfs_list_t *cancels,
ldlm_policy_data_t *policy,
ldlm_mode_t mode, int lock_flags,
- int cancel_flags, void *opaque)
+ ldlm_cancel_flags_t cancel_flags, void *opaque)
{
struct ldlm_lock *lock;
int count = 0;
continue;
}
- if (lock->l_readers || lock->l_writers) {
- if (cancel_flags & LDLM_FL_WARN) {
- LDLM_ERROR(lock, "lock in use");
- //LBUG();
- }
+ if (lock->l_readers || lock->l_writers)
continue;
- }
/* If somebody is already doing CANCEL, or blocking ast came,
* skip this lock. */
* buffer at the offset @off.
* Destroy @cancels at the end. */
int ldlm_cli_cancel_list(cfs_list_t *cancels, int count,
- struct ptlrpc_request *req, int flags)
+ struct ptlrpc_request *req, ldlm_cancel_flags_t flags)
{
struct ldlm_lock *lock;
int res = 0;
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
ldlm_policy_data_t *policy,
- ldlm_mode_t mode, int flags, void *opaque)
+ ldlm_mode_t mode,
+ ldlm_cancel_flags_t flags,
+ void *opaque)
{
struct ldlm_resource *res;
CFS_LIST_HEAD(cancels);
/* Cancel all locks on a namespace (or a specific resource, if given)
* that have 0 readers/writers.
*
- * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
+ * If flags & LCF_LOCAL, throw the locks away without trying
* to notify the server. */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
- int flags, void *opaque)
+ ldlm_cancel_flags_t flags, void *opaque)
{
int i;
ENTRY;
CDEBUG(D_INODE, "EARLY_CANCEL slave "DFID" -> mds #%d\n",
PFID(st_fid), tgt->ltd_idx);
rc = md_cancel_unused(tgt->ltd_exp, st_fid, &policy,
- mode, LDLM_FL_ASYNC, NULL);
+ mode, LCF_ASYNC, NULL);
if (rc)
GOTO(out_put_obj, rc);
} else {
CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid));
policy.l_inodebits.bits = bits;
rc = md_cancel_unused(tgt->ltd_exp, fid, &policy,
- mode, LDLM_FL_ASYNC, NULL);
+ mode, LCF_ASYNC, NULL);
} else {
CDEBUG(D_INODE,
"EARLY_CANCEL skip operation target %d on "DFID"\n",
static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
ldlm_policy_data_t *policy, ldlm_mode_t mode,
- int flags, void *opaque)
+ ldlm_cancel_flags_t flags, void *opaque)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
static int lov_cancel_unused(struct obd_export *exp,
struct lov_stripe_md *lsm,
- int flags, void *opaque)
+ ldlm_cancel_flags_t flags, void *opaque)
{
struct lov_obd *lov;
int rc = 0, i;
struct ptlrpc_request **request);
int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
ldlm_policy_data_t *policy, ldlm_mode_t mode,
- int flags, void *opaque);
+ ldlm_cancel_flags_t flags, void *opaque);
static inline void mdc_set_capa_size(struct ptlrpc_request *req,
const struct req_msg_field *field,
int mdc_cancel_unused(struct obd_export *exp,
const struct lu_fid *fid,
ldlm_policy_data_t *policy,
- ldlm_mode_t mode, int flags, void *opaque)
+ ldlm_mode_t mode,
+ ldlm_cancel_flags_t flags,
+ void *opaque)
{
struct ldlm_res_id res_id;
struct obd_device *obd = class_exp2obd(exp);
}
static int osc_cancel_unused(struct obd_export *exp,
- struct lov_stripe_md *lsm, int flags,
+ struct lov_stripe_md *lsm,
+ ldlm_cancel_flags_t flags,
void *opaque)
{
struct obd_device *obd = class_exp2obd(exp);