#define LDLM_FL_BLOCK_GRANTED 0x000002
#define LDLM_FL_BLOCK_CONV 0x000004
#define LDLM_FL_BLOCK_WAIT 0x000008
-#define LDLM_FL_CBPENDING 0x000010
#define LDLM_FL_AST_SENT 0x000020
-#define LDLM_FL_WAIT_NOREPROC 0x000040
-#define LDLM_FL_CANCEL 0x000080
#define LDLM_FL_REPLAY 0x000100
#define LDLM_FL_INTENT_ONLY 0x000200
-#define LDLM_FL_LOCAL_ONLY 0x000400
-#define LDLM_FL_FAILED 0x000800
#define LDLM_FL_HAS_INTENT 0x001000
-#define LDLM_FL_CANCELING 0x002000
-#define LDLM_FL_LOCAL 0x004000
-#define LDLM_FL_WARN 0x008000
#define LDLM_FL_DISCARD_DATA 0x010000
#define LDLM_FL_NO_TIMEOUT 0x020000
#define LDLM_FL_BLOCK_NOWAIT 0x040000
#define LDLM_FL_TEST_LOCK 0x080000
-#define LDLM_FL_LVB_READY 0x100000
-#define LDLM_FL_KMS_IGNORE 0x200000
-#define LDLM_FL_NO_LRU 0x400000
#define LDLM_FL_CANCEL_ON_BLOCK 0x800000
-#define LDLM_FL_CP_REQD 0x1000000
-#define LDLM_FL_CLEANED 0x2000000
-#define LDLM_FL_ATOMIC_CB 0x4000000
-#define LDLM_FL_BL_AST 0x10000000
-#define LDLM_FL_BL_DONE 0x20000000
#define LDLM_FL_DENY_ON_CONTENTION 0x40000000
#define LDLM_AST_DISCARD_DATA 0x80000000
-
#define LDLM_ENQUEUE (101)
#define LDLM_CONVERT (102)
#define LDLM_CANCEL (103)
{0x000002 , "LDLM_FL_BLOCK_GRANTED"},
{0x000004 , "LDLM_FL_BLOCK_CONV"},
{0x000008 , "LDLM_FL_BLOCK_WAIT"},
- {0x000010 , "LDLM_FL_CBPENDING"},
{0x000020 , "LDLM_FL_AST_SENT"},
- {0x000040 , "LDLM_FL_WAIT_NOREPROC"},
- {0x000080 , "LDLM_FL_CANCEL"},
{0x000100 , "LDLM_FL_REPLAY"},
{0x000200 , "LDLM_FL_INTENT_ONLY"},
- {0x000400 , "LDLM_FL_LOCAL_ONLY"},
- {0x000800 , "LDLM_FL_FAILED"},
{0x001000 , "LDLM_FL_HAS_INTENT"},
- {0x002000 , "LDLM_FL_CANCELING"},
- {0x004000 , "LDLM_FL_LOCAL"},
- {0x008000 , "LDLM_FL_WARN"},
{0x010000 , "LDLM_FL_DISCARD_DATA"},
{0x020000 , "LDLM_FL_NO_TIMEOUT"},
{0x040000 , "LDLM_FL_BLOCK_NOWAIT"},
{0x080000 , "LDLM_FL_TEST_LOCK"},
- {0x100000 , "LDLM_FL_LVB_READY"},
- {0x200000 , "LDLM_FL_KMS_IGNORE"},
- {0x400000 , "LDLM_FL_NO_LRU"},
{0x800000 , "LDLM_FL_CANCEL_ON_BLOCK"},
- {0x1000000 , "LDLM_FL_CP_REQD"},
- {0x2000000 , "LDLM_FL_CLEANED"},
- {0x4000000 , "LDLM_FL_ATOMIC_CB"},
- {0x10000000 , "LDLM_FL_BL_AST"},
- {0x20000000 , "LDLM_FL_BL_DONE"},
{0x40000000 , "LDLM_FL_DENY_ON_CONTENTION"},
{0x80000000 , "LDLM_AST_DISCARD_DATA"},
{ 0, NULL }
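For illustration, a minimal self-contained C sketch of how a {value, name} table like the one above can be walked to render a 32-bit lock_flags word. The table excerpt and function name are stand-ins, not the dissector's actual code:

    #include <stdio.h>

    struct flag_name {
            unsigned int value;
            const char *name;
    };

    /* Illustrative excerpt of the table above; terminated by {0, NULL}. */
    static const struct flag_name ldlm_flag_names[] = {
            {0x000002, "LDLM_FL_BLOCK_GRANTED"},
            {0x000004, "LDLM_FL_BLOCK_CONV"},
            {0x000008, "LDLM_FL_BLOCK_WAIT"},
            {0, NULL}
    };

    /* Print the symbolic name of every bit set in a wire-format flags word. */
    static void print_lock_flags(unsigned int flags)
    {
            const struct flag_name *fn;

            for (fn = ldlm_flag_names; fn->name != NULL; fn++)
                    if (flags & fn->value)
                            printf("%s ", fn->name);
            printf("\n");
    }

    int main(void)
    {
            print_lock_flags(0x000006); /* BLOCK_GRANTED | BLOCK_CONV */
            return 0;
    }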
{&hf_lustre_ldlm_fl_block_granted, {"LDLM_FL_BLOCK_GRANTED", "lustre.ldlm_fl_block_granted", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_BLOCK_GRANTED, "", HFILL}},
{&hf_lustre_ldlm_fl_block_conv, {"LDLM_FL_BLOCK_CONV", "lustre.ldlm_fl_block_conv", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_BLOCK_CONV, "", HFILL}},
{&hf_lustre_ldlm_fl_block_wait, {"LDLM_FL_BLOCK_WAIT", "lustre.ldlm_fl_block_wait", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_BLOCK_WAIT, "", HFILL}},
- {&hf_lustre_ldlm_fl_cbpending, {"LDLM_FL_CBPENDING", "lustre.ldlm_fl_cbpending", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_CBPENDING, "", HFILL}},
{&hf_lustre_ldlm_fl_ast_sent, {"LDLM_FL_AST_SENT", "lustre.ldlm_fl_ast_sent", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_AST_SENT, "", HFILL}},
- {&hf_lustre_ldlm_fl_wait_noreproc, {"LDLM_FL_WAIT_NOREPROC", "lustre.ldlm_fl_wait_noreproc", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_WAIT_NOREPROC, "", HFILL}},
- {&hf_lustre_ldlm_fl_cancel, {"LDLM_FL_CANCEL", "lustre.ldlm_fl_cancel", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_CANCEL, "", HFILL}},
{&hf_lustre_ldlm_fl_replay, {"LDLM_FL_REPLAY", "lustre.ldlm_fl_replay", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_REPLAY, "", HFILL}},
{&hf_lustre_ldlm_fl_intent_only, {"LDLM_FL_INTENT_ONLY", "lustre.ldlm_fl_intent_only", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_INTENT_ONLY, "", HFILL}},
- {&hf_lustre_ldlm_fl_local_only, {"LDLM_FL_LOCAL_ONLY", "lustre.ldlm_fl_local_only", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_LOCAL_ONLY, "", HFILL}},
- {&hf_lustre_ldlm_fl_failed, {"LDLM_FL_FAILED", "lustre.ldlm_fl_failed", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_FAILED, "", HFILL}},
{&hf_lustre_ldlm_fl_has_intent, {"LDLM_FL_HAS_INTENT", "lustre.ldlm_fl_has_intent", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_HAS_INTENT, "", HFILL}},
- {&hf_lustre_ldlm_fl_canceling, {"LDLM_FL_CANCELING", "lustre.ldlm_fl_canceling", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_CANCELING, "", HFILL}},
- {&hf_lustre_ldlm_fl_local, {"LDLM_FL_LOCAL", "lustre.ldlm_fl_local", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_LOCAL, "", HFILL}},
- {&hf_lustre_ldlm_fl_warn, {"LDLM_FL_WARN", "lustre.ldlm_fl_warn", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_WARN, "", HFILL}},
{&hf_lustre_ldlm_fl_discard_data, {"LDLM_FL_DISCARD_DATA", "lustre.ldlm_fl_discard_data", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_DISCARD_DATA, "", HFILL}},
{&hf_lustre_ldlm_fl_no_timeout, {"LDLM_FL_NO_TIMEOUT", "lustre.ldlm_fl_no_timeout", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_NO_TIMEOUT, "", HFILL}},
{&hf_lustre_ldlm_fl_block_nowait, {"LDLM_FL_BLOCK_NOWAIT", "lustre.ldlm_fl_block_nowait", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_BLOCK_NOWAIT, "", HFILL}},
{&hf_lustre_ldlm_fl_test_lock, {"LDLM_FL_TEST_LOCK", "lustre.ldlm_fl_test_lock", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_TEST_LOCK, "", HFILL}},
- {&hf_lustre_ldlm_fl_lvb_ready, {"LDLM_FL_LVB_READY", "lustre.ldlm_fl_lvb_ready", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_LVB_READY, "", HFILL}},
- {&hf_lustre_ldlm_fl_kms_ignore, {"LDLM_FL_KMS_IGNORE", "lustre.ldlm_fl_kms_ignore", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_KMS_IGNORE, "", HFILL}},
- {&hf_lustre_ldlm_fl_no_lru, {"LDLM_FL_NO_LRU", "lustre.ldlm_fl_no_lru", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_NO_LRU, "", HFILL}},
{&hf_lustre_ldlm_fl_cancel_on_block, {"LDLM_FL_CANCEL_ON_BLOCK", "lustre.ldlm_fl_cancel_on_block", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_CANCEL_ON_BLOCK, "", HFILL}},
- {&hf_lustre_ldlm_fl_cp_reqd, {"LDLM_FL_CP_REQD", "lustre.ldlm_fl_cp_reqd", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_CP_REQD, "", HFILL}},
- {&hf_lustre_ldlm_fl_cleaned, {"LDLM_FL_CLEANED", "lustre.ldlm_fl_cleaned", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_CLEANED, "", HFILL}},
- {&hf_lustre_ldlm_fl_atomic_cb, {"LDLM_FL_ATOMIC_CB", "lustre.ldlm_fl_atomic_cb", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_ATOMIC_CB, "", HFILL}},
- {&hf_lustre_ldlm_fl_bl_ast, {"LDLM_FL_BL_AST", "lustre.ldlm_fl_bl_ast", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_BL_AST, "", HFILL}},
- {&hf_lustre_ldlm_fl_bl_done, {"LDLM_FL_BL_DONE", "lustre.ldlm_fl_bl_done", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_BL_DONE, "", HFILL}},
{&hf_lustre_ldlm_fl_deny_on_contention, {"LDLM_FL_DENY_ON_CONTENTION", "lustre.ldlm_fl_deny_on_contention", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_FL_DENY_ON_CONTENTION, "", HFILL}},
{&hf_lustre_ldlm_ast_discard_data, {"LDLM_AST_DISCARD_DATA", "lustre.ldlm_ast_discard_data", FT_BOOLEAN, 32, TFS(&flags_set_truth), LDLM_AST_DISCARD_DATA, "", HFILL}},
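A sketch of how boolean header fields like those above are typically registered with Wireshark. proto_lustre, the hf[] contents, and the function name are assumptions standing in for the dissector's real code, and tfs_set_notset is the stock true_false_string rather than the dissector's flags_set_truth, but proto_register_protocol() and proto_register_field_array() are the standard epan APIs:

    #include <epan/packet.h>
    #include <epan/tfs.h>

    static int proto_lustre = -1;
    static int hf_lustre_ldlm_fl_block_granted = -1;

    static hf_register_info hf[] = {
            {&hf_lustre_ldlm_fl_block_granted,
             {"LDLM_FL_BLOCK_GRANTED", "lustre.ldlm_fl_block_granted",
              FT_BOOLEAN, 32, TFS(&tfs_set_notset), 0x000002, NULL, HFILL}},
            /* ... remaining LDLM_FL_* entries as shown above ... */
    };

    void proto_register_lustre(void)
    {
            proto_lustre = proto_register_protocol("Lustre", "Lustre", "lustre");
            proto_register_field_array(proto_lustre, hf, array_length(hf));
    }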
extern void lustre_swab_ldlm_reply (struct ldlm_reply *r);
+#define ldlm_flags_to_wire(flags) ((__u32)(flags))
+#define ldlm_flags_from_wire(flags) ((__u64)(flags))
+
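As defined here, ldlm_flags_to_wire() keeps only the low 32 bits of the 64-bit in-memory flags and ldlm_flags_from_wire() widens them back, which is lossless precisely because every flag that must cross the wire is defined at or below bit 31. A minimal userspace sketch of the round trip, with local stand-ins for the kernel __u32/__u64 types:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t __u32; /* stand-in for the kernel type */
    typedef uint64_t __u64; /* stand-in for the kernel type */

    #define ldlm_flags_to_wire(flags)   ((__u32)(flags))
    #define ldlm_flags_from_wire(flags) ((__u64)(flags))

    int main(void)
    {
            /* One wire flag (LDLM_FL_AST_SENT) plus one non-wire flag
             * (LDLM_FL_SKIPPED, defined above bit 31). */
            __u64 flags = 0x200000000ULL | 0x000020ULL;

            /* Only the low 32 bits survive the trip to the wire... */
            assert(ldlm_flags_to_wire(flags) == 0x000020);
            /* ...and widening back never resurrects non-wire bits. */
            assert(ldlm_flags_from_wire(ldlm_flags_to_wire(flags)) == 0x000020ULL);
            return 0;
    }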
/*
* Opcodes for mountconf (mgs and mgc)
*/
LDLM_NAMESPACE_CLIENT = 1 << 1
} ldlm_side_t;
+/**
+ * Declaration of flags sent over the wire.
+ **/
#define LDLM_FL_LOCK_CHANGED 0x000001 /* extent, mode, or resource changed */
/* If the server returns one of these flags, then the lock was put on that list.
#define LDLM_FL_BLOCK_CONV 0x000004
#define LDLM_FL_BLOCK_WAIT 0x000008
-#define LDLM_FL_CBPENDING 0x000010 /* this lock is being destroyed */
+/* Used to be LDLM_FL_CBPENDING 0x000010 moved to non-wire flags */
+
#define LDLM_FL_AST_SENT 0x000020 /* blocking or cancel packet was
* queued for sending. */
-#define LDLM_FL_WAIT_NOREPROC 0x000040 /* not a real flag, not saved in lock */
-#define LDLM_FL_CANCEL 0x000080 /* cancellation callback already run */
+/* Used to be LDLM_FL_WAIT_NOREPROC 0x000040 moved to non-wire flags */
+/* Used to be LDLM_FL_CANCEL 0x000080 moved to non-wire flags */
/* Lock is being replayed. This could probably be implied by the fact that one
* of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
#define LDLM_FL_REPLAY 0x000100
#define LDLM_FL_INTENT_ONLY 0x000200 /* don't grant lock, just do intent */
-#define LDLM_FL_LOCAL_ONLY 0x000400
-/* don't run the cancel callback under ldlm_cli_cancel_unused */
-#define LDLM_FL_FAILED 0x000800
+/* Used to be LDLM_FL_LOCAL_ONLY 0x000400 moved to non-wire flags */
+/* Used to be LDLM_FL_FAILED 0x000800 moved to non-wire flags */
#define LDLM_FL_HAS_INTENT 0x001000 /* lock request has intent */
-#define LDLM_FL_CANCELING 0x002000 /* lock cancel has already been sent */
-#define LDLM_FL_LOCAL 0x004000 /* local lock (ie, no srv/cli split) */
+
+/* Used to be LDLM_FL_CANCELING 0x002000 moved to non-wire flags */
+/* Used to be LDLM_FL_LOCAL 0x004000 moved to non-wire flags */
+
#define LDLM_FL_DISCARD_DATA 0x010000 /* discard (no writeback) on cancel */
#define LDLM_FL_NO_TIMEOUT 0x020000 /* Blocked by group lock - wait
* callback. */
#define LDLM_FL_TEST_LOCK 0x080000 // return blocking lock
-/* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
- * the LVB filling happens _after_ the lock has been granted, so another thread
- * can match it before the LVB has been updated. As a dirty hack, we set
- * LDLM_FL_LVB_READY only after we've done the LVB poop.
- * this is only needed on lov/osc now, where lvb is actually used and callers
- * must set it in input flags.
- *
- * The proper fix is to do the granting inside of the completion AST, which can
- * be replaced with a LVB-aware wrapping function for OSC locks. That change is
- * pretty high-risk, though, and would need a lot more testing. */
-
-#define LDLM_FL_LVB_READY 0x100000
-
-/* A lock contributes to the kms calculation until it has finished the part
- * of it's cancelation that performs write back on its dirty pages. It
- * can remain on the granted list during this whole time. Threads racing
- * to update the kms after performing their writeback need to know to
- * exclude each others locks from the calculation as they walk the granted
- * list. */
-#define LDLM_FL_KMS_IGNORE 0x200000
-
-/* Don't put lock into the LRU list, so that it is not canceled due to aging.
- * Used by MGC locks, they are cancelled only at unmount or by callback. */
-#define LDLM_FL_NO_LRU 0x400000
+/* Used to be LDLM_FL_LVB_READY 0x100000 moved to non-wire flags */
+/* Used to be LDLM_FL_KMS_IGNORE 0x200000 moved to non-wire flags */
+/* Used to be LDLM_FL_NO_LRU 0x400000 moved to non-wire flags */
/* Immediately cancel such locks when they block some other locks. Send
* cancel notification to original lock holder, but expect no reply. This is
/* Flags inherited from parent lock when doing intents. */
#define LDLM_INHERIT_FLAGS (LDLM_FL_CANCEL_ON_BLOCK)
-/* completion ast to be executed */
-#define LDLM_FL_CP_REQD 0x1000000
-
-/* cleanup_resource has already handled the lock */
-#define LDLM_FL_CLEANED 0x2000000
-
-/* optimization hint: LDLM can run blocking callback from current context
- * w/o involving separate thread. in order to decrease cs rate */
-#define LDLM_FL_ATOMIC_CB 0x4000000
-
-/* It may happen that a client initiate 2 operations, e.g. unlink and mkdir,
- * such that server send blocking ast for conflict locks to this client for
- * the 1st operation, whereas the 2nd operation has canceled this lock and
- * is waiting for rpc_lock which is taken by the 1st operation.
- * LDLM_FL_BL_AST is to be set by ldlm_callback_handler() to the lock not allow
- * ELC code to cancel it.
- * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
- * droped to let ldlm_callback_handler() return EINVAL to the server. It is
- * used when ELC rpc is already prepared and is waiting for rpc_lock, too late
- * to send a separate CANCEL rpc. */
-#define LDLM_FL_BL_AST 0x10000000
-#define LDLM_FL_BL_DONE 0x20000000
+/* Used to be LDLM_FL_CP_REQD 0x1000000 moved to non-wire flags */
+/* Used to be LDLM_FL_CLEANED 0x2000000 moved to non-wire flags */
+/* Used to be LDLM_FL_ATOMIC_CB 0x4000000 moved to non-wire flags */
+/* Used to be LDLM_FL_BL_AST 0x10000000 moved to non-wire flags */
+/* Used to be LDLM_FL_BL_DONE 0x20000000 moved to non-wire flags */
/* measure lock contention and return -EUSERS if locking contention is high */
#define LDLM_FL_DENY_ON_CONTENTION 0x40000000
/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
#define LDLM_AST_FLAGS (LDLM_FL_DISCARD_DATA)
-/*
+/*
* --------------------------------------------------------------------------
* NOTE! Starting from this point, that is, LDLM_FL_* flags with values above
* 0x80000000 will not be sent over the wire.
* --------------------------------------------------------------------------
*/
+/**
+ * Declaration of flags not sent over the wire.
+ **/
/* Used for marking lock as a target for -EINTR while cp_ast sleep
* emulation + race with upcoming bl_ast. */
#define LDLM_FL_FAIL_LOC 0x100000000ULL
-
/* Used while processing the unused list to know that we have already
* handled this lock and decided to skip it */
#define LDLM_FL_SKIPPED 0x200000000ULL
+/* this lock is being destroyed */
+#define LDLM_FL_CBPENDING 0x400000000ULL
+/* not a real flag, not saved in lock */
+#define LDLM_FL_WAIT_NOREPROC 0x800000000ULL
+/* cancellation callback already run */
+#define LDLM_FL_CANCEL 0x1000000000ULL
+#define LDLM_FL_LOCAL_ONLY 0x2000000000ULL
+/* don't run the cancel callback under ldlm_cli_cancel_unused */
+#define LDLM_FL_FAILED 0x4000000000ULL
+/* lock cancel has already been sent */
+#define LDLM_FL_CANCELING 0x8000000000ULL
+/* local lock (ie, no srv/cli split) */
+#define LDLM_FL_LOCAL 0x10000000000ULL
+/* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
+ * the LVB filling happens _after_ the lock has been granted, so another thread
+ * can match it before the LVB has been updated. As a dirty hack, we set
+ * LDLM_FL_LVB_READY only after we've done the LVB poop.
+ * this is only needed on lov/osc now, where lvb is actually used and callers
+ * must set it in input flags.
+ *
+ * The proper fix is to do the granting inside of the completion AST, which can
+ * be replaced with a LVB-aware wrapping function for OSC locks. That change is
+ * pretty high-risk, though, and would need a lot more testing. */
+#define LDLM_FL_LVB_READY 0x20000000000ULL
+/* A lock contributes to the kms calculation until it has finished the part
+ * of its cancellation that performs write back on its dirty pages. It
+ * can remain on the granted list during this whole time. Threads racing
+ * to update the kms after performing their writeback need to know to
+ * exclude each other's locks from the calculation as they walk the granted
+ * list. */
+#define LDLM_FL_KMS_IGNORE 0x40000000000ULL
+/* completion ast to be executed */
+#define LDLM_FL_CP_REQD 0x80000000000ULL
+/* cleanup_resource has already handled the lock */
+#define LDLM_FL_CLEANED 0x100000000000ULL
+/* optimization hint: LDLM can run blocking callback from current context
+ * w/o involving separate thread. in order to decrease cs rate */
+#define LDLM_FL_ATOMIC_CB 0x200000000000ULL
+/* It may happen that a client initiates 2 operations, e.g. unlink and mkdir,
+ * such that the server sends a blocking ast for conflicting locks to this
+ * client for the 1st operation, whereas the 2nd operation has canceled this
+ * lock and is waiting for rpc_lock, which is taken by the 1st operation.
+ * LDLM_FL_BL_AST is set by ldlm_callback_handler() on the lock to not allow
+ * the ELC code to cancel it.
+ * LDLM_FL_BL_DONE is set by ldlm_cancel_callback() when the lock cache is
+ * dropped to let ldlm_callback_handler() return EINVAL to the server. It is
+ * used when the ELC rpc is already prepared and is waiting for rpc_lock, too
+ * late to send a separate CANCEL rpc. */
+#define LDLM_FL_BL_AST 0x400000000000ULL
+#define LDLM_FL_BL_DONE 0x800000000000ULL
+/* Don't put lock into the LRU list, so that it is not canceled due to aging.
+ * Used by MGC locks, they are cancelled only at unmount or by callback. */
+#define LDLM_FL_NO_LRU 0x1000000000000ULL
+
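With every relocated flag now sitting at bit 32 or above, the wire/non-wire split can be sanity-checked mechanically. A small sketch, using constants copied from the definitions above:

    #include <assert.h>

    #define LDLM_FL_FAIL_LOC 0x100000000ULL   /* lowest non-wire flag */
    #define LDLM_FL_NO_LRU   0x1000000000000ULL /* highest non-wire flag */

    int main(void)
    {
            /* No non-wire flag may intersect the 32 bits that go on the wire. */
            assert(((LDLM_FL_FAIL_LOC | LDLM_FL_NO_LRU) & 0xffffffffULL) == 0);
            return 0;
    }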
/* The blocking callback is overloaded to perform two functions. These flags
* indicate which operation should be performed. */
};
typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
- void *req_cookie, ldlm_mode_t mode, int flags,
- void *data);
+ void *req_cookie, ldlm_mode_t mode, __u64 flags,
+ void *data);
typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);
typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
struct ldlm_lock_desc *new, void *data,
int flag);
-typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags,
- void *data);
+typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, __u64 flags,
+ void *data);
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
#define LDLM_DEBUG_NOLOCK(format, a...) \
CDEBUG(D_DLMTRACE, "### " format "\n" , ##a)
-typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, int *flags,
+typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
int first_enq, ldlm_error_t *err,
cfs_list_t *work_list);
ldlm_iterator_t iter, void *data);
/* ldlm_flock.c */
-int ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data);
+int ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
/* ldlm_extent.c */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);
#ifdef HAVE_SERVER_SUPPORT
int ldlm_server_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
void *data, int flag);
-int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data);
+int ldlm_server_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list);
int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback,
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
void ldlm_lock2handle(const struct ldlm_lock *lock,
struct lustre_handle *lockh);
-struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, int flags);
+struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, __u64 flags);
void ldlm_cancel_callback(struct ldlm_lock *);
int ldlm_lock_remove_from_lru(struct ldlm_lock *);
int ldlm_lock_set_data(struct lustre_handle *, void *);
lu_ref_del(&lock->l_reference, "handle", cfs_current())
static inline struct ldlm_lock *
-ldlm_handle2lock_long(const struct lustre_handle *h, int flags)
+ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags)
{
struct ldlm_lock *lock;
void ldlm_lock_fail_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
-ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
+ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
const struct ldlm_res_id *, ldlm_type_t type,
ldlm_policy_data_t *, ldlm_mode_t mode,
struct lustre_handle *, int unref);
ldlm_namespace_new(struct obd_device *obd, char *name,
ldlm_side_t client, ldlm_appetite_t apt,
ldlm_ns_type_t ns_type);
-int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags);
+int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
void ldlm_namespace_free(struct ldlm_namespace *ns,
struct obd_import *imp, int force);
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag);
int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
-int ldlm_completion_ast_async(struct ldlm_lock *lock, int flags, void *data);
-int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data);
+int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data);
+int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
struct ldlm_enqueue_info *einfo,
const struct ldlm_res_id *res_id,
- ldlm_policy_data_t const *policy, int *flags,
+ ldlm_policy_data_t const *policy, __u64 *flags,
void *lvb, __u32 lvb_len, struct lustre_handle *lockh,
int async);
int ldlm_prep_enqueue_req(struct obd_export *exp,
cfs_list_t *cancels, int count);
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
- int *flags, void *lvb, __u32 lvb_len,
+ __u64 *flags, void *lvb, __u32 lvb_len,
struct lustre_handle *lockh, int rc);
int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
ldlm_type_t type, ldlm_policy_data_t *policy,
- ldlm_mode_t mode, int *flags,
+ ldlm_mode_t mode, __u64 *flags,
ldlm_blocking_callback blocking,
ldlm_completion_callback completion,
ldlm_glimpse_callback glimpse,
- while stat, the flags are used to control delay/resend.
- while setattr, the flags are used to distinguish the punch operation
*/
- int oi_flags;
+ __u64 oi_flags;
/* Lock handle specific for every OSC lock. */
struct lustre_handle *oi_lockh;
/* lsm data specific for every OSC. */
int (*m_enqueue)(struct obd_export *, struct ldlm_enqueue_info *,
struct lookup_intent *, struct md_op_data *,
struct lustre_handle *, void *, int,
- struct ptlrpc_request **, int);
+ struct ptlrpc_request **, __u64);
int (*m_getattr)(struct obd_export *, struct md_op_data *,
struct ptlrpc_request **);
int (*m_getattr_name)(struct obd_export *, struct md_op_data *,
int (*m_intent_lock)(struct obd_export *, struct md_op_data *,
void *, int, struct lookup_intent *, int,
struct ptlrpc_request **,
- ldlm_blocking_callback, int);
+ ldlm_blocking_callback, __u64);
int (*m_link)(struct obd_export *, struct md_op_data *,
struct ptlrpc_request **);
int (*m_rename)(struct obd_export *, struct md_op_data *,
struct obd_client_handle *);
int (*m_set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *);
- ldlm_mode_t (*m_lock_match)(struct obd_export *, int,
+ ldlm_mode_t (*m_lock_match)(struct obd_export *, __u64,
const struct lu_fid *, ldlm_type_t,
ldlm_policy_data_t *, ldlm_mode_t,
struct lustre_handle *);
}
static inline int md_intent_lock(struct obd_export *exp,
- struct md_op_data *op_data, void *lmm,
- int lmmsize, struct lookup_intent *it,
- int flags, struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking,
- int extra_lock_flags)
+ struct md_op_data *op_data, void *lmm,
+ int lmmsize, struct lookup_intent *it,
+ int lookup_flags, struct ptlrpc_request **reqp,
+ ldlm_blocking_callback cb_blocking,
+ __u64 extra_lock_flags)
{
int rc;
ENTRY;
EXP_CHECK_MD_OP(exp, intent_lock);
EXP_MD_COUNTER_INCREMENT(exp, intent_lock);
rc = MDP(exp->exp_obd, intent_lock)(exp, op_data, lmm, lmmsize,
- it, flags, reqp, cb_blocking,
+ it, lookup_flags, reqp, cb_blocking,
extra_lock_flags);
RETURN(rc);
}
RETURN(rc);
}
-static inline ldlm_mode_t md_lock_match(struct obd_export *exp, int flags,
+static inline ldlm_mode_t md_lock_match(struct obd_export *exp, __u64 flags,
const struct lu_fid *fid,
ldlm_type_t type,
ldlm_policy_data_t *policy,
struct osc_enqueue_args {
struct obd_export *oa_exp;
- int *oa_flags;
+ __u64 *oa_flags;
obd_enqueue_update_f oa_upcall;
void *oa_cookie;
struct ost_lvb *oa_lvb;
/* In order to determine the largest possible extent we can grant, we need
* to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
- struct ldlm_lock *lock, int *flags)
+ struct ldlm_lock *lock, __u64 *flags)
{
struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
*/
static int
ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
- int *flags, ldlm_error_t *err,
- cfs_list_t *work_list, int *contended_locks)
+ __u64 *flags, ldlm_error_t *err,
+ cfs_list_t *work_list, int *contended_locks)
{
cfs_list_t *tmp;
struct ldlm_lock *lock;
* If first_enq is 1 (ie, called from ldlm_lock_enqueue):
* - blocking ASTs have not been sent
* - must call this function with the ns lock held once */
-int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
- ldlm_error_t *err, cfs_list_t *work_list)
+int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
+ int first_enq, ldlm_error_t *err,
+ cfs_list_t *work_list)
{
struct ldlm_resource *res = lock->l_resource;
CFS_LIST_HEAD(rpc_list);
}
static inline void
-ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
+ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
{
ENTRY;
- LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
- mode, flags);
+ LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
+ mode, flags);
- /* Safe to not lock here, since it should be empty anyway */
+ /* Safe to not lock here, since it should be empty anyway */
LASSERT(cfs_hlist_unhashed(&lock->l_exp_flock_hash));
cfs_list_del_init(&lock->l_res_link);
}
int
-ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
- ldlm_error_t *err, cfs_list_t *work_list)
+ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
+ ldlm_error_t *err, cfs_list_t *work_list)
{
struct ldlm_resource *res = req->l_resource;
struct ldlm_namespace *ns = ldlm_res_to_ns(res);
int rc;
ENTRY;
- CDEBUG(D_DLMTRACE, "flags %#x owner "LPU64" pid %u mode %u start "LPU64
- " end "LPU64"\n", *flags, new->l_policy_data.l_flock.owner,
+ CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
+ LPU64" end "LPU64"\n", *flags,
+ new->l_policy_data.l_flock.owner,
new->l_policy_data.l_flock.pid, mode,
req->l_policy_data.l_flock.start,
req->l_policy_data.l_flock.end);
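The CDEBUG format fix above is the recurring pattern of this patch: a 64-bit flags word needs a 64-bit conversion specifier, which Lustre spells with the LPU64/LPX64 macros or an explicit %llx. A tiny standalone sketch, with LPX64 redefined locally for illustration:

    #include <stdio.h>

    /* Local stand-in for Lustre's 64-bit hex format macro. */
    #define LPX64 "%#llx"

    int main(void)
    {
            unsigned long long flags = 0x1000000000000ULL; /* LDLM_FL_NO_LRU */

            /* With a plain "%x" only 32 bits of the argument would be
             * consumed, corrupting this and any following arguments. */
            printf("flags " LPX64 "\n", flags);
            return 0;
    }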
* \retval <0 : failure
*/
int
-ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
+ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
cfs_flock_t *getlk = lock->l_ast_data;
struct obd_device *obd;
int rc = 0;
ENTRY;
- CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
+ CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
flags, data, getlk);
/* Import invalidation. We need to actually release the lock
cfs_flock_set_end(getlk,
(loff_t)lock->l_policy_data.l_flock.end);
} else {
- int noreproc = LDLM_FL_WAIT_NOREPROC;
+ __u64 noreproc = LDLM_FL_WAIT_NOREPROC;
/* We need to reprocess the lock to do merges or splits
* with existing locks owned by this process. */
* If first_enq is 1 (ie, called from ldlm_lock_enqueue):
* - blocking ASTs have not been sent
* - must call this function with the ns lock held once */
-int ldlm_process_inodebits_lock(struct ldlm_lock *lock, int *flags,
+int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags,
int first_enq, ldlm_error_t *err,
cfs_list_t *work_list)
{
const struct ldlm_callback_suite *cbs,
void *data, __u32 lvb_len);
ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
- void *cookie, int *flags);
+ void *cookie, __u64 *flags);
void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode);
void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode);
void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
#ifdef HAVE_SERVER_SUPPORT
/* ldlm_plain.c */
-int ldlm_process_plain_lock(struct ldlm_lock *lock, int *flags, int first_enq,
- ldlm_error_t *err, cfs_list_t *work_list);
+int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
+ int first_enq, ldlm_error_t *err,
+ cfs_list_t *work_list);
/* ldlm_inodebits.c */
-int ldlm_process_inodebits_lock(struct ldlm_lock *lock, int *flags,
+int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags,
int first_enq, ldlm_error_t *err,
cfs_list_t *work_list);
#endif
/* ldlm_extent.c */
#ifdef HAVE_SERVER_SUPPORT
-int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
- ldlm_error_t *err, cfs_list_t *work_list);
+int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
+ int first_enq, ldlm_error_t *err,
+ cfs_list_t *work_list);
#endif
void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
void ldlm_extent_unlink_lock(struct ldlm_lock *lock);
/* ldlm_flock.c */
-int ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
- ldlm_error_t *err, cfs_list_t *work_list);
+int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
+ int first_enq, ldlm_error_t *err,
+ cfs_list_t *work_list);
int ldlm_init_flock_export(struct obd_export *exp);
void ldlm_destroy_flock_export(struct obd_export *exp);
*/
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
- int flags)
+ __u64 flags)
{
struct ldlm_lock *lock;
ENTRY;
ldlm_mode_t *mode,
ldlm_policy_data_t *policy,
struct ldlm_lock *old_lock,
- int flags, int unref)
+ __u64 flags, int unref)
{
struct ldlm_lock *lock;
cfs_list_t *tmp;
* caller code unchanged), the context failure will be discovered by caller
* sometime later.
*/
-ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
+ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
const struct ldlm_res_id *res_id, ldlm_type_t type,
ldlm_policy_data_t *policy, ldlm_mode_t mode,
struct lustre_handle *lockh, int unref)
ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
struct ldlm_lock **lockp,
- void *cookie, int *flags)
+ void *cookie, __u64 *flags)
{
struct ldlm_lock *lock = *lockp;
struct ldlm_resource *res = lock->l_resource;
{
cfs_list_t *tmp, *pos;
ldlm_processing_policy policy;
- int flags;
+ __u64 flags;
int rc = LDLM_ITER_CONTINUE;
ldlm_error_t err;
ENTRY;
} else {
/* This should never happen, because of the way the
* server handles conversions. */
- LDLM_ERROR(lock, "Erroneous flags %d on local lock\n",
+ LDLM_ERROR(lock, "Erroneous flags %x on local lock\n",
*flags);
LBUG();
} else {
int rc;
ldlm_error_t err;
- int pflags = 0;
+ __u64 pflags = 0;
ldlm_processing_policy policy;
policy = ldlm_processing_policy_table[res->lr_type];
rc = policy(lock, &pflags, 0, &err, &rpc_list);
body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
body->lock_handle[0] = lock->l_remote_handle;
body->lock_desc = *desc;
- body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);
+ body->lock_flags |= ldlm_flags_to_wire(lock->l_flags & LDLM_AST_FLAGS);
LDLM_DEBUG(lock, "server preparing blocking AST");
}
EXPORT_SYMBOL(ldlm_server_blocking_ast);
-int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
+int ldlm_server_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
struct ldlm_cb_set_arg *arg = data;
struct ldlm_request *body;
body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
body->lock_handle[0] = lock->l_remote_handle;
- body->lock_flags = flags;
+ body->lock_flags = ldlm_flags_to_wire(flags);
ldlm_lock2desc(lock, &body->lock_desc);
if (lvb_len > 0) {
void *lvb = req_capsule_client_get(&req->rq_pill, &RMF_DLM_LVB);
/* We only send real blocking ASTs after the lock is granted */
lock_res_and_lock(lock);
if (lock->l_flags & LDLM_FL_AST_SENT) {
- body->lock_flags |= LDLM_FL_AST_SENT;
+ body->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
/* copy ast flags like LDLM_FL_DISCARD_DATA */
- body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);
+ body->lock_flags |= ldlm_flags_to_wire(lock->l_flags &
+ LDLM_AST_FLAGS);
/* We might get here prior to ldlm_handle_enqueue setting
* LDLM_FL_CANCEL_ON_BLOCK flag. Then we will put this lock
const struct ldlm_callback_suite *cbs)
{
struct ldlm_reply *dlm_rep;
- __u32 flags;
+ __u64 flags;
ldlm_error_t err = ELDLM_OK;
struct ldlm_lock *lock = NULL;
void *cookie = NULL;
LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF);
- flags = dlm_req->lock_flags;
+ flags = ldlm_flags_from_wire(dlm_req->lock_flags);
LASSERT(req->rq_export);
if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
lock->l_req_extent = lock->l_policy_data.l_extent;
- err = ldlm_lock_enqueue(ns, &lock, cookie, (int *)&flags);
+ err = ldlm_lock_enqueue(ns, &lock, cookie, &flags);
if (err)
GOTO(out, err);
dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- dlm_rep->lock_flags = flags;
+ dlm_rep->lock_flags = ldlm_flags_to_wire(flags);
ldlm_lock2desc(lock, &dlm_rep->lock_desc);
ldlm_lock2handle(lock, &dlm_rep->lock_handle);
/* Now take into account flags to be inherited from original lock
request both in reply to client and in our own lock flags. */
dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
- lock->l_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
+ lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
+ LDLM_INHERIT_FLAGS);
/* Don't move a pending lock onto the export if it has already been
* disconnected due to eviction (bug 5683) or server umount (bug 24324).
LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
rc = -ENOTCONN;
} else if (lock->l_flags & LDLM_FL_AST_SENT) {
- dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
+ dlm_rep->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
if (lock->l_granted_mode == lock->l_req_mode) {
/*
* Only cancel lock if it was granted, because it would
/* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
lock_res_and_lock(lock);
- lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
+ lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
+ LDLM_AST_FLAGS);
if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
/* If somebody cancels lock and cache is already dropped,
* or lock is failed before cp_ast received on client,
* If first_enq is 1 (ie, called from ldlm_lock_enqueue):
* - blocking ASTs have not been sent
* - must call this function with the resource lock held */
-int ldlm_process_plain_lock(struct ldlm_lock *lock, int *flags, int first_enq,
- ldlm_error_t *err, cfs_list_t *work_list)
+int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
+ int first_enq, ldlm_error_t *err,
+ cfs_list_t *work_list)
{
struct ldlm_resource *res = lock->l_resource;
CFS_LIST_HEAD(rpc_list);
* until lock is granted. Suitable for locks enqueued through ptlrpcd, of
* other threads that cannot block for long.
*/
-int ldlm_completion_ast_async(struct ldlm_lock *lock, int flags, void *data)
+int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
{
ENTRY;
* or penultimate cases happen in some other thread.
*
*/
-int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data)
+int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
/* XXX ALLOCATE - 160 bytes */
struct lock_wait_data lwd;
int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
ldlm_type_t type, ldlm_policy_data_t *policy,
- ldlm_mode_t mode, int *flags,
+ ldlm_mode_t mode, __u64 *flags,
ldlm_blocking_callback blocking,
ldlm_completion_callback completion,
ldlm_glimpse_callback glimpse,
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
- int *flags, void *lvb, __u32 lvb_len,
+ __u64 *flags, void *lvb, __u32 lvb_len,
struct lustre_handle *lockh,int rc)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
lock->l_remote_handle = reply->lock_handle;
}
- *flags = reply->lock_flags;
- lock->l_flags |= reply->lock_flags & LDLM_INHERIT_FLAGS;
+ *flags = ldlm_flags_from_wire(reply->lock_flags);
+ lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
+ LDLM_INHERIT_FLAGS);
/* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
* to wait with no timeout as well */
- lock->l_flags |= reply->lock_flags & LDLM_FL_NO_TIMEOUT;
+ lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
+ LDLM_FL_NO_TIMEOUT);
unlock_res_and_lock(lock);
- CDEBUG(D_INFO, "local: %p, remote cookie: "LPX64", flags: 0x%x\n",
+ CDEBUG(D_INFO, "local: %p, remote cookie: "LPX64", flags: 0x%llx\n",
lock, reply->lock_handle.cookie, *flags);
/* If enqueue returned a blocked lock but the completion handler has
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
struct ldlm_enqueue_info *einfo,
const struct ldlm_res_id *res_id,
- ldlm_policy_data_t const *policy, int *flags,
+ ldlm_policy_data_t const *policy, __u64 *flags,
void *lvb, __u32 lvb_len, struct lustre_handle *lockh,
int async)
{
/* Dump lock data into the request buffer */
body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
ldlm_lock2desc(lock, &body->lock_desc);
- body->lock_flags = *flags;
+ body->lock_flags = ldlm_flags_to_wire(*flags);
body->lock_handle[0] = *lockh;
/* Continue as normal. */
body->lock_handle[0] = lock->l_remote_handle;
body->lock_desc.l_req_mode = new_mode;
- body->lock_flags = *flags;
+ body->lock_flags = ldlm_flags_to_wire(*flags);
ptlrpc_request_set_replen(req);
* LDLM_FL_LOCAL_ONLY if there is no need for a CANCEL rpc to the server;
* LDLM_FL_CANCELING otherwise;
* LDLM_FL_BL_AST if there is a need for a separate CANCEL rpc. */
-static int ldlm_cli_cancel_local(struct ldlm_lock *lock)
+static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
{
- int rc = LDLM_FL_LOCAL_ONLY;
+ __u64 rc = LDLM_FL_LOCAL_ONLY;
ENTRY;
if (lock->l_conn_export) {
- int local_only;
+ bool local_only;
LDLM_DEBUG(lock, "client-side cancel");
/* Set this flag to prevent others from getting new references*/
lock_res_and_lock(lock);
lock->l_flags |= LDLM_FL_CBPENDING;
- local_only = (lock->l_flags &
- (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
+ local_only = !!(lock->l_flags &
+ (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
ldlm_cancel_callback(lock);
rc = (lock->l_flags & LDLM_FL_BL_AST) ?
LDLM_FL_BL_AST : LDLM_FL_CANCELING;
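Because ldlm_cli_cancel_local() now reports its disposition as one of the LDLM_FL_* constants rather than a negative errno, callers compare against those constants instead of testing rc < 0, as the ldlm_cli_cancel() hunk below shows. A runnable sketch of the caller pattern, with a stub in place of the real function:

    #include <stdio.h>

    typedef unsigned long long __u64; /* stand-in for the kernel type */

    #define LDLM_FL_LOCAL_ONLY 0x2000000000ULL
    #define LDLM_FL_CANCELING  0x8000000000ULL
    #define LDLM_FL_BL_AST     0x400000000000ULL

    /* Stub standing in for ldlm_cli_cancel_local(). */
    static __u64 cancel_local_stub(void)
    {
            return LDLM_FL_LOCAL_ONLY;
    }

    int main(void)
    {
            __u64 rc = cancel_local_stub();

            if (rc == LDLM_FL_LOCAL_ONLY)
                    printf("no CANCEL rpc needed\n");
            else if (rc == LDLM_FL_BL_AST)
                    printf("send a separate CANCEL rpc\n");
            else /* LDLM_FL_CANCELING */
                    printf("cancel already sent\n");
            return 0;
    }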
int ldlm_cli_cancel(struct lustre_handle *lockh)
{
struct obd_export *exp;
- int avail, flags, count = 1, rc = 0;
+ int avail, flags, count = 1;
+ __u64 rc = 0;
struct ldlm_namespace *ns;
struct ldlm_lock *lock;
CFS_LIST_HEAD(cancels);
}
rc = ldlm_cli_cancel_local(lock);
- if (rc < 0 || rc == LDLM_FL_LOCAL_ONLY) {
+ if (rc == LDLM_FL_LOCAL_ONLY) {
LDLM_LOCK_RELEASE(lock);
- RETURN(rc < 0 ? rc : 0);
+ RETURN(0);
}
/* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
* rpc which goes to canceld portal, so we can cancel other lru locks
{
CFS_LIST_HEAD(head);
struct ldlm_lock *lock, *next;
- int left = 0, bl_ast = 0, rc;
+ int left = 0, bl_ast = 0;
+ __u64 rc;
left = count;
cfs_list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
ldlm_lock2desc(lock, &body->lock_desc);
- body->lock_flags = flags;
+ body->lock_flags = ldlm_flags_to_wire(flags);
ldlm_lock2handle(lock, &body->lock_handle[0]);
if (lock->l_lvb_len != 0) {
* This is currently only used for recovery, and we make certain assumptions
* as a result--notably, that we shouldn't cancel locks with refs. -phil */
static void cleanup_resource(struct ldlm_resource *res, cfs_list_t *q,
- int flags)
+ __u64 flags)
{
- cfs_list_t *tmp;
- int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
- int local_only = (flags & LDLM_FL_LOCAL_ONLY);
+ cfs_list_t *tmp;
+ int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
+ bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
do {
struct ldlm_lock *lock = NULL;
cfs_hlist_node_t *hnode, void *arg)
{
struct ldlm_resource *res = cfs_hash_object(hs, hnode);
- int flags = (int)(unsigned long)arg;
+ __u64 flags = *(__u64 *)arg;
cleanup_resource(res, &res->lr_granted, flags);
cleanup_resource(res, &res->lr_converting, flags);
return 0;
}
-int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
+int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
{
if (ns == NULL) {
CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
return ELDLM_OK;
}
- cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
- (void *)(unsigned long)flags);
+ cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);
return ELDLM_OK;
}
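The hunk above also changes how the flags reach the hash-walk callback: the old code smuggled the value through the pointer itself via (void *)(unsigned long), which would truncate a __u64 on 32-bit hosts, so the new code passes &flags and the callback dereferences it. A minimal sketch of that pattern, with for_each() standing in for cfs_hash_for_each_nolock():

    #include <assert.h>

    typedef unsigned long long __u64; /* stand-in for the kernel type */

    /* The callback receives the 64-bit flags through a pointer, because
     * casting the value into a void * would truncate it on 32-bit hosts. */
    static int visit(void *arg)
    {
            __u64 flags = *(__u64 *)arg;

            assert(flags == 0x1000000000000ULL);
            return 0;
    }

    static void for_each(int (*cb)(void *), void *arg)
    {
            cb(arg); /* stands in for cfs_hash_for_each_nolock() */
    }

    int main(void)
    {
            __u64 flags = 0x1000000000000ULL; /* e.g. LDLM_FL_NO_LRU */

            for_each(visit, &flags);
            return 0;
    }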
typedef int (*intent_finish_cb)(struct ptlrpc_request *,
struct inode *parent, struct pnode *pnode,
struct lookup_intent *, int offset, obd_id ino);
-int llu_intent_lock(struct inode *parent, struct pnode *pnode,
- struct lookup_intent *, int flags, intent_finish_cb);
static inline __u64 ll_file_maxbytes(struct inode *inode)
{
ldlm_policy_data_t policy = { .l_inodebits = {bits}};
struct lu_fid *fid;
ldlm_mode_t rc;
- int flags;
+ __u64 flags;
ENTRY;
fid = &llu_i2info(inode)->lli_fid;
struct lustre_handle lockh;
ldlm_policy_data_t policy = { .l_inodebits = { lockpart } };
struct lu_fid *fid;
- int flags;
+ __u64 flags;
ENTRY;
LASSERT(inode);
struct lustre_handle lockh = {0};
ldlm_policy_data_t flock;
- int flags = 0;
+ __u64 flags = 0;
int rc;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%llu file_lock=%p\n",
LBUG();
}
- CDEBUG(D_DLMTRACE, "inode=%llu, pid=%u, cmd=%d, flags=%#x, mode=%u, "
+ CDEBUG(D_DLMTRACE, "inode=%llu, pid=%u, cmd=%d, flags=%#llx, mode=%u, "
"start="LPX64", end="LPX64"\n", (unsigned long long)st->st_ino,
flock.l_flock.pid, cmd, flags, einfo.ei_mode, flock.l_flock.start,
flock.l_flock.end);
ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ?
(LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
struct lu_fid *fid;
- int flags;
+ __u64 flags;
int i;
ENTRY;
ldlm_policy_data_t policy = { .l_inodebits = {bits}};
struct lu_fid *fid;
ldlm_mode_t rc;
- int flags;
+ __u64 flags;
ENTRY;
fid = &ll_i2info(inode)->lli_fid;
int lmmsize, struct lookup_intent *it,
int flags, struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
- int extra_lock_flags)
+ __u64 extra_lock_flags)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
void *lmm, int lmmsize, struct lookup_intent *it,
int flags, struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
- int extra_lock_flags)
+ __u64 extra_lock_flags)
{
struct obd_device *obd = exp->exp_obd;
struct lu_fid rpid = op_data->op_fid1;
void *lmm, int lmmsize, struct lookup_intent *it,
int flags, struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
- int extra_lock_flags)
+ __u64 extra_lock_flags)
{
struct obd_device *obd = exp->exp_obd;
struct lu_fid rpid = op_data->op_fid1;
void *lmm, int lmmsize, struct lookup_intent *it,
int flags, struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
- int extra_lock_flags)
+ __u64 extra_lock_flags)
{
struct obd_device *obd = exp->exp_obd;
int rc;
int lmv_revalidate_slaves(struct obd_export *exp, struct ptlrpc_request **reqp,
const struct lu_fid *mid, struct lookup_intent *oit,
int master_valid, ldlm_blocking_callback cb_blocking,
- int extra_lock_flags)
+ __u64 extra_lock_flags)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
void *lmm, int lmmsize, struct lookup_intent *it,
int flags, struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
- int extra_lock_flags);
+ __u64 extra_lock_flags);
int lmv_intent_lookup(struct obd_export *exp, struct md_op_data *op_data,
void *lmm, int lmmsize, struct lookup_intent *it,
int flags, struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
- int extra_lock_flags);
+ __u64 extra_lock_flags);
int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
void *lmm, int lmmsize, struct lookup_intent *it,
int flags, struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
- int extra_lock_flags);
+ __u64 extra_lock_flags);
int lmv_allocate_slaves(struct obd_device *obd, struct lu_fid *pid,
struct md_op_data *op, struct lu_fid *fid);
int lmv_revalidate_slaves(struct obd_export *, struct ptlrpc_request **,
const struct lu_fid *, struct lookup_intent *, int,
ldlm_blocking_callback cb_blocking,
- int extra_lock_flags);
+ __u64 extra_lock_flags);
int lmv_handle_split(struct obd_export *, const struct lu_fid *);
int lmv_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
struct lookup_intent *it, struct md_op_data *op_data,
struct lustre_handle *lockh, void *lmm, int lmmsize,
- struct ptlrpc_request **req, int extra_lock_flags)
+ struct ptlrpc_request **req, __u64 extra_lock_flags)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
RETURN(rc);
}
-ldlm_mode_t lmv_lock_match(struct obd_export *exp, int flags,
+ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
const struct lu_fid *fid, ldlm_type_t type,
ldlm_policy_data_t *policy, ldlm_mode_t mode,
struct lustre_handle *lockh)
void *lmm, int lmmsize,
struct lookup_intent *, int,
struct ptlrpc_request **reqp,
- ldlm_blocking_callback cb_blocking, int extra_lock_flags);
+ ldlm_blocking_callback cb_blocking,
+ __u64 extra_lock_flags);
int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
struct lookup_intent *it, struct md_op_data *op_data,
struct lustre_handle *lockh, void *lmm, int lmmsize,
- struct ptlrpc_request **req, int extra_lock_flags);
+ struct ptlrpc_request **req, __u64 extra_lock_flags);
int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
cfs_list_t *cancels, ldlm_mode_t mode,
struct md_enqueue_info *minfo,
struct ldlm_enqueue_info *einfo);
-ldlm_mode_t mdc_lock_match(struct obd_export *exp, int flags,
+ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
const struct lu_fid *fid, ldlm_type_t type,
ldlm_policy_data_t *policy, ldlm_mode_t mode,
struct lustre_handle *lockh);
RETURN(0);
}
-ldlm_mode_t mdc_lock_match(struct obd_export *exp, int flags,
+ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
const struct lu_fid *fid, ldlm_type_t type,
ldlm_policy_data_t *policy, ldlm_mode_t mode,
struct lustre_handle *lockh)
* actually get a lock, just perform the intent. */
if (req->rq_transno || req->rq_replay) {
lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ);
- lockreq->lock_flags |= LDLM_FL_INTENT_ONLY;
+ lockreq->lock_flags |= ldlm_flags_to_wire(LDLM_FL_INTENT_ONLY);
}
if (rc == ELDLM_LOCK_ABORTED) {
int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
struct lookup_intent *it, struct md_op_data *op_data,
struct lustre_handle *lockh, void *lmm, int lmmsize,
- struct ptlrpc_request **reqp, int extra_lock_flags)
+ struct ptlrpc_request **reqp, __u64 extra_lock_flags)
{
struct obd_device *obddev = class_exp2obd(exp);
struct ptlrpc_request *req = NULL;
- int flags, saved_flags = extra_lock_flags;
+ __u64 flags, saved_flags = extra_lock_flags;
int rc;
struct ldlm_res_id res_id;
static const ldlm_policy_data_t lookup_policy =
void *lmm, int lmmsize, struct lookup_intent *it,
int lookup_flags, struct ptlrpc_request **reqp,
ldlm_blocking_callback cb_blocking,
- int extra_lock_flags)
+ __u64 extra_lock_flags)
{
struct lustre_handle lockh;
int rc = 0;
struct lookup_intent *it;
struct lustre_handle *lockh;
struct obd_device *obddev;
- int flags = LDLM_FL_HAS_INTENT;
+ __u64 flags = LDLM_FL_HAS_INTENT;
ENTRY;
it = &minfo->mi_it;
MDS_INODELOCK_UPDATE }
};
int rc = 0;
- int flags = LDLM_FL_HAS_INTENT;
+ __u64 flags = LDLM_FL_HAS_INTENT;
ENTRY;
CDEBUG(D_DLMTRACE,"name: %.*s in inode "DFID", intent: %s flags %#o\n",
static int mdt_intent_getattr(enum mdt_it_code opcode,
struct mdt_thread_info *info,
struct ldlm_lock **,
- int);
+ __u64);
static int mdt_intent_reint(enum mdt_it_code opcode,
struct mdt_thread_info *info,
struct ldlm_lock **,
- int);
+ __u64);
static struct mdt_it_flavor {
const struct req_format *it_fmt;
int (*it_act)(enum mdt_it_code ,
struct mdt_thread_info *,
struct ldlm_lock **,
- int);
+ __u64);
long it_reint;
} mdt_it_flavor[] = {
[MDT_IT_OPEN] = {
struct ldlm_lock **lockp,
struct ldlm_lock *new_lock,
struct mdt_lock_handle *lh,
- int flags)
+ __u64 flags)
{
struct ptlrpc_request *req = mdt_info_req(info);
struct ldlm_lock *lock = *lockp;
static int mdt_intent_getattr(enum mdt_it_code opcode,
struct mdt_thread_info *info,
struct ldlm_lock **lockp,
- int flags)
+ __u64 flags)
{
struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
struct ldlm_lock *new_lock = NULL;
static int mdt_intent_reint(enum mdt_it_code opcode,
struct mdt_thread_info *info,
struct ldlm_lock **lockp,
- int flags)
+ __u64 flags)
{
struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
struct ldlm_reply *rep = NULL;
}
static int mdt_intent_opc(long itopc, struct mdt_thread_info *info,
- struct ldlm_lock **lockp, int flags)
+ struct ldlm_lock **lockp, __u64 flags)
{
struct req_capsule *pill;
struct mdt_it_flavor *flv;
static int mdt_intent_policy(struct ldlm_namespace *ns,
struct ldlm_lock **lockp, void *req_cookie,
- ldlm_mode_t mode, int flags, void *data)
+ ldlm_mode_t mode, __u64 flags, void *data)
{
struct mdt_thread_info *info;
struct ptlrpc_request *req = req_cookie;
ldlm_mode_t mode,
ldlm_policy_data_t *policy,
const struct ldlm_res_id *res_id,
- int flags, const __u64 *client_cookie)
+ __u64 flags, const __u64 *client_cookie)
{
int rc;
struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
ldlm_policy_data_t *policy = &info->mti_policy;
struct ldlm_res_id *res_id = &info->mti_res_id;
+ __u64 flags = 0;
struct md_site *ms;
int rc;
ENTRY;
policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
if (ms->ms_control_exp == NULL) {
- int flags = LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB;
+ flags = LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB;
/*
* Current node is controller, that is mdt0, where we should
} else {
struct ldlm_enqueue_info einfo = { LDLM_IBITS, LCK_EX,
ldlm_blocking_ast, ldlm_completion_ast, NULL, NULL, NULL };
- int flags = 0;
-
/*
* This is the case mdt0 is remote node, issue DLM lock like
* other clients.
/* Take a config lock so we can get cancel notifications */
static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
__u32 type, ldlm_policy_data_t *policy, __u32 mode,
- int *flags, void *bl_cb, void *cp_cb, void *gl_cb,
- void *data, __u32 lvb_len, void *lvb_swabber,
- struct lustre_handle *lockh)
+ __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb,
+ void *data, __u32 lvb_len, void *lvb_swabber,
+ struct lustre_handle *lockh)
{
struct config_llog_data *cld = (struct config_llog_data *)data;
struct ldlm_enqueue_info einfo = { type, mode, mgc_blocking_ast,
int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
{
struct lustre_handle lockh = { 0 };
- int rc = 0, rcl, flags = LDLM_FL_NO_LRU;
+ __u64 flags = LDLM_FL_NO_LRU;
+ int rc = 0, rcl;
ENTRY;
LASSERT(cld);
static int mgs_handle(struct ptlrpc_request *req);
-static int mgs_completion_ast_config(struct ldlm_lock *lock, int flags,
+static int mgs_completion_ast_config(struct ldlm_lock *lock, __u64 flags,
void *cbdata)
{
ENTRY;
RETURN(ldlm_completion_ast(lock, flags, cbdata));
}
-static int mgs_completion_ast_ir(struct ldlm_lock *lock, int flags,
+static int mgs_completion_ast_ir(struct ldlm_lock *lock, __u64 flags,
void *cbdata)
{
ENTRY;
ldlm_completion_callback cp = NULL;
struct lustre_handle lockh = { 0 };
struct ldlm_res_id res_id;
- int flags = LDLM_FL_ATOMIC_CB;
+ __u64 flags = LDLM_FL_ATOMIC_CB;
int rc;
ENTRY;
{
struct lprocfs_static_vars lvars;
int rc;
- int lock_flags = 0;
+ __u64 lock_flags = 0;
struct ldlm_res_id res_id = {.name = {1}};
char ns_name[48];
ENTRY;
}
int ofd_intent_policy(struct ldlm_namespace *ns, struct ldlm_lock **lockp,
- void *req_cookie, ldlm_mode_t mode, int flags,
+ void *req_cookie, ldlm_mode_t mode, __u64 flags,
void *data)
{
struct ptlrpc_request *req = req_cookie;
struct ost_lvb *res_lvb, *reply_lvb;
struct ldlm_reply *rep;
ldlm_error_t err;
- int idx, rc;
- int tmpflags = 0, only_liblustre = 1;
+ int idx, rc, only_liblustre = 1;
+ __u64 tmpflags = 0;
struct ldlm_interval_tree *tree;
struct ofd_intent_args arg;
__u32 repsize[3] = {
/* ofd_dlm.c */
int ofd_intent_policy(struct ldlm_namespace *ns, struct ldlm_lock **lockp,
- void *req_cookie, ldlm_mode_t mode, int flags,
+ void *req_cookie, ldlm_mode_t mode, __u64 flags,
void *data);
static inline struct ofd_thread_info * ofd_info(const struct lu_env *env)
{
struct ofd_thread_info *info = ofd_info(env);
struct lustre_handle lockh;
- int flags = LDLM_AST_DISCARD_DATA, rc = 0;
+ __u64 flags = LDLM_AST_DISCARD_DATA;
+ int rc = 0;
ldlm_policy_data_t policy = {
.l_extent = { 0, OBD_OBJECT_EOF }
};
/** lock value block */
struct ost_lvb ols_lvb;
/** DLM flags with which osc_lock::ols_lock was enqueued */
- int ols_flags;
+ __u64 ols_flags;
/** osc_lock::ols_lock handle */
struct lustre_handle ols_handle;
struct ldlm_enqueue_info ols_einfo;
extern struct ptlrpc_request_set *PTLRPCD_SET;
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- int *flags, ldlm_policy_data_t *policy,
+ __u64 *flags, ldlm_policy_data_t *policy,
struct ost_lvb *lvb, int kms_valid,
obd_enqueue_update_f upcall,
void *cookie, struct ldlm_enqueue_info *einfo,
policy->l_extent.gid = d->cld_gid;
}
-static int osc_enq2ldlm_flags(__u32 enqflags)
+static __u64 osc_enq2ldlm_flags(__u32 enqflags)
{
- int result = 0;
+ __u64 result = 0;
LASSERT((enqflags & ~CEF_MASK) == 0);
}
static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
- int flags, void *data)
+ __u64 flags, void *data)
{
struct cl_env_nest nest;
struct lu_env *env;
/*
* XXX print ldlm lock and einfo properly.
*/
- (*p)(env, cookie, "%p %08x "LPX64" %d %p ",
+ (*p)(env, cookie, "%p %#16llx "LPX64" %d %p ",
lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
lock->ols_state, lock->ols_owner);
osc_lvb_print(env, cookie, p, &lock->ols_lvb);
static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
obd_enqueue_update_f upcall, void *cookie,
- int *flags, int agl, int rc)
+ __u64 *flags, int agl, int rc)
{
int intent = *flags & LDLM_FL_HAS_INTENT;
ENTRY;
__u32 mode;
struct ost_lvb *lvb;
__u32 lvb_len;
- int *flags = aa->oa_flags;
+ __u64 *flags = aa->oa_flags;
/* Make a local copy of a lock handle and a mode, because aa->oa_*
* might be freed anytime after lock upcall has been called. */
* is excluded from the cluster -- such scenarious make the life difficult, so
* release locks just after they are obtained. */
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
- int *flags, ldlm_policy_data_t *policy,
+ __u64 *flags, ldlm_policy_data_t *policy,
struct ost_lvb *lvb, int kms_valid,
obd_enqueue_update_f upcall, void *cookie,
struct ldlm_enqueue_info *einfo,
*/
static int ost_lock_get(struct obd_export *exp, struct obdo *oa,
__u64 start, __u64 count, struct lustre_handle *lh,
- int mode, int flags)
+ int mode, __u64 flags)
{
struct ldlm_res_id res_id;
ldlm_policy_data_t policy;
struct obd_trans_info *oti)
{
struct ost_body *body, *repbody;
- int rc, flags = 0;
+ __u64 flags = 0;
struct lustre_handle lh = {0,};
+ int rc;
ENTRY;
/* check that we do support OBD_CONNECT_TRUNCLOCK. */
struct obd_ioobj *obj, struct niobuf_remote *nb,
struct lustre_handle *lh)
{
- int flags = 0;
+ __u64 flags = 0;
int nrbufs = obj->ioo_bufcnt;
struct ldlm_res_id res_id;
ldlm_policy_data_t policy;
struct quota_body *rep_qbody = NULL, *req_qbody;
struct ldlm_intent *lit;
struct qsd_async_args *aa = (struct qsd_async_args *)arg;
- int flags = LDLM_FL_HAS_INTENT;
+ __u64 flags = LDLM_FL_HAS_INTENT;
ENTRY;
LASSERT(aa->aa_exp);
struct qsd_async_args *aa = NULL;
struct ldlm_intent *lit;
struct quota_body *req_qbody;
- int rc, flags = LDLM_FL_HAS_INTENT;
+ __u64 flags = LDLM_FL_HAS_INTENT;
+ int rc;
ENTRY;
LASSERT(exp != NULL);