-lustre_dlm_flags.[ch]
+lustre_dlm_flags*.[ch]
!Makefile
rm -f $(top_builddir)/lustre/include/$@ ; \
sed '/It has been AutoGen-ed/s/-ed.*/-ed/;s/ *$$//' \
$@ > $(top_builddir)/lustre/include/$@ ; \
+ cp -fp lustre_dlm_flags_wshark.c \
+ $(top_builddir)/lustre/contrib/wireshark/. ; \
else cp $(top_builddir)/lustre/include/$@ . ; fi
install : $(targ) install.sh
flag[ 1] = {
f-name = block_granted;
f-mask = on_wire, blocked;
- f-desc = 'Server placed lock on granted list, or a recovering client wants '
- 'the lock added to the granted list, no questions asked.';
+ f-desc = <<- _EOF_
+ Server placed lock on granted list, or a recovering client wants
+ the lock added to the granted list, no questions asked.
+ _EOF_;
};
flag[ 2] = {
flag[32] = {
f-name = fail_loc;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = <<- _EOF_
Used for marking lock as a target for -EINTR while cp_ast sleep
emulation + race with upcoming bl_ast.
flag[33] = {
f-name = skipped;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = <<- _EOF_
Used while processing the unused list to know that we have already
handled this lock and decided to skip it.
flag[34] = {
f-name = cbpending;
- f-mask = local_only, hide_lock;
+ f-mask = off_wire /* , hide_lock */;
f-desc = 'this lock is being destroyed';
};
flag[35] = {
f-name = wait_noreproc;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = 'not a real flag, not saved in lock';
};
flag[36] = {
f-name = cancel;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = 'cancellation callback already run';
};
flag[37] = {
f-name = local_only;
- f-mask = local_only, hide_lock;
- f-desc = 'whatever it might mean';
+ f-mask = off_wire /* , hide_lock */;
+ f-desc = 'whatever it might mean -- never transmitted?';
};
flag[38] = {
f-name = failed;
- f-mask = local_only, gone, hide_lock;
+ f-mask = off_wire, gone /* , hide_lock */;
f-desc = "don't run the cancel callback under ldlm_cli_cancel_unused";
};
flag[39] = {
f-name = canceling;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = 'lock cancel has already been sent';
};
flag[40] = {
f-name = local;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = 'local lock (ie, no srv/cli split)';
};
flag[41] = {
f-name = lvb_ready;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = <<- _EOF_
XXX FIXME: This is being added to b_size as a low-risk fix to the
fact that the LVB filling happens _after_ the lock has been granted,
flag[42] = {
f-name = kms_ignore;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = <<- _EOF_
A lock contributes to the known minimum size (KMS) calculation until
it has finished the part of its cancelation that performs write back
flag[43] = {
f-name = cp_reqd;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = 'completion AST to be executed';
};
flag[44] = {
f-name = cleaned;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = 'cleanup_resource has already handled the lock';
};
flag[45] = {
f-name = atomic_cb;
- f-mask = local_only, hide_lock;
+ f-mask = off_wire /* , hide_lock */;
f-desc = <<- _EOF_
optimization hint: LDLM can run blocking callback from current context
w/o involving separate thread. in order to decrease cs rate
flag[46] = {
f-name = bl_ast;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = <<- _EOF_
It may happen that a client initiates two operations, e.g. unlink
and mkdir, such that the server sends a blocking AST for conflicting
is taken by the first operation. LDLM_FL_BL_AST is set by
ldlm_callback_handler() in the lock to prevent the Early Lock Cancel
(ELC) code from cancelling it.
-
- LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock
- cache is dropped to let ldlm_callback_handler() return EINVAL to the
- server. It is used when ELC RPC is already prepared and is waiting
- for rpc_lock, too late to send a separate CANCEL RPC.
_EOF_;
};
flag[47] = {
f-name = bl_done;
- f-mask = local_only;
- f-desc = 'whatever it might mean';
+ f-mask = off_wire;
+ f-desc = <<- _EOF_
+ Set by ldlm_cancel_callback() when lock cache is dropped to let
+ ldlm_callback_handler() return EINVAL to the server. It is used when
+ ELC RPC is already prepared and is waiting for rpc_lock, too late to
+ send a separate CANCEL RPC.
+ _EOF_;
};
flag[48] = {
f-name = no_lru;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = <<- _EOF_
Don't put lock into the LRU list, so that it is not canceled due to
aging. Used by MGC locks, they are cancelled only at unmount or by
flag[49] = {
f-name = fail_notified;
- f-mask = local_only, gone;
+ f-mask = off_wire, gone;
f-desc = <<- _EOF_
Set for locks that failed and where the server has been notified.
flag[50] = {
f-name = destroyed;
- f-mask = local_only, gone;
+ f-mask = off_wire, gone;
f-desc = <<- _EOF_
Set for locks that were removed from class hash table and will be
destroyed when last reference to them is released. Set by
flag[51] = {
f-name = server_lock;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = 'flag whether this is a server namespace lock';
};
flag[52] = {
f-name = res_locked;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = <<- _EOF_
It's set in lock_res_and_lock() and unset in unlock_res_and_lock().
flag[53] = {
f-name = waited;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = <<- _EOF_
It's set once we call ldlm_add_waiting_lock_res_locked()
to start the lock-timeout timer and it will never be reset.
flag[54] = {
f-name = ns_srv;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = 'Flag whether this is a server namespace lock.';
};
flag[55] = {
f-name = excl;
- f-mask = local_only;
+ f-mask = off_wire;
f-desc = 'Flag whether this lock can be reused. Used by exclusive open.';
};
#define ldlm_clear_%-20s LDLM_CLEAR_FLAG((_l), 1ULL << %2u)\n'
acc_fmt=''
tmpfile=[=(base-name)=]-$$.tmp
-exec 8>&1 1> $tmpfile
+exec 7>&1 1> $tmpfile
[=
FOR flag
ENDFOR flag
=]
-exec 1>&8 8>&-
+exec 1>&7 7>&-
fmt='\n/** l_flags bits marked as "%s" bits */
#define LDLM_FL_%-22s 0x%016XULL\n'
printf "$fmt" all_flags ALL_FLAGS_MASK $allbits
=]
/** test for ldlm_lock flag bit set */
-#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
+#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
+
+/** multi-bit test: are any of mask bits set? */
+#define LDLM_HAVE_MASK(_l, _m) (((_l)->l_flags & LDLM_FL_##_m##_MASK) != 0)
/** set a ldlm_lock flag bit */
-#define LDLM_SET_FLAG(_l, _b) (((_l)->l_flags |= (_b))
+#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
/** clear a ldlm_lock flag bit */
-#define LDLM_CLEAR_FLAG(_l, _b) (((_l)->l_flags &= ~(_b))
-
-/** Mask of flags inherited from parent lock when doing intents. */
-#define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK
-
-/** Mask of Flags sent in AST lock_flags to map into the receiving lock. */
-#define LDLM_AST_FLAGS LDLM_FL_AST_MASK
+#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
/** @} subgroup */
/** @} group */
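As an aside on how these accessors end up being used (the call sites converted to ldlm_is_*()/ldlm_set_*() later in this patch follow the same pattern), here is a minimal, self-contained usage sketch. It is illustrative only and not part of the patch: the stub_lock structure stands in for struct ldlm_lock, and the LDLM_FL_NS_SRV and LDLM_FL_GONE_MASK values are copied from the bit and mask definitions in the generated header.

/* illustrative sketch: stub_lock stands in for struct ldlm_lock */
#include <stdio.h>

struct stub_lock { unsigned long long l_flags; };

#define LDLM_FL_NS_SRV          0x0040000000000000ULL /* bit 54 */
#define LDLM_FL_GONE_MASK       0x0006004000000000ULL /* "gone" bits */

#define LDLM_TEST_FLAG(_l, _b)  (((_l)->l_flags & (_b)) != 0)
#define LDLM_SET_FLAG(_l, _b)   ((_l)->l_flags |= (_b))
#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
#define LDLM_HAVE_MASK(_l, _m)  (((_l)->l_flags & LDLM_FL_##_m##_MASK) != 0)

int main(void)
{
	struct stub_lock lock = { 0 };

	LDLM_SET_FLAG(&lock, LDLM_FL_NS_SRV);
	printf("ns_srv set: %d\n", (int)LDLM_TEST_FLAG(&lock, LDLM_FL_NS_SRV));
	printf("any GONE bit: %d\n", (int)LDLM_HAVE_MASK(&lock, GONE));
	LDLM_CLEAR_FLAG(&lock, LDLM_FL_NS_SRV);
	return 0;
}

Built standalone, this prints "ns_srv set: 1" and "any GONE bit: 0", since bit 54 is not part of the gone mask.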
-#ifdef WIRESHARK_COMPILE[=
+#endif /* LDLM_ALL_FLAGS_MASK */
+[=
+(out-push-new (string-append (base-name) "_wshark.c"))
+(define flags-vals "")
+(define dissect "")
+(define init-text "")
+
+(define up-name "")
+(define down-name "")
+
+(define dissect-fmt
+ " dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_%s);\n")
+(out-push-new) \=]
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_%1$s,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_%2$s",
+ /* abbrev */ "lustre.ldlm_fl_%1$s",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_%2$s,
+ /* blurb */ %3$s,
+ /* id */ HFILL
+ }
+ },
+[= (define init-fmt (out-pop #t)) \=]
+/**
+ * \file [=(out-name)=]
+ *
+ * wireshark definitions. This file contains the ldlm lock flag bits
+ * that can be transmitted over the wire. There are many other bits,
+ * but they are not transmitted and not handled here.
+ */
+#ifdef WSHARK_HEAD
+[=
+
FOR flag =][=
- (sprintf "\nstatic int hf_lustre_ldlm_fl_%-20s= -1;"
- (string-downcase! (get "f-name")) ) =][=
-ENDFOR flag =]
-const value_string lustre_ldlm_flags_vals[] = {[=
+ (if (match-value? = "f-mask" "on_wire") (begin
+ (set! temp-txt (get "f-name"))
+ (set! up-name (string-upcase (string->c-name! temp-txt)))
+ (set! down-name (string-downcase temp-txt))
-FOR flag =][=
- (define up-name (string-upcase! (string->c-name! (get "f-name"))))
- (sprintf "\n {LDLM_FL_%-20s \"LDLM_FL_%s\"}," (string-append up-name ",")
- up-name) =][=
+ (set! flags-vals (string-append flags-vals (sprintf
+ "\n {LDLM_FL_%-20s \"LDLM_FL_%s\"},"
+ (string-append up-name ",") up-name )))
+
+ (set! dissect (string-append dissect (sprintf dissect-fmt
+ down-name)))
+
+ (set! init-text (string-append init-text (sprintf init-fmt
+ down-name up-name (c-string (get "f-desc")) )))
+
+ (ag-fprintf 0 "\nstatic int hf_lustre_ldlm_fl_%-20s= -1;"
+ down-name)
+ ) ) =][=
ENDFOR flag =]
+
+const value_string lustre_ldlm_flags_vals[] = {[= (. flags-vals) =]
{ 0, NULL }
};
-#endif /* WIRESHARK_COMPILE */
-[= #
+
+/* IDL: struct ldlm_reply { */
+/* IDL: uint32 lock_flags; */
+/* IDL: uint32 lock_padding; */
+/* IDL: struct ldlm_lock_desc { */
+/* IDL: } lock_desc; */
+/* IDL: struct lustre_handle { */
+/* IDL: } lock_handle; */
+/* IDL: uint64 lock_policy_res1; */
+/* IDL: uint64 lock_policy_res2; */
+/* IDL: } */
+
+static int
+lustre_dissect_element_ldlm_lock_flags(
+ tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_,
+ proto_tree *parent_tree _U_, int hf_index _U_)
+{
+ proto_item *item = NULL;
+ proto_tree *tree = NULL;
+
+ if (parent_tree) {
+ item = proto_tree_add_item(parent_tree,hf_index, tvb, offset, 4, TRUE);
+ tree = proto_item_add_subtree(item, ett_lustre_ldlm_lock_flags);
+ }
+[= (shell
+"sed '$s/^/ /;$i\\
+ return' <<- \\_EOF_\n" dissect "_EOF_"
+) =]
+}
+#endif /* WSHARK_HEAD */
+
+#ifdef WSHARK_INIT_DATA
+[=
+
+(emit init-text
+"\n#endif /* WSHARK_INIT_DATA */\n")
+(out-pop)
+
+=][= #
// TEST CODE =][=
IF (getenv "TESTING") =][=
* indent-tabs-mode: t
* End:
-\=]
-#endif /* LDLM_ALL_FLAGS_MASK */
+=]
# Note: When using the cbuild script leave the macro undefined
# (default: /usr/lib/wireshark/plugins/$(WS_VERSION))
#
-# For non-root and non-rpmbuilds you might want to set the value to ${HOME}/.wireshark/plugins
+# For non-root and non-rpmbuilds you might want to set the value to
+# ${HOME}/.wireshark/plugins
PLUGIN_DIR =
-CFLAGS = -DINET6 -D_U_=__attribute__\(\(unused\)\) -Wall -Wpointer-arith -g -DXTHREADS -D_REENTRANT -DXUSE_MTSAFE_API -fPIC -DPIC
+CFLAGS = -DINET6 -D_U_=__attribute__\(\(unused\)\) -Wall -Wpointer-arith -g \
+ -DXTHREADS -D_REENTRANT -DXUSE_MTSAFE_API -fPIC -DPIC
ifdef WS_HOME
-#INCS = $(shell echo "-I${WS_HOME} `pkg-config --libs --cflags glib-2.0`")
-INCS = $(shell echo "-I${WS_HOME} $(PLUGIN_COMPILE_FLAGS)") -DHAVE_CONFIG_H
+#INCS = $(shell echo "-I${WS_HOME} `./wsconfig.sh --libs --cflags glib-2.0`")
+INCS := $(shell echo "-I${WS_HOME} $(PLUGIN_COMPILE_FLAGS)") -DHAVE_CONFIG_H
else
-INCS = $(shell pkg-config --libs --cflags wireshark) $(shell echo "$(PLUGIN_COMPILE_FLAGS)") -DHAVE_CONFIG_H
-WS_VERSION = $(shell pkg-config --modversion wireshark)
-LIBDIR = $(shell pkg-config --variable=libdir wireshark)
-CHECK=pkg-config --atleast-version=${MIN_WIRESHARK_VERSION} wireshark
+INCS := $(shell ./wsconfig.sh --libs --cflags wireshark) \
+ $(shell ./wsconfig.sh --libs --cflags glib-2.0) \
+ -I../../include \
+ $(shell echo "$(PLUGIN_COMPILE_FLAGS)") -DHAVE_CONFIG_H
+WS_VERSION := $(shell ./wsconfig.sh --modversion wireshark)
+LIBDIR := $(shell ./wsconfig.sh --variable=libdir wireshark)
+CHECK=./wsconfig.sh --atleast-version=${MIN_WIRESHARK_VERSION} wireshark
endif
CFLAGS += $(INCS)
CC = gcc
-OBJS_LNET = $(foreach src, $(SRCS_LNET), $(src:.c=.o))
-OBJS_LUSTRE = $(foreach src, $(SRCS_LUSTRE), $(src:.c=.o))
+OBJS_LNET := $(foreach src, $(SRCS_LNET), $(src:.c=.o))
+OBJS_LUSTRE := $(foreach src, $(SRCS_LUSTRE), $(src:.c=.o))
PLUGINS=lnet.so lustre.so
endif
-all: check $(PLUGINS)
+all: check $(PLUGINS) lustre_dlm_flags_wshark.c
+
+lustre_dlm_flags_wshark.c :
+ cd ../../../contrib/bit-masks ; \
+ make
+ test -f lustre_dlm_flags_wshark.c
check:
@if ! ${CHECK}; then\
--- /dev/null
+/**
+ * \file lustre_dlm_flags_wshark.c
+ *
+ * wireshark definitions. This file contains the ldlm lock flag bits
+ * that can be transmitted over the wire. There are many other bits,
+ * but they are not transmitted and not handled here.
+ */
+#ifdef WSHARK_HEAD
+
+static int hf_lustre_ldlm_fl_lock_changed = -1;
+static int hf_lustre_ldlm_fl_block_granted = -1;
+static int hf_lustre_ldlm_fl_block_conv = -1;
+static int hf_lustre_ldlm_fl_block_wait = -1;
+static int hf_lustre_ldlm_fl_ast_sent = -1;
+static int hf_lustre_ldlm_fl_replay = -1;
+static int hf_lustre_ldlm_fl_intent_only = -1;
+static int hf_lustre_ldlm_fl_has_intent = -1;
+static int hf_lustre_ldlm_fl_flock_deadlock = -1;
+static int hf_lustre_ldlm_fl_discard_data = -1;
+static int hf_lustre_ldlm_fl_no_timeout = -1;
+static int hf_lustre_ldlm_fl_block_nowait = -1;
+static int hf_lustre_ldlm_fl_test_lock = -1;
+static int hf_lustre_ldlm_fl_cancel_on_block = -1;
+static int hf_lustre_ldlm_fl_deny_on_contention = -1;
+static int hf_lustre_ldlm_fl_ast_discard_data = -1;
+
+const value_string lustre_ldlm_flags_vals[] = {
+ {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
+ {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
+ {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
+ {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
+ {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
+ {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
+ {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
+ {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
+ {LDLM_FL_FLOCK_DEADLOCK, "LDLM_FL_FLOCK_DEADLOCK"},
+ {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
+ {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
+ {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
+ {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
+ {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
+ {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
+ {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
+ { 0, NULL }
+};
+
+/* IDL: struct ldlm_reply { */
+/* IDL: uint32 lock_flags; */
+/* IDL: uint32 lock_padding; */
+/* IDL: struct ldlm_lock_desc { */
+/* IDL: } lock_desc; */
+/* IDL: struct lustre_handle { */
+/* IDL: } lock_handle; */
+/* IDL: uint64 lock_policy_res1; */
+/* IDL: uint64 lock_policy_res2; */
+/* IDL: } */
+
+static int
+lustre_dissect_element_ldlm_lock_flags(
+ tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_,
+ proto_tree *parent_tree _U_, int hf_index _U_)
+{
+ proto_item *item = NULL;
+ proto_tree *tree = NULL;
+
+ if (parent_tree) {
+ item = proto_tree_add_item(parent_tree,hf_index, tvb, offset, 4, TRUE);
+ tree = proto_item_add_subtree(item, ett_lustre_ldlm_lock_flags);
+ }
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_lock_changed);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_granted);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_conv);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_wait);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_ast_sent);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_replay);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_intent_only);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_has_intent);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_flock_deadlock);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_discard_data);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_no_timeout);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_nowait);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_test_lock);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_cancel_on_block);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_deny_on_contention);
+ return
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_ast_discard_data);
+}
+#endif /* WSHARK_HEAD */
+
+#ifdef WSHARK_INIT_DATA
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_lock_changed,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_LOCK_CHANGED",
+ /* abbrev */ "lustre.ldlm_fl_lock_changed",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_LOCK_CHANGED,
+ /* blurb */ "extent, mode, or resource changed",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_block_granted,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_BLOCK_GRANTED",
+ /* abbrev */ "lustre.ldlm_fl_block_granted",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_BLOCK_GRANTED,
+ /* blurb */ "Server placed lock on granted list, or a recovering client wants\n"
+ "the lock added to the granted list, no questions asked.",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_block_conv,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_BLOCK_CONV",
+ /* abbrev */ "lustre.ldlm_fl_block_conv",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_BLOCK_CONV,
+ /* blurb */ "Server placed lock on conv list, or a recovering client wants the lock\n"
+ "added to the conv list, no questions asked.",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_block_wait,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_BLOCK_WAIT",
+ /* abbrev */ "lustre.ldlm_fl_block_wait",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_BLOCK_WAIT,
+ /* blurb */ "Server placed lock on wait list, or a recovering client wants\n"
+ "the lock added to the wait list, no questions asked.",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_ast_sent,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_AST_SENT",
+ /* abbrev */ "lustre.ldlm_fl_ast_sent",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_AST_SENT,
+ /* blurb */ "blocking or cancel packet was queued for sending.",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_replay,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_REPLAY",
+ /* abbrev */ "lustre.ldlm_fl_replay",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_REPLAY,
+ /* blurb */ "Lock is being replayed. This could probably be implied by the fact that\n"
+ "one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous.",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_intent_only,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_INTENT_ONLY",
+ /* abbrev */ "lustre.ldlm_fl_intent_only",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_INTENT_ONLY,
+ /* blurb */ "Don't grant lock, just do intent.",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_has_intent,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_HAS_INTENT",
+ /* abbrev */ "lustre.ldlm_fl_has_intent",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_HAS_INTENT,
+ /* blurb */ "lock request has intent",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_flock_deadlock,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_FLOCK_DEADLOCK",
+ /* abbrev */ "lustre.ldlm_fl_flock_deadlock",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_FLOCK_DEADLOCK,
+ /* blurb */ "flock deadlock detected",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_discard_data,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_DISCARD_DATA",
+ /* abbrev */ "lustre.ldlm_fl_discard_data",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_DISCARD_DATA,
+ /* blurb */ "discard (no writeback) on cancel",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_no_timeout,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_NO_TIMEOUT",
+ /* abbrev */ "lustre.ldlm_fl_no_timeout",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_NO_TIMEOUT,
+ /* blurb */ "Blocked by group lock - wait indefinitely",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_block_nowait,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_BLOCK_NOWAIT",
+ /* abbrev */ "lustre.ldlm_fl_block_nowait",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_BLOCK_NOWAIT,
+ /* blurb */ "Server told not to wait if blocked. For AGL, OST will not send\n"
+ "glimpse callback.",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_test_lock,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_TEST_LOCK",
+ /* abbrev */ "lustre.ldlm_fl_test_lock",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_TEST_LOCK,
+ /* blurb */ "return blocking lock",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_cancel_on_block,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_CANCEL_ON_BLOCK",
+ /* abbrev */ "lustre.ldlm_fl_cancel_on_block",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_CANCEL_ON_BLOCK,
+      /* blurb */ "Immediately cancel such locks when they block some other locks. Send\n"
+ "cancel notification to original lock holder, but expect no reply. This is\n"
+                   "for clients (like liblustre) that cannot be expected to reliably respond\n"
+                   "to blocking ASTs.",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_deny_on_contention,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_DENY_ON_CONTENTION",
+ /* abbrev */ "lustre.ldlm_fl_deny_on_contention",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_DENY_ON_CONTENTION,
+ /* blurb */ "measure lock contention and return -EUSERS if locking contention is high",
+ /* id */ HFILL
+ }
+ },
+ {
+ /* p_id */ &hf_lustre_ldlm_fl_ast_discard_data,
+ /* hfinfo */ {
+ /* name */ "LDLM_FL_AST_DISCARD_DATA",
+ /* abbrev */ "lustre.ldlm_fl_ast_discard_data",
+ /* type */ FT_BOOLEAN,
+ /* display */ 32,
+ /* strings */ TFS(&lnet_flags_set_truth),
+ /* bitmask */ LDLM_FL_AST_DISCARD_DATA,
+ /* blurb */ "These are flags that are mapped into the flags and ASTs of blocking locks\n"
+ "Add FL_DISCARD to blocking ASTs",
+ /* id */ HFILL
+ }
+ },
+
+#endif /* WSHARK_INIT_DATA */
guint64 match;
guint32 msg_type;
-
+/*
lnet_request_val_t* conversation_val ;
-
+*/
if (check_col(pinfo->cinfo, COL_PROTOCOL)) {
col_set_str(pinfo->cinfo, COL_PROTOCOL, "Lnet");
}
}
- conversation_val = get_lnet_conv(pinfo , lnet_request_hash, match );
- /* proto_tree_add_text(tree, tvb, 0 , 0, "match = %" G_GINT64_MODIFIER "u parent = %d", conversation_val -> match_bits , conversation_val -> packet_num_parent); */
+ /* conversation_val = */
+ get_lnet_conv(pinfo , lnet_request_hash, match );
+ /* proto_tree_add_text(tree, tvb, 0 , 0, "match = %"
+ G_GINT64_MODIFIER "u parent = %d",
+ conversation_val -> match_bits ,
+ conversation_val -> packet_num_parent); */
/* padding */
#include <epan/packet.h>
#include <epan/dissectors/packet-windows-common.h>
+#include "lustre_dlm_flags.h"
const true_false_string lnet_flags_set_truth = { "Set", "Unset" };
FLD_FIRST_OPC = FLD_QUERY
};
-#define WIRESHARK_COMPILE
-#include "lustre_dlm_flags.h"
-
#define LDLM_ENQUEUE (101)
#define LDLM_CONVERT (102)
#define LDLM_CANCEL (103)
/* proto declaration */
static gint proto_lustre = -1;
+typedef int (dissect_func)(
+ tvbuff_t *tvb, gint offset, packet_info *pinfo _U_, proto_tree *tree,
+ int hfindex);
+static dissect_func dissect_uint64, dissect_uint32, dissect_uint16, dissect_uint8;
+#define WSHARK_HEAD
+#include "lustre_dlm_flags_wshark.c"
+#undef WSHARK_HEAD
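The guarded include above, together with its WSHARK_INIT_DATA counterpart further down inside the hf array, pulls two different sections out of one generated file. Below is a minimal, self-contained sketch of that include-twice idiom; it is illustrative only, with hypothetical names (DEMO_HEAD, DEMO_INIT_DATA, hf_demo_field) standing in for WSHARK_HEAD, WSHARK_INIT_DATA and the generated hf_lustre_ldlm_fl_* symbols, and it re-reads itself via #include __FILE__ so the fragment and its consumer fit in one file, whereas the patch keeps the fragment in the separate lustre_dlm_flags_wshark.c.

/* include_twice_demo.c: illustrative only, hypothetical names throughout */
#ifndef INCLUDE_TWICE_DEMO_DRIVER
#define INCLUDE_TWICE_DEMO_DRIVER
#include <stdio.h>

#define DEMO_HEAD                 /* first pass: emit the declarations */
#include __FILE__
#undef DEMO_HEAD

static const int values[] = {
#define DEMO_INIT_DATA            /* second pass: emit the array entries */
#include __FILE__
#undef DEMO_INIT_DATA
};

int main(void)
{
	printf("hf_demo_field=%d values[0]=%d\n", hf_demo_field, values[0]);
	return 0;
}

#else /* the "generated" fragment: two sections selected by guard macros */
#ifdef DEMO_HEAD
static int hf_demo_field = -1;
#endif
#ifdef DEMO_INIT_DATA
	42,
#endif
#endif /* INCLUDE_TWICE_DEMO_DRIVER */

Compiled on its own and run, it prints "hf_demo_field=-1 values[0]=42": the first pass emitted the declaration, the second emitted the array entry in place.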
static int ldlm_opcode_process(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree * tree _U_, guint64 intent_opc _U_) ;
-static int lustre_dissect_element_ldlm_lock_flags(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *parent_tree _U_, int hf_index _U_);
static int add_extra_padding(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree * tree _U_) ;
return offset;
}
-
-/* IDL: struct ldlm_reply { */
-/* IDL: uint32 lock_flags; */
-/* IDL: uint32 lock_padding; */
-/* IDL: struct ldlm_lock_desc { */
-/* IDL: } lock_desc; */
-/* IDL: struct lustre_handle { */
-/* IDL: } lock_handle; */
-/* IDL: uint64 lock_policy_res1; */
-/* IDL: uint64 lock_policy_res2; */
-/* IDL: } */
-
-static int
-lustre_dissect_element_ldlm_lock_flags(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *parent_tree _U_, int hf_index _U_)
-{
- proto_item *item = NULL;
- proto_tree *tree = NULL;
-
- if (parent_tree) {
- item = proto_tree_add_item(parent_tree,hf_index, tvb, offset, 4, TRUE);
- tree = proto_item_add_subtree(item, ett_lustre_ldlm_lock_flags);
- }
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_ast_discard_data);
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_deny_on_contention);
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_bl_done );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_bl_ast );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_atomic_cb );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_cleaned );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_cp_reqd );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_cancel_on_block );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_no_lru );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_kms_ignore );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_lvb_ready );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_test_lock );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_nowait );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_no_timeout );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_discard_data );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_warn );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_local );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_canceling );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_has_intent );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_failed );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_local_only );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_intent_only );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_replay );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_cancel );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_wait_noreproc );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_ast_sent );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_cbpending );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_wait );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_conv );
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_granted );
- offset=dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_lock_changed );
- return offset;
-}
-
static int
lustre_dissect_element_ldlm_reply_lock_padding(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_)
{
{ &hf_lustre_ldlm_reply_lock_flags,
{ "Lock Flags", "lustre.ldlm_reply.lock_flags", FT_UINT32,BASE_HEX, NULL, 0, "", HFILL }},
- {&hf_lustre_ldlm_fl_lock_changed, {"LDLM_FL_LOCK_CHANGED", "lustre.ldlm_fl_lock_changed", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_LOCK_CHANGED, "", HFILL } },
- {&hf_lustre_ldlm_fl_block_granted, {"LDLM_FL_BLOCK_GRANTED", "lustre.ldlm_fl_block_granted", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BLOCK_GRANTED, "", HFILL } },
- {&hf_lustre_ldlm_fl_block_conv, {"LDLM_FL_BLOCK_CONV", "lustre.ldlm_fl_block_conv", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BLOCK_CONV, "", HFILL } },
- {&hf_lustre_ldlm_fl_block_wait, {"LDLM_FL_BLOCK_WAIT", "lustre.ldlm_fl_block_wait", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BLOCK_WAIT, "", HFILL } },
- {&hf_lustre_ldlm_fl_cbpending, {"LDLM_FL_CBPENDING", "lustre.ldlm_fl_cbpending", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_CBPENDING, "", HFILL } },
- {&hf_lustre_ldlm_fl_ast_sent, {"LDLM_FL_AST_SENT", "lustre.ldlm_fl_ast_sent", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_AST_SENT, "", HFILL } },
- {&hf_lustre_ldlm_fl_wait_noreproc, {"LDLM_FL_WAIT_NOREPROC", "lustre.ldlm_fl_wait_noreproc", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_WAIT_NOREPROC, "", HFILL } },
- {&hf_lustre_ldlm_fl_cancel, {"LDLM_FL_CANCEL", "lustre.ldlm_fl_cancel", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_CANCEL, "", HFILL } },
- {&hf_lustre_ldlm_fl_replay, {"LDLM_FL_REPLAY", "lustre.ldlm_fl_replay", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_REPLAY, "", HFILL } },
- {&hf_lustre_ldlm_fl_intent_only, {"LDLM_FL_INTENT_ONLY", "lustre.ldlm_fl_intent_only", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_INTENT_ONLY, "", HFILL } },
- {&hf_lustre_ldlm_fl_local_only, {"LDLM_FL_LOCAL_ONLY", "lustre.ldlm_fl_local_only", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_LOCAL_ONLY, "", HFILL } },
- {&hf_lustre_ldlm_fl_failed, {"LDLM_FL_FAILED", "lustre.ldlm_fl_failed", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_FAILED, "", HFILL } },
- {&hf_lustre_ldlm_fl_has_intent, {"LDLM_FL_HAS_INTENT", "lustre.ldlm_fl_has_intent", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_HAS_INTENT, "", HFILL } },
- {&hf_lustre_ldlm_fl_canceling, {"LDLM_FL_CANCELING", "lustre.ldlm_fl_canceling", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_CANCELING, "", HFILL } },
- {&hf_lustre_ldlm_fl_local, {"LDLM_FL_LOCAL", "lustre.ldlm_fl_local", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_LOCAL, "", HFILL } },
- {&hf_lustre_ldlm_fl_warn, {"LDLM_FL_WARN", "lustre.ldlm_fl_warn", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_WARN, "", HFILL } },
- {&hf_lustre_ldlm_fl_discard_data, {"LDLM_FL_DISCARD_DATA", "lustre.ldlm_fl_discard_data", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_DISCARD_DATA, "", HFILL } },
- {&hf_lustre_ldlm_fl_no_timeout, {"LDLM_FL_NO_TIMEOUT", "lustre.ldlm_fl_no_timeout", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_NO_TIMEOUT, "", HFILL } },
- {&hf_lustre_ldlm_fl_block_nowait, {"LDLM_FL_BLOCK_NOWAIT", "lustre.ldlm_fl_block_nowait", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BLOCK_NOWAIT, "", HFILL } },
- {&hf_lustre_ldlm_fl_test_lock, {"LDLM_FL_TEST_LOCK", "lustre.ldlm_fl_test_lock", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_TEST_LOCK, "", HFILL } },
- {&hf_lustre_ldlm_fl_lvb_ready, {"LDLM_FL_LVB_READY", "lustre.ldlm_fl_lvb_ready", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_LVB_READY, "", HFILL } },
- {&hf_lustre_ldlm_fl_kms_ignore, {"LDLM_FL_KMS_IGNORE", "lustre.ldlm_fl_kms_ignore", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_KMS_IGNORE, "", HFILL } },
- {&hf_lustre_ldlm_fl_no_lru, {"LDLM_FL_NO_LRU", "lustre.ldlm_fl_no_lru", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_NO_LRU, "", HFILL } },
- {&hf_lustre_ldlm_fl_cancel_on_block, {"LDLM_FL_CANCEL_ON_BLOCK", "lustre.ldlm_fl_cancel_on_block", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_CANCEL_ON_BLOCK, "", HFILL } },
- {&hf_lustre_ldlm_fl_cp_reqd, {"LDLM_FL_CP_REQD", "lustre.ldlm_fl_cp_reqd", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_CP_REQD, "", HFILL } },
- {&hf_lustre_ldlm_fl_cleaned, {"LDLM_FL_CLEANED", "lustre.ldlm_fl_cleaned", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_CLEANED, "", HFILL } },
- {&hf_lustre_ldlm_fl_atomic_cb, {"LDLM_FL_ATOMIC_CB", "lustre.ldlm_fl_atomic_cb", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_ATOMIC_CB, "", HFILL } },
- {&hf_lustre_ldlm_fl_bl_ast, {"LDLM_FL_BL_AST", "lustre.ldlm_fl_bl_ast", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BL_AST, "", HFILL } },
- {&hf_lustre_ldlm_fl_bl_done, {"LDLM_FL_BL_DONE", "lustre.ldlm_fl_bl_done", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BL_DONE, "", HFILL } },
- {&hf_lustre_ldlm_fl_deny_on_contention, {"LDLM_FL_DENY_ON_CONTENTION", "lustre.ldlm_fl_deny_on_contention", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_DENY_ON_CONTENTION, "", HFILL } },
- {&hf_lustre_ldlm_fl_ast_discard_data, {"LDLM_AST_DISCARD_DATA", "lustre.ldlm_ast_discard_data", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_AST_DISCARD_DATA, "", HFILL } },
+#define WSHARK_INIT_DATA
+#include "lustre_dlm_flags_wshark.c"
+#undef WSHARK_INIT_DATA
{ &hf_lustre_obdo_o_misc,
{ "O Misc", "lustre.obdo.o_misc", FT_UINT32, BASE_DEC, NULL, 0, "", HFILL }},
# This file contains all configuration information to build
# `lustre-release/lustre/contrib/wireshark'
-###########################################################################
-# #
-# DOWNLOAD CONFIGURATION
-# #
-###########################################################################
+[[ $1 =~ --.* ]] || {
+ ###########################################################################
+ # #
+ # DOWNLOAD CONFIGURATION
+ # #
+ ###########################################################################
-## BEGIN: -can-edit ##
+ ## BEGIN: -can-edit ##
# URL of directory containing all source tar balls
-export WS_DOWNLOAD_BASE_URL='http://wiresharkdownloads.riverbed.com/wireshark/src/all-versions'
+ export WS_DOWNLOAD_BASE_URL='http://wiresharkdownloads.riverbed.com'
+ WS_DOWNLOAD_BASE_URL+='/wireshark/src/all-versions'
	# wireshark version to be used
-export WS_VERSION='1.6.8'
-## END : -can-edit ##
+ export WS_VERSION='1.6.8'
+ ## END : -can-edit ##
# URL of the wireshark source code tarball
	# Implicit assumption: Wireshark release names follow the naming
	# convention coded in the content of the following variable
-export WS_SOURCE_URL="${WS_DOWNLOAD_BASE_URL}/wireshark-${WS_VERSION}.tar.bz2"
+ export WS_SOURCE_URL="${WS_DOWNLOAD_BASE_URL}/wireshark-${WS_VERSION}.tar.bz2"
-###########################################################################
-# #
-# BUILD ENVIRONMENT #
-# #
-###########################################################################
+ ###########################################################################
+ # #
+ # BUILD ENVIRONMENT #
+ # #
+ ###########################################################################
-## BEGIN: -can-edit ##
+ ## BEGIN: -can-edit ##
 # Space-separated list of RPMs that need to be installed for
 # compilation of wireshark
 # If distributions are 'marked' by the same release file, the content has
 # to be parsed and the variable PREREQUISITE_RPMS has to be set according
 # to the package name(s) used for each distro.
-if [ -r /etc/redhat-release ] ; then
- export PREREQUISITE_RPMS='gtk2 gtk2-devel glib2 libpcap libpcap-devel perl'
-elif [ -r /etc/SuSE-release ] ; then
- export PREREQUISITE_RPMS='gtk2 gtk2-devel glib2 libpcap0 libpcap-devel perl'
-fi
+ if [ -r /etc/redhat-release ] ; then
+ export PREREQUISITE_RPMS='gtk2 gtk2-devel glib2 libpcap libpcap-devel perl'
+ elif [ -r /etc/SuSE-release ] ; then
+ export PREREQUISITE_RPMS='gtk2 gtk2-devel glib2 libpcap0 libpcap-devel perl'
+ fi
 # Include and linker flags needed by Lustre/LNet
 # Only version-independent information should be added here
 # (Back-ticked expressions will be evaluated by the make command)
-export PLUGIN_COMPILE_FLAGS='`pkg-config --libs --cflags glib-2.0`'
-## END : -can-edit ##
+ export PLUGIN_COMPILE_FLAGS='`pkg-config --libs --cflags glib-2.0`'
+ ## END : -can-edit ##
- # Top-level directory to be used to unpack/compile/install wireshark/lustre-git-repo
-export BUILD_DIR=`pwd`
+ # Top-level directory to be used to unpack/compile/install
+ # wireshark/lustre-git-repo
+ export BUILD_DIR=`pwd`
# Directory location of wireshark source code
-export WS_HOME="${BUILD_DIR}/wireshark-${WS_VERSION}"
+ export WS_HOME="${BUILD_DIR}/wireshark-${WS_VERSION}"
# (Relative) path of the wireshark contribution directory
-export LUSTRE_WS_DIR='lustre-release/lustre/contrib/wireshark'
+ export LUSTRE_WS_DIR='lustre-release/lustre/contrib/wireshark'
# RPM internal name for the Lustre/LNet plugins
-export PLUGIN_RPM_NAME='lustre-wireshark-plugins'
+ export PLUGIN_RPM_NAME='lustre-wireshark-plugins'
# TAR command + options to be used to create a bzip2 tarball
-export TAR='/bin/tar jcpf '
+ export TAR='/bin/tar jcpf '
# TAR command + options to be used to unpack a bzip2 tarball
-export UNTAR='/bin/tar jxpf '
+ export UNTAR='/bin/tar jxpf '
+ exit 0
+}
+
+die() {
+ echo "wsconfig error: $*"
+ exit 1
+} 1>&2
+
+# arg1: complete package name, with version
+# arg2: the minimum version
+#
+chk_ver() {
+ act_ver=${1#*-devel-} ; shift
+ act_ver=${act_ver%%-*}
+
+ declare low_ver=$(
+ printf "${act_ver}\n$1\n" | sort -V | head -n1 )
+ test "X$low_ver" = "X$1" || \
+ die "wireshark too old: $act_ver is before $1"
+}
+
+set_var() {
+ case "X$2" in
+ Xlibdir )
+ txt=$(echo $(rpm -q --list $1 | \
+ sed -n '\@/libwire@s@/libwire[^/]*$@@p' | \
+ sort -u) )
+ ;;
+ * )
+ die "unknown variable: $2"
+ ;;
+ esac
+}
+
+set_cflags() {
+ dlst=$(rpm -q --list $pkg | \
+ grep '/usr.*/include.*/wireshark$' | \
+ while read f ; do test -d $f && echo "$f" ; done)
+ rm -f config.h
+ for f in $dlst XX
+ do test -f $f/config.h && ln -s ${f}/config.h .
+ txt+=" -I$f"
+ done
+ test -f config.h || die "cannot find config header"
+}
+
+parse_wireshark() {
+ declare pkg=$(rpm -qa | sed -n '/wireshark-devel/{;p;q;}')
+ declare dlst=
+
+ while test $# -gt 1
+ do
+ txt=
+ case "$1" in
+ --libs )
+ txt=$(rpm -q --list $pkg | \
+ sed -n 's@\.so$@@p' | \
+ sed 's@.*/lib@-l@')
+ ;;
+
+ --cflags )
+ set_cflags
+ ;;
+
+ --modversion )
+ txt=${pkg#wireshark-devel-}
+ txt=${txt%%-*}
+ ;;
+
+ --atleast-version=* )
+ chk_ver ${pkg} ${1#*=}
+ ;;
+
+ --atleast-version )
+ shift
+ chk_ver ${pkg} ${1}
+ ;;
+
+ --variable=* )
+ set_var ${pkg} ${1#*=}
+ ;;
+
+ --variable )
+ shift
+ set_var ${pkg} ${1}
+ ;;
+
+ * )
+ die "unknown option: $1"
+ ;;
+ esac
+ test ${#txt} -gt 0 && \
+ printf "%s" "$(echo ' '$txt)"
+ shift
+ done
+ echo
+}
+
+pkg-config "$@" 2>/dev/null && exit 0
+
+pkg=$#
+case ${!pkg} in
+glib* )
+ fullpkg=$(rpm -qa | grep -E '^glib[2-9].*-devel' | head -n1)
+ dirs=$(rpm -q --list $fullpkg | \
+ while read f ; do test -d $f && echo $f ; done | \
+ grep -F /include)
+ for f in $dirs ; do printf "-I$f " ; done
+ rpm -q --list $fullpkg | \
+ sed -n 's@^.*/libglib@-lglib@p' | \
+ sed -n 's/\.so$//p' | \
+ head -n 1
+ ;;
+
+wireshark )
+ parse_wireshark "$@"
+ ;;
+
+* )
+ echo huh?
+ exit 1
+ ;;
+esac
/** l_flags bits marked as "gone" bits */
#define LDLM_FL_GONE_MASK 0x0006004000000000ULL
-/** l_flags bits marked as "hide_lock" bits */
-#define LDLM_FL_HIDE_LOCK_MASK 0x0000206400000000ULL
-
/** l_flags bits marked as "inherit" bits */
#define LDLM_FL_INHERIT_MASK 0x0000000000800000ULL
-/** l_flags bits marked as "local_only" bits */
-#define LDLM_FL_LOCAL_ONLY_MASK 0x00FFFFFF00000000ULL
+/** l_flags bits marked as "off_wire" bits */
+#define LDLM_FL_OFF_WIRE_MASK 0x00FFFFFF00000000ULL
/** l_flags bits marked as "on_wire" bits */
#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F932FULL
#define ldlm_set_cancel(_l) LDLM_SET_FLAG(( _l), 1ULL << 36)
#define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36)
-/** whatever it might mean */
+/** whatever it might mean -- never transmitted? */
#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL // bit 37
#define ldlm_is_local_only(_l) LDLM_TEST_FLAG(( _l), 1ULL << 37)
#define ldlm_set_local_only(_l) LDLM_SET_FLAG(( _l), 1ULL << 37)
* to this client for the first operation, whereas the second operation
* has canceled this lock and is waiting for rpc_lock which is taken by
* the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in
- * the lock to prevent the Early Lock Cancel (ELC) code from cancelling it.
- *
- * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
- * dropped to let ldlm_callback_handler() return EINVAL to the server. It
- * is used when ELC RPC is already prepared and is waiting for rpc_lock,
- * too late to send a separate CANCEL RPC. */
+ * the lock to prevent the Early Lock Cancel (ELC) code from cancelling it. */
#define LDLM_FL_BL_AST 0x0000400000000000ULL // bit 46
#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG(( _l), 1ULL << 46)
#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG(( _l), 1ULL << 46)
#define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46)
-/** whatever it might mean */
+/**
+ * Set by ldlm_cancel_callback() when lock cache is dropped to let
+ * ldlm_callback_handler() return EINVAL to the server. It is used when
+ * ELC RPC is already prepared and is waiting for rpc_lock, too late to
+ * send a separate CANCEL RPC. */
#define LDLM_FL_BL_DONE 0x0000800000000000ULL // bit 47
#define ldlm_is_bl_done(_l) LDLM_TEST_FLAG(( _l), 1ULL << 47)
#define ldlm_set_bl_done(_l) LDLM_SET_FLAG(( _l), 1ULL << 47)
#define ldlm_clear_excl(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 55)
/** test for ldlm_lock flag bit set */
-#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
+#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
+
+/** multi-bit test: are any of mask bits set? */
+#define LDLM_HAVE_MASK(_l, _m) (((_l)->l_flags & LDLM_FL_##_m##_MASK) != 0)
/** set a ldlm_lock flag bit */
-#define LDLM_SET_FLAG(_l, _b) (((_l)->l_flags |= (_b))
+#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
/** clear a ldlm_lock flag bit */
-#define LDLM_CLEAR_FLAG(_l, _b) (((_l)->l_flags &= ~(_b))
-
-/** Mask of flags inherited from parent lock when doing intents. */
-#define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK
-
-/** Mask of Flags sent in AST lock_flags to map into the receiving lock. */
-#define LDLM_AST_FLAGS LDLM_FL_AST_MASK
+#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
/** @} subgroup */
/** @} group */
-#ifdef WIRESHARK_COMPILE
-static int hf_lustre_ldlm_fl_lock_changed = -1;
-static int hf_lustre_ldlm_fl_block_granted = -1;
-static int hf_lustre_ldlm_fl_block_conv = -1;
-static int hf_lustre_ldlm_fl_block_wait = -1;
-static int hf_lustre_ldlm_fl_ast_sent = -1;
-static int hf_lustre_ldlm_fl_replay = -1;
-static int hf_lustre_ldlm_fl_intent_only = -1;
-static int hf_lustre_ldlm_fl_has_intent = -1;
-static int hf_lustre_ldlm_fl_flock_deadlock = -1;
-static int hf_lustre_ldlm_fl_discard_data = -1;
-static int hf_lustre_ldlm_fl_no_timeout = -1;
-static int hf_lustre_ldlm_fl_block_nowait = -1;
-static int hf_lustre_ldlm_fl_test_lock = -1;
-static int hf_lustre_ldlm_fl_cancel_on_block = -1;
-static int hf_lustre_ldlm_fl_deny_on_contention = -1;
-static int hf_lustre_ldlm_fl_ast_discard_data = -1;
-static int hf_lustre_ldlm_fl_fail_loc = -1;
-static int hf_lustre_ldlm_fl_skipped = -1;
-static int hf_lustre_ldlm_fl_cbpending = -1;
-static int hf_lustre_ldlm_fl_wait_noreproc = -1;
-static int hf_lustre_ldlm_fl_cancel = -1;
-static int hf_lustre_ldlm_fl_local_only = -1;
-static int hf_lustre_ldlm_fl_failed = -1;
-static int hf_lustre_ldlm_fl_canceling = -1;
-static int hf_lustre_ldlm_fl_local = -1;
-static int hf_lustre_ldlm_fl_lvb_ready = -1;
-static int hf_lustre_ldlm_fl_kms_ignore = -1;
-static int hf_lustre_ldlm_fl_cp_reqd = -1;
-static int hf_lustre_ldlm_fl_cleaned = -1;
-static int hf_lustre_ldlm_fl_atomic_cb = -1;
-static int hf_lustre_ldlm_fl_bl_ast = -1;
-static int hf_lustre_ldlm_fl_bl_done = -1;
-static int hf_lustre_ldlm_fl_no_lru = -1;
-static int hf_lustre_ldlm_fl_fail_notified = -1;
-static int hf_lustre_ldlm_fl_destroyed = -1;
-static int hf_lustre_ldlm_fl_server_lock = -1;
-static int hf_lustre_ldlm_fl_res_locked = -1;
-static int hf_lustre_ldlm_fl_waited = -1;
-static int hf_lustre_ldlm_fl_ns_srv = -1;
-static int hf_lustre_ldlm_fl_excl = -1;
-
-const value_string lustre_ldlm_flags_vals[] = {
- {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
- {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
- {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
- {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
- {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
- {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
- {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
- {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
- {LDLM_FL_FLOCK_DEADLOCK, "LDLM_FL_FLOCK_DEADLOCK"},
- {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
- {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
- {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
- {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
- {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
- {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
- {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
- {LDLM_FL_FAIL_LOC, "LDLM_FL_FAIL_LOC"},
- {LDLM_FL_SKIPPED, "LDLM_FL_SKIPPED"},
- {LDLM_FL_CBPENDING, "LDLM_FL_CBPENDING"},
- {LDLM_FL_WAIT_NOREPROC, "LDLM_FL_WAIT_NOREPROC"},
- {LDLM_FL_CANCEL, "LDLM_FL_CANCEL"},
- {LDLM_FL_LOCAL_ONLY, "LDLM_FL_LOCAL_ONLY"},
- {LDLM_FL_FAILED, "LDLM_FL_FAILED"},
- {LDLM_FL_CANCELING, "LDLM_FL_CANCELING"},
- {LDLM_FL_LOCAL, "LDLM_FL_LOCAL"},
- {LDLM_FL_LVB_READY, "LDLM_FL_LVB_READY"},
- {LDLM_FL_KMS_IGNORE, "LDLM_FL_KMS_IGNORE"},
- {LDLM_FL_CP_REQD, "LDLM_FL_CP_REQD"},
- {LDLM_FL_CLEANED, "LDLM_FL_CLEANED"},
- {LDLM_FL_ATOMIC_CB, "LDLM_FL_ATOMIC_CB"},
- {LDLM_FL_BL_AST, "LDLM_FL_BL_AST"},
- {LDLM_FL_BL_DONE, "LDLM_FL_BL_DONE"},
- {LDLM_FL_NO_LRU, "LDLM_FL_NO_LRU"},
- {LDLM_FL_FAIL_NOTIFIED, "LDLM_FL_FAIL_NOTIFIED"},
- {LDLM_FL_DESTROYED, "LDLM_FL_DESTROYED"},
- {LDLM_FL_SERVER_LOCK, "LDLM_FL_SERVER_LOCK"},
- {LDLM_FL_RES_LOCKED, "LDLM_FL_RES_LOCKED"},
- {LDLM_FL_WAITED, "LDLM_FL_WAITED"},
- {LDLM_FL_NS_SRV, "LDLM_FL_NS_SRV"},
- {LDLM_FL_EXCL, "LDLM_FL_EXCL"},
- { 0, NULL }
-};
-#endif /* WIRESHARK_COMPILE */
#endif /* LDLM_ALL_FLAGS_MASK */
+
struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
{
/* on server-side resource of lock doesn't change */
- if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
+ if (!ldlm_is_ns_srv(lock))
spin_lock(&lock->l_lock);
lock_res(lock->l_resource);
- lock->l_flags |= LDLM_FL_RES_LOCKED;
+ ldlm_set_res_locked(lock);
return lock->l_resource;
}
EXPORT_SYMBOL(lock_res_and_lock);
void unlock_res_and_lock(struct ldlm_lock *lock)
{
/* on server-side resource of lock doesn't change */
- lock->l_flags &= ~LDLM_FL_RES_LOCKED;
+ ldlm_clear_res_locked(lock);
unlock_res(lock->l_resource);
- if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
+ if (!ldlm_is_ns_srv(lock))
spin_unlock(&lock->l_lock);
}
EXPORT_SYMBOL(unlock_res_and_lock);
req->l_policy_data.l_extent.start) &&
(lock->l_policy_data.l_extent.end >=
req->l_policy_data.l_extent.end))) {
- /* If we met a PR lock just like us or wider,
- and nobody down the list conflicted with
- it, that means we can skip processing of
- the rest of the list and safely place
- ourselves at the end of the list, or grant
- (dependent if we met an conflicting locks
- before in the list).
- In case of 1st enqueue only we continue
- traversing if there is something conflicting
- down the list because we need to make sure
- that something is marked as AST_SENT as well,
- in cse of empy worklist we would exit on
- first conflict met. */
- /* There IS a case where such flag is
- not set for a lock, yet it blocks
- something. Luckily for us this is
- only during destroy, so lock is
- exclusive. So here we are safe */
- if (!(lock->l_flags & LDLM_FL_AST_SENT)) {
- RETURN(compat);
- }
+ /* If we met a PR lock just like us or
+ wider, and nobody down the list
+ conflicted with it, that means we
+ can skip processing of the rest of
+ the list and safely place ourselves
+ at the end of the list, or grant
+				   (depending on whether we met
+				   conflicting locks earlier in the
+				   list).  Only in the case of a 1st
+				   enqueue do we continue
+ traversing if there is something
+ conflicting down the list because
+ we need to make sure that something
+ is marked as AST_SENT as well, in
+				   case of an empty worklist we would exit
+ on first conflict met. */
+ /* There IS a case where such flag is
+ not set for a lock, yet it blocks
+ something. Luckily for us this is
+ only during destroy, so lock is
+ exclusive. So here we are safe */
+ if (!ldlm_is_ast_sent(lock))
+ RETURN(compat);
}
/* non-group locks are compatible, overlap doesn't
cfs_list_entry(pos, struct ldlm_lock, l_bl_ast);
cfs_list_del_init(&lock->l_bl_ast);
- LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
- lock->l_flags &= ~LDLM_FL_AST_SENT;
+ LASSERT(ldlm_is_ast_sent(lock));
+ ldlm_clear_ast_sent(lock);
LASSERT(lock->l_bl_ast_run == 0);
LASSERT(lock->l_blocking_lock);
LDLM_LOCK_RELEASE(lock->l_blocking_lock);
LASSERT(cfs_list_empty(&res->lr_converting));
LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
- !(lock->l_flags & LDLM_FL_AST_DISCARD_DATA));
+ !ldlm_is_ast_discard_data(lock));
check_res_locked(res);
*err = ELDLM_OK;
* in ldlm_lock_destroy. Anyway, this always happens
* when a client is being evicted. So it would be
* ok to return an error. -jay */
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
*err = -EAGAIN;
GOTO(out, rc = -EAGAIN);
}
RETURN(0);
out:
if (!cfs_list_empty(&rpc_list)) {
- LASSERT(!(lock->l_flags & LDLM_FL_AST_DISCARD_DATA));
+ LASSERT(!ldlm_is_ast_discard_data(lock));
discard_bl_list(&rpc_list);
}
RETURN(rc);
/* don't let another thread in ldlm_extent_shift_kms race in
* just after we finish and take our lock into account in its
* calculation of the kms */
- lock->l_flags |= LDLM_FL_KMS_IGNORE;
+ ldlm_set_kms_ignore(lock);
cfs_list_for_each(tmp, &res->lr_granted) {
lck = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
- if (lck->l_flags & LDLM_FL_KMS_IGNORE)
+ if (ldlm_is_kms_ignore(lck))
continue;
if (lck->l_policy_data.l_extent.end >= old_kms)
static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
{
- ENTRY;
+ ENTRY;
LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
mode, flags);
/* Safe to not lock here, since it should be empty anyway */
LASSERT(cfs_hlist_unhashed(&lock->l_exp_flock_hash));
- cfs_list_del_init(&lock->l_res_link);
- if (flags == LDLM_FL_WAIT_NOREPROC &&
- !(lock->l_flags & LDLM_FL_FAILED)) {
- /* client side - set a flag to prevent sending a CANCEL */
- lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
+ cfs_list_del_init(&lock->l_res_link);
+ if (flags == LDLM_FL_WAIT_NOREPROC && !ldlm_is_failed(lock)) {
+ /* client side - set a flag to prevent sending a CANCEL */
+ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
/* when reaching here, it is under lock_res_and_lock(). Thus,
need call the nolock version of ldlm_lock_decref_internal*/
"support flock canceliation\n");
} else {
LASSERT(lock->l_completion_ast);
- LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0);
+ LASSERT(!ldlm_is_ast_sent(lock));
lock->l_flags |= LDLM_FL_AST_SENT | LDLM_FL_CANCEL_ON_BLOCK |
LDLM_FL_FLOCK_DEADLOCK;
ldlm_flock_blocking_unlink(lock);
ldlm_flock_blocking_unlink(lock);
/* client side - set flag to prevent lock from being put on LRU list */
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
unlock_res_and_lock(lock);
EXIT;
granted:
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
- if (lock->l_flags & LDLM_FL_FAILED) {
+ if (ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
RETURN(-EIO);
}
/* Protect against race where lock could have been just destroyed
* due to overlap in ldlm_process_flock_lock().
*/
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
unlock_res_and_lock(lock);
LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
RETURN(0);
/* ldlm_lock_enqueue() has already placed lock on the granted list. */
cfs_list_del_init(&lock->l_res_link);
- if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) {
+ if (ldlm_is_flock_deadlock(lock)) {
LDLM_DEBUG(lock, "client-side enqueue deadlock received");
rc = -EDEADLK;
} else if (flags & LDLM_FL_TEST_LOCK) {
int ret = 0;
lock_res_and_lock(lock);
- if (((lock->l_req_mode == lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_CP_REQD)) ||
- (lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCEL)))
+ if ((lock->l_req_mode == lock->l_granted_mode) &&
+ !ldlm_is_cp_reqd(lock))
+ ret = 1;
+ else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
ret = 1;
unlock_res_and_lock(lock);
"final lock_put on destroyed lock, freeing it.");
res = lock->l_resource;
- LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
+ LASSERT(ldlm_is_destroyed(lock));
LASSERT(cfs_list_empty(&lock->l_res_link));
LASSERT(cfs_list_empty(&lock->l_pending_chain));
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
cfs_list_del_init(&lock->l_lru);
- if (lock->l_flags & LDLM_FL_SKIPPED)
- lock->l_flags &= ~LDLM_FL_SKIPPED;
+ ldlm_clear_skipped(lock);
LASSERT(ns->ns_nr_unused > 0);
ns->ns_nr_unused--;
rc = 1;
int rc;
ENTRY;
- if (lock->l_flags & LDLM_FL_NS_SRV) {
+ if (ldlm_is_ns_srv(lock)) {
LASSERT(cfs_list_empty(&lock->l_lru));
RETURN(0);
}
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
ENTRY;
- if (lock->l_flags & LDLM_FL_NS_SRV) {
+ if (ldlm_is_ns_srv(lock)) {
LASSERT(cfs_list_empty(&lock->l_lru));
EXIT;
return;
LBUG();
}
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
LASSERT(cfs_list_empty(&lock->l_lru));
EXIT;
return 0;
}
- lock->l_flags |= LDLM_FL_DESTROYED;
+ ldlm_set_destroyed(lock);
if (lock->l_export && lock->l_export->exp_lock_hash) {
/* NB: it's safe to call cfs_hash_del() even lock isn't
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it */
- if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED)== 0)) {
+ if ((flags == 0) && !ldlm_is_destroyed(lock)) {
lu_ref_add(&lock->l_reference, "handle", current);
RETURN(lock);
}
LASSERT(lock->l_resource != NULL);
lu_ref_add_atomic(&lock->l_reference, "handle", current);
- if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
+ if (unlikely(ldlm_is_destroyed(lock))) {
unlock_res_and_lock(lock);
CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
LDLM_LOCK_PUT(lock);
RETURN(NULL);
}
- if (flags && (lock->l_flags & flags)) {
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- RETURN(NULL);
- }
+ /* If we're setting flags, make sure none of them are already set. */
+ if (flags != 0) {
+ if ((lock->l_flags & flags) != 0) {
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_PUT(lock);
+ RETURN(NULL);
+ }
- if (flags)
lock->l_flags |= flags;
+ }
unlock_res_and_lock(lock);
RETURN(lock);
void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
cfs_list_t *work_list)
{
- if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
- LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
- lock->l_flags |= LDLM_FL_AST_SENT;
- /* If the enqueuing client said so, tell the AST recipient to
- * discard dirty data, rather than writing back. */
- if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
- lock->l_flags |= LDLM_FL_DISCARD_DATA;
+ if (!ldlm_is_ast_sent(lock)) {
+ LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
+ ldlm_set_ast_sent(lock);
+ /* If the enqueuing client said so, tell the AST recipient to
+ * discard dirty data, rather than writing back. */
+ if (ldlm_is_ast_discard_data(new))
+ ldlm_set_discard_data(lock);
LASSERT(cfs_list_empty(&lock->l_bl_ast));
cfs_list_add(&lock->l_bl_ast, work_list);
LDLM_LOCK_GET(lock);
*/
void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
{
- if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
- lock->l_flags |= LDLM_FL_CP_REQD;
+ if (!ldlm_is_cp_reqd(lock)) {
+ ldlm_set_cp_reqd(lock);
LDLM_DEBUG(lock, "lock granted; sending completion AST.");
LASSERT(cfs_list_empty(&lock->l_cp_ast));
cfs_list_add(&lock->l_cp_ast, work_list);
if (lock != NULL) {
lock_res_and_lock(lock);
if (lock->l_readers != 0 || lock->l_writers != 0 ||
- !(lock->l_flags & LDLM_FL_CBPENDING)) {
+ !ldlm_is_cbpending(lock)) {
ldlm_lock_addref_internal_nolock(lock, mode);
result = 0;
}
ldlm_lock_decref_internal_nolock(lock, mode);
- if (lock->l_flags & LDLM_FL_LOCAL &&
+ if (ldlm_is_local(lock) &&
!lock->l_readers && !lock->l_writers) {
/* If this is a local lock on a server namespace and this was
* the last reference, cancel the lock. */
CDEBUG(D_INFO, "forcing cancel of local lock\n");
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
}
if (!lock->l_readers && !lock->l_writers &&
- (lock->l_flags & LDLM_FL_CBPENDING)) {
+ ldlm_is_cbpending(lock)) {
/* If we received a blocked AST and this was the last reference,
* run the callback. */
- if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export)
+ if (ldlm_is_ns_srv(lock) && lock->l_export)
CERROR("FL_CBPENDING set on non-local lock--just a "
"warning\n");
ldlm_lock_remove_from_lru(lock);
unlock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ if (ldlm_is_fail_loc(lock))
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
- if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
+ if (ldlm_is_atomic_cb(lock) ||
ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
ldlm_handle_bl_callback(ns, NULL, lock);
} else if (ns_is_client(ns) &&
!lock->l_readers && !lock->l_writers &&
- !(lock->l_flags & LDLM_FL_NO_LRU) &&
- !(lock->l_flags & LDLM_FL_BL_AST)) {
+ !ldlm_is_no_lru(lock) &&
+ !ldlm_is_bl_ast(lock)) {
LDLM_DEBUG(lock, "add lock into lru list");
ldlm_lock_add_to_lru(lock);
unlock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ if (ldlm_is_fail_loc(lock))
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
/* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
unlock_res_and_lock(lock);
ldlm_lock_decref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
ldlm_resource_dump(D_INFO, res);
LDLM_DEBUG(lock, "About to add lock:");
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
}
* this is generally only going to be used by children
* whose parents already hold a lock so forward progress
* can still happen. */
- if (lock->l_flags & LDLM_FL_CBPENDING &&
+ if (ldlm_is_cbpending(lock) &&
!(flags & LDLM_FL_CBPENDING))
continue;
- if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
+ if (!unref && ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
continue;
policy->l_inodebits.bits))
continue;
- if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
+ if (!unref && LDLM_HAVE_MASK(lock, GONE))
continue;
if ((flags & LDLM_FL_LOCAL_ONLY) &&
- !(lock->l_flags & LDLM_FL_LOCAL))
+ !ldlm_is_local(lock))
continue;
if (flags & LDLM_FL_TEST_LOCK) {
*/
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
- lock->l_flags |= LDLM_FL_LVB_READY;
+ ldlm_set_lvb_ready(lock);
wake_up_all(&lock->l_waitq);
}
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
if (lock) {
ldlm_lock2handle(lock, lockh);
if ((flags & LDLM_FL_LVB_READY) &&
- (!(lock->l_flags & LDLM_FL_LVB_READY))) {
+ (!ldlm_is_lvb_ready(lock))) {
__u64 wait_flags = LDLM_FL_LVB_READY |
LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
struct l_wait_info lwi;
l_wait_event(lock->l_waitq,
lock->l_flags & wait_flags,
&lwi);
- if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
+ if (!ldlm_is_lvb_ready(lock)) {
if (flags & LDLM_FL_TEST_LOCK)
LDLM_LOCK_RELEASE(lock);
else
lock = ldlm_handle2lock(lockh);
if (lock != NULL) {
lock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_GONE_MASK)
+ if (LDLM_HAVE_MASK(lock, GONE))
GOTO(out, mode);
- if (lock->l_flags & LDLM_FL_CBPENDING &&
+ if (ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
GOTO(out, mode);
lock->l_ast_data = data;
lock->l_pid = current_pid();
if (ns_is_server(ns))
- lock->l_flags |= LDLM_FL_NS_SRV;
+ ldlm_set_ns_srv(lock);
if (cbs) {
lock->l_blocking_ast = cbs->lcs_blocking;
lock->l_completion_ast = cbs->lcs_completion;
/* Some flags from the enqueue want to make it into the AST, via the
* lock's l_flags. */
- lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
+ if (*flags & LDLM_FL_AST_DISCARD_DATA)
+ ldlm_set_ast_discard_data(lock);
/* This distinction between local lock trees is very important; a client
* namespace only has information about locks taken by that client, and
lock_res_and_lock(lock);
cfs_list_del_init(&lock->l_bl_ast);
- LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
+ LASSERT(ldlm_is_ast_sent(lock));
LASSERT(lock->l_bl_ast_run == 0);
LASSERT(lock->l_blocking_lock);
lock->l_bl_ast_run++;
/* nobody should touch l_cp_ast */
lock_res_and_lock(lock);
cfs_list_del_init(&lock->l_cp_ast);
- LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
+ LASSERT(ldlm_is_cp_reqd(lock));
/* save l_completion_ast since it can be changed by
* mds_intent_policy(), see bug 14225 */
completion_callback = lock->l_completion_ast;
- lock->l_flags &= ~LDLM_FL_CP_REQD;
+ ldlm_clear_cp_reqd(lock);
unlock_res_and_lock(lock);
if (completion_callback != NULL)
void ldlm_cancel_callback(struct ldlm_lock *lock)
{
check_res_locked(lock->l_resource);
- if (!(lock->l_flags & LDLM_FL_CANCEL)) {
- lock->l_flags |= LDLM_FL_CANCEL;
+ if (!ldlm_is_cancel(lock)) {
+ ldlm_set_cancel(lock);
if (lock->l_blocking_ast) {
unlock_res_and_lock(lock);
lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
LDLM_DEBUG(lock, "no blocking ast");
}
}
- lock->l_flags |= LDLM_FL_BL_DONE;
+ ldlm_set_bl_done(lock);
}
/**
LBUG();
}
- if (lock->l_flags & LDLM_FL_WAITED)
+ if (ldlm_is_waited(lock))
ldlm_del_waiting_lock(lock);
/* Releases cancel callback. */
/* Yes, second time, just in case it was added again while we were
* running with no res lock in ldlm_cancel_callback */
- if (lock->l_flags & LDLM_FL_WAITED)
+ if (ldlm_is_waited(lock))
ldlm_del_waiting_lock(lock);
ldlm_resource_unlink_lock(lock);
continue;
}
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
/* release the lock refcount where
* waiting_locks_callback() found it */
LDLM_LOCK_RELEASE(lock);
int timeout = ldlm_get_enq_timeout(lock);
/* NB: must be called with hold of lock_res_and_lock() */
- LASSERT(lock->l_flags & LDLM_FL_RES_LOCKED);
- lock->l_flags |= LDLM_FL_WAITED;
+ LASSERT(ldlm_is_res_locked(lock));
+ ldlm_set_waited(lock);
- LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
+ LASSERT(!ldlm_is_cancel_on_block(lock));
spin_lock_bh(&waiting_locks_spinlock);
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
static cfs_time_t next;
spin_unlock_bh(&waiting_locks_spinlock);
LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
# ifdef HAVE_SERVER_SUPPORT
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
- LASSERT((lock->l_flags & (LDLM_FL_RES_LOCKED|LDLM_FL_CANCEL_ON_BLOCK))
- == LDLM_FL_RES_LOCKED);
+ LASSERT(ldlm_is_res_locked(lock) && !ldlm_is_cancel_on_block(lock));
RETURN(1);
}
libcfs_nid2str(peer.nid));
ldlm_lock_cancel(lock);
rc = -ERESTART;
- } else if (lock->l_flags & LDLM_FL_CANCEL) {
+ } else if (ldlm_is_cancel(lock)) {
LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
"cancel was received (AST reply lost?)",
ast_type, libcfs_nid2str(peer.nid));
RETURN(0);
}
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
/* What's the point? */
unlock_res_and_lock(lock);
ptlrpc_req_finished(req);
RETURN(0);
}
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
+ if (ldlm_is_cancel_on_block(lock))
instant_cancel = 1;
body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
body->lock_handle[0] = lock->l_remote_handle;
body->lock_desc = *desc;
- body->lock_flags |= ldlm_flags_to_wire(lock->l_flags & LDLM_AST_FLAGS);
+ body->lock_flags |= ldlm_flags_to_wire(lock->l_flags & LDLM_FL_AST_MASK);
LDLM_DEBUG(lock, "server preparing blocking AST");
/* We only send real blocking ASTs after the lock is granted */
lock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_AST_SENT) {
+ if (ldlm_is_ast_sent(lock)) {
body->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
/* Copy AST flags like LDLM_FL_DISCARD_DATA. */
body->lock_flags |= ldlm_flags_to_wire(lock->l_flags &
- LDLM_AST_FLAGS);
+ LDLM_FL_AST_MASK);
/* We might get here prior to ldlm_handle_enqueue setting
* LDLM_FL_CANCEL_ON_BLOCK flag. Then we will put this lock
* into waiting list, but this is safe and similar code in
* ldlm_handle_enqueue will call ldlm_lock_cancel() still,
* that would not only cancel the lock, but will also remove
* it from waiting list */
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
+ if (ldlm_is_cancel_on_block(lock)) {
unlock_res_and_lock(lock);
ldlm_lock_cancel(lock);
instant_cancel = 1;
/* Now take into account flags to be inherited from original lock
request both in reply to client and in our own lock flags. */
- dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
+ dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_FL_INHERIT_MASK;
lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
- LDLM_INHERIT_FLAGS);
+ LDLM_FL_INHERIT_MASK);
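/* LDLM_FL_AST_MASK and LDLM_FL_INHERIT_MASK are presumed to be generated
 * group masks, i.e. each expands to the OR of the individual flags tagged
 * for that group (for example LDLM_FL_AST_DISCARD_DATA for the AST group
 * and LDLM_FL_CANCEL_ON_BLOCK for the inherit group), so only those bits
 * are copied to or from the wire representation here. */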
/* Don't move a pending lock onto the export if it has already been
* disconnected due to eviction (bug 5683) or server umount (bug 24324).
OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
rc = -ENOTCONN;
- } else if (lock->l_flags & LDLM_FL_AST_SENT) {
+ } else if (ldlm_is_ast_sent(lock)) {
dlm_rep->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
if (lock->l_granted_mode == lock->l_req_mode) {
/*
if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
req->rq_export->exp_libclient) {
- if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
+ if (unlikely(!ldlm_is_cancel_on_block(lock) ||
!(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
CERROR("Granting sync lock to libclient. "
"req fl %d, rep fl %d, lock fl "LPX64"\n",
LDLM_DEBUG(lock, "client blocking AST callback handler");
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
- lock->l_flags |= LDLM_FL_CANCEL;
+ if (ldlm_is_cancel_on_block(lock))
+ ldlm_set_cancel(lock);
do_ast = (!lock->l_readers && !lock->l_writers);
unlock_res_and_lock(lock);
schedule_timeout_and_set_state(
TASK_INTERRUPTIBLE, to);
if (lock->l_granted_mode == lock->l_req_mode ||
- lock->l_flags & LDLM_FL_DESTROYED)
+ ldlm_is_destroyed(lock))
break;
}
}
}
lock_res_and_lock(lock);
- if ((lock->l_flags & LDLM_FL_DESTROYED) ||
+ if (ldlm_is_destroyed(lock) ||
lock->l_granted_mode == lock->l_req_mode) {
/* bug 11300: the lock has already been granted */
unlock_res_and_lock(lock);
/* BL_AST locks are not needed in LRU.
* Let ldlm_cancel_lru() be fast. */
ldlm_lock_remove_from_lru(lock);
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
LDLM_DEBUG(lock, "completion AST includes blocking AST");
}
out:
if (rc < 0) {
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_FAILED;
+ ldlm_set_failed(lock);
unlock_res_and_lock(lock);
wake_up(&lock->l_waitq);
}
spin_lock(&blp->blp_lock);
if (blwi->blwi_lock &&
- blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
+ ldlm_is_discard_data(blwi->blwi_lock)) {
/* add LDLM_FL_DISCARD_DATA requests to the priority list */
cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
} else {
RETURN(0);
}
- if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
+ if (ldlm_is_fail_loc(lock) &&
lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
/* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
lock_res_and_lock(lock);
lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
- LDLM_AST_FLAGS);
+ LDLM_FL_AST_MASK);
if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
/* If somebody cancels lock and cache is already dropped,
* or lock is failed before cp_ast received on client,
* we can tell the server we have no lock. Otherwise, we
* should send cancel after dropping the cache. */
- if (((lock->l_flags & LDLM_FL_CANCELING) &&
- (lock->l_flags & LDLM_FL_BL_DONE)) ||
- (lock->l_flags & LDLM_FL_FAILED)) {
+ if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
+ ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "callback on lock "
LPX64" - lock disappeared\n",
dlm_req->lock_handle[0].cookie);
/* BL_AST locks are not needed in LRU.
* Let ldlm_cancel_lru() be fast. */
ldlm_lock_remove_from_lru(lock);
- lock->l_flags |= LDLM_FL_BL_AST;
+ ldlm_set_bl_ast(lock);
}
unlock_res_and_lock(lock);
case LDLM_BL_CALLBACK:
CDEBUG(D_INODE, "blocking ast\n");
req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
- if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
+ if (!ldlm_is_cancel_on_block(lock)) {
rc = ldlm_callback_reply(req, 0);
if (req->rq_no_reply || rc)
ldlm_callback_errmsg(req, "Normal process", rc,
if (lock == NULL)
continue;
- rc = !!(lock->l_flags & LDLM_FL_AST_SENT);
+ rc = ldlm_is_ast_sent(lock) ? 1 : 0;
if (rc)
LDLM_DEBUG(lock, "hpreq cancel lock");
LDLM_LOCK_PUT(lock);
return 0;
}
- if (lock->l_flags & LDLM_FL_AST_SENT) {
+ if (ldlm_is_ast_sent(lock)) {
unlock_res_and_lock(lock);
return 0;
}
LASSERT(lock->l_blocking_ast);
LASSERT(!lock->l_blocking_lock);
- lock->l_flags |= LDLM_FL_AST_SENT;
+ ldlm_set_ast_sent(lock);
if (lock->l_export && lock->l_export->exp_lock_hash) {
/* NB: it's safe to call cfs_hash_del() even if the lock isn't
* in exp_lock_hash. */
long delay;
int result;
- if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) {
+ if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "client-side enqueue: destroyed");
result = -EIO;
} else {
lwd.lwd_lock = lock;
- if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
+ if (ldlm_is_no_timeout(lock)) {
LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
lwi = LWI_INTR(interrupted_completion_wait, &lwd);
} else {
if (ns_is_client(ldlm_lock_to_ns(lock)) &&
OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
- lock->l_flags |= LDLM_FL_FAIL_LOC;
+ ldlm_set_fail_loc(lock);
rc = -EINTR;
} else {
/* Go to sleep until the lock is granted or cancelled. */
int do_ast;
ENTRY;
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
do_ast = (!lock->l_readers && !lock->l_writers);
unlock_res_and_lock(lock);
/* NB: we don't have any lock now (lock_res_and_lock)
* because it's a new lock */
ldlm_lock_addref_internal_nolock(lock, mode);
- lock->l_flags |= LDLM_FL_LOCAL;
+ ldlm_set_local(lock);
if (*flags & LDLM_FL_ATOMIC_CB)
- lock->l_flags |= LDLM_FL_ATOMIC_CB;
+ ldlm_set_atomic_cb(lock);
if (policy != NULL)
lock->l_policy_data = *policy;
lock_res_and_lock(lock);
/* Check that lock is not granted or failed, we might race. */
if ((lock->l_req_mode != lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_FAILED)) {
- /* Make sure that this lock will not be found by raced
- * bl_ast and -EINVAL reply is sent to server anyways.
- * bug 17645 */
- lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
- LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
- need_cancel = 1;
+ !ldlm_is_failed(lock)) {
+ /* Make sure that this lock will not be found by raced
+ * bl_ast and -EINVAL reply is sent to server anyways.
+ * b=17645*/
+ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
+ LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
+ need_cancel = 1;
}
unlock_res_and_lock(lock);
*flags = ldlm_flags_from_wire(reply->lock_flags);
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
- LDLM_INHERIT_FLAGS);
+ LDLM_FL_INHERIT_MASK);
/* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
* to wait with no timeout as well */
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
* bug 7311). */
(LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
unlock_res_and_lock(lock);
LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
}
LDLM_DEBUG(lock, "client-side cancel");
/* Set this flag to prevent others from getting new references*/
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
local_only = !!(lock->l_flags &
(LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
- ldlm_cancel_callback(lock);
- rc = (lock->l_flags & LDLM_FL_BL_AST) ?
- LDLM_FL_BL_AST : LDLM_FL_CANCELING;
- unlock_res_and_lock(lock);
+ ldlm_cancel_callback(lock);
+ rc = (ldlm_is_bl_ast(lock)) ?
+ LDLM_FL_BL_AST : LDLM_FL_CANCELING;
+ unlock_res_and_lock(lock);
if (local_only) {
CDEBUG(D_DLMTRACE, "not sending request (at caller's "
break;
default:
result = LDLM_POLICY_SKIP_LOCK;
- lock->l_flags |= LDLM_FL_SKIPPED;
+ ldlm_set_skipped(lock);
break;
}
cfs_list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
l_lru) {
/* No locks which got blocking requests. */
- LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+ LASSERT(!ldlm_is_bl_ast(lock));
if (flags & LDLM_CANCEL_NO_WAIT &&
- lock->l_flags & LDLM_FL_SKIPPED)
+ ldlm_is_skipped(lock))
/* already processed */
continue;
/* Somebody is already doing CANCEL. No need for this
* lock in LRU, do not traverse it again. */
- if (!(lock->l_flags & LDLM_FL_CANCELING))
+ if (!ldlm_is_canceling(lock))
break;
ldlm_lock_remove_from_lru_nolock(lock);
lock_res_and_lock(lock);
/* Check flags again under the lock. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
+ if (ldlm_is_canceling(lock) ||
(ldlm_lock_remove_from_lru(lock) == 0)) {
/* Another thread is removing lock from LRU, or
* somebody is already doing CANCEL, or there
* is a blocking request which will send cancel
* by itself, or the lock is no longer unused. */
unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference,
- __FUNCTION__, current);
+ lu_ref_del(&lock->l_reference, __FUNCTION__, current);
LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_lock);
continue;
* frees appropriate state. This might lead to a race
* where while we are doing cancel here, server is also
* silently cancelling this lock. */
- lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
+ ldlm_clear_cancel_on_block(lock);
/* Setting the CBPENDING flag is a little misleading,
* but prevents an important race; namely, once
/* If somebody is already doing CANCEL, or blocking AST came,
* skip this lock. */
- if (lock->l_flags & LDLM_FL_BL_AST ||
- lock->l_flags & LDLM_FL_CANCELING)
- continue;
+ if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock))
+ continue;
if (lockmode_compat(lock->l_granted_mode, mode))
continue;
policy->l_inodebits.bits))
continue;
- /* See CBPENDING comment in ldlm_cancel_lru */
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
- lock_flags;
+ /* See CBPENDING comment in ldlm_cancel_lru */
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
+ lock_flags;
LASSERT(cfs_list_empty(&lock->l_bl_ast));
cfs_list_add(&lock->l_bl_ast, cancels);
/* Bug 11974: Do not replay a lock which is actively being canceled */
- if (lock->l_flags & LDLM_FL_CANCELING) {
+ if (ldlm_is_canceling(lock)) {
LDLM_DEBUG(lock, "Not replaying canceled lock:");
RETURN(0);
}
/* If this is reply-less callback lock, we cannot replay it, since
* server might have long dropped it, but notification of that event was
* lost by network. (and server granted conflicting lock already) */
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
+ if (ldlm_is_cancel_on_block(lock)) {
LDLM_DEBUG(lock, "Not replaying reply-less lock:");
ldlm_lock_cancel(lock);
RETURN(0);
cfs_list_for_each(tmp, q) {
lock = cfs_list_entry(tmp, struct ldlm_lock,
l_res_link);
- if (lock->l_flags & LDLM_FL_CLEANED) {
+ if (ldlm_is_cleaned(lock)) {
lock = NULL;
continue;
}
LDLM_LOCK_GET(lock);
- lock->l_flags |= LDLM_FL_CLEANED;
+ ldlm_set_cleaned(lock);
break;
}
/* Set CBPENDING so nothing in the cancellation path
* can match this lock. */
- lock->l_flags |= LDLM_FL_CBPENDING;
- lock->l_flags |= LDLM_FL_FAILED;
+ ldlm_set_cbpending(lock);
+ ldlm_set_failed(lock);
lock->l_flags |= flags;
/* ... without sending a CANCEL message for local_only. */
if (local_only)
- lock->l_flags |= LDLM_FL_LOCAL_ONLY;
+ ldlm_set_local_only(lock);
if (local_only && (lock->l_readers || lock->l_writers)) {
/* This is a little bit gross, but much better than the
LDLM_DEBUG(lock, "About to add this lock:\n");
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
}
ldlm_resource_dump(D_INFO, res);
LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);
- if (new->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(new)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
goto out;
}
static inline int return_if_equal(struct ldlm_lock *lock, void *data)
{
- if ((lock->l_flags &
- (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA)) ==
- (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA))
- return LDLM_ITER_CONTINUE;
- return LDLM_ITER_STOP;
+ return (ldlm_is_canceling(lock) && ldlm_is_discard_data(lock)) ?
+ LDLM_ITER_CONTINUE : LDLM_ITER_STOP;
}
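As a stand-alone sanity check (illustrative bit values only, not the real
LDLM assignments), the two-predicate form used in return_if_equal() above is
equivalent to the old combined-mask comparison it replaces:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Stand-in bit values; the real flags come from the generated header. */
#define FL_CANCELING_ILLUS    0x1ULL
#define FL_DISCARD_DATA_ILLUS 0x2ULL

/* Old style: mask out both bits and compare against the full mask. */
static bool both_set_masked(uint64_t flags)
{
	return (flags & (FL_CANCELING_ILLUS | FL_DISCARD_DATA_ILLUS)) ==
	       (FL_CANCELING_ILLUS | FL_DISCARD_DATA_ILLUS);
}

/* New style: one single-flag predicate per bit. */
static bool both_set_predicates(uint64_t flags)
{
	return (flags & FL_CANCELING_ILLUS) && (flags & FL_DISCARD_DATA_ILLUS);
}

int main(void)
{
	uint64_t f;

	for (f = 0; f < 4; f++)
		assert(both_set_masked(f) == both_set_predicates(f));
	return 0;
}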
/* find any ldlm lock of the inode in mdc and lov
ENTRY;
CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
- PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY),
+ PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
lock->l_lvb_data, lock->l_lvb_len);
- if ((lock->l_lvb_data != NULL) && (lock->l_flags & LDLM_FL_LVB_READY))
+ if ((lock->l_lvb_data != NULL) && ldlm_is_lvb_ready(lock))
RETURN(0);
/* if layout lock was granted right away, the layout is returned
md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL);
lock_res_and_lock(lock);
- lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY);
+ lvb_ready = ldlm_is_lvb_ready(lock);
unlock_res_and_lock(lock);
/* checking lvb_ready is racy but this is okay. The worst case is
* that multiple processes may configure the file at the same time. */
if (inode == NULL)
break;
- LASSERT(lock->l_flags & LDLM_FL_CANCELING);
+ LASSERT(ldlm_is_canceling(lock));
if (bits & MDS_INODELOCK_XATTR)
ll_xattr_cache_destroy(inode);
* ast.
*/
if (! ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
- ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
+ !ldlm_is_destroyed(olock)))
return 0;
if (! ergo(ols->ols_state == OLS_GRANTED,
static void osc_lock_cancel(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
- struct cl_lock *lock = slice->cls_lock;
- struct osc_lock *olck = cl2osc_lock(slice);
- struct ldlm_lock *dlmlock = olck->ols_lock;
- int result = 0;
- int discard;
+ struct cl_lock *lock = slice->cls_lock;
+ struct osc_lock *olck = cl2osc_lock(slice);
+ struct ldlm_lock *dlmlock = olck->ols_lock;
- LASSERT(cl_lock_is_mutexed(lock));
- LINVRNT(osc_lock_invariant(olck));
+ LASSERT(cl_lock_is_mutexed(lock));
+ LINVRNT(osc_lock_invariant(olck));
- if (dlmlock != NULL) {
- int do_cancel;
+ if (dlmlock != NULL) {
+ bool do_cancel;
+ int result = 0;
- discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
if (olck->ols_state >= OLS_GRANTED)
- result = osc_lock_flush(olck, discard);
- osc_lock_unhold(olck);
-
- lock_res_and_lock(dlmlock);
- /* Now that we're the only user of dlm read/write reference,
- * mostly the ->l_readers + ->l_writers should be zero.
- * However, there is a corner case.
- * See bug 18829 for details.*/
- do_cancel = (dlmlock->l_readers == 0 &&
- dlmlock->l_writers == 0);
- dlmlock->l_flags |= LDLM_FL_CBPENDING;
- unlock_res_and_lock(dlmlock);
- if (do_cancel)
+ result = osc_lock_flush(olck,
+ ldlm_is_discard_data(dlmlock));
+ osc_lock_unhold(olck);
+
+ lock_res_and_lock(dlmlock);
+ /* Now that we're the only user of dlm read/write reference,
+ * mostly the ->l_readers + ->l_writers should be zero.
+ * However, there is a corner case.
+ * See b=18829 for details.*/
+ do_cancel = (dlmlock->l_readers == 0 &&
+ dlmlock->l_writers == 0);
+ ldlm_set_cbpending(dlmlock);
+ unlock_res_and_lock(dlmlock);
+ if (do_cancel)
result = ldlm_cli_cancel(&olck->ols_handle, LCF_ASYNC);
- if (result < 0)
- CL_LOCK_DEBUG(D_ERROR, env, lock,
- "lock %p cancel failure with error(%d)\n",
- lock, result);
- }
- olck->ols_state = OLS_CANCELLED;
- olck->ols_flags &= ~LDLM_FL_LVB_READY;
- osc_lock_detach(env, olck);
+ if (result < 0)
+ CL_LOCK_DEBUG(D_ERROR, env, lock,
+ "lock %p cancel failure with error(%d)\n",
+ lock, result);
+ }
+ olck->ols_state = OLS_CANCELLED;
+ olck->ols_flags &= ~LDLM_FL_LVB_READY;
+ osc_lock_detach(env, olck);
}
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
if (mode) {
struct ldlm_lock *matched = ldlm_handle2lock(lockh);
- if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
+ if ((agl != 0) && !ldlm_is_lvb_ready(matched)) {
/* For AGL, if enqueue RPC is sent but the lock is not
* granted, then skip processing this stripe.
* Return -ECANCELED to tell the caller. */
RETURN(-ENOLCK);
}
- if ((lock->l_flags & LDLM_FL_AST_SENT) != 0) {
+ if (ldlm_is_ast_sent(lock)) {
struct ptlrpc_service_part *svc;
unsigned int timeout;
/* kick off reintegration thread if not running already, if
* it's just local cancel (for stack clean up or eviction),
* don't re-trigger the reintegration. */
- if ((lock->l_flags & LDLM_FL_LOCAL_ONLY) == 0)
+ if (!ldlm_is_local_only(lock))
qsd_start_reint_thread(qqi);
lu_ref_del(&qqi->qqi_reference, "ast_data_get", lock);
/* just local cancel (for stack clean up or eviction), don't
* release quota space in this case */
- if ((lock->l_flags & LDLM_FL_LOCAL_ONLY) != 0) {
+ if (ldlm_is_local_only(lock)) {
lqe_putref(lqe);
break;
}