Whamcloud - gitweb
LU-2906 ldlm: use accessor macros for l_flags 63/7963/4
authorBruce Korb <bruce.korb@gmail.com>
Thu, 17 Oct 2013 23:34:23 +0000 (16:34 -0700)
committerOleg Drokin <oleg.drokin@intel.com>
Sat, 26 Oct 2013 22:55:30 +0000 (22:55 +0000)
Convert almost all of the ldlm lock's l_flags references from direct
bit twiddling to using bit-specific macros.  A few multi-bit
operations are left as an exercise for the reader.

The changes are mostly in ldlm, but also touch llite, osc and quota.
They additionally fix a typo in the LDLM_{SET,CLEAR}_FLAG macros and:

Add a multi-bit (mask) test.
Remove the now obsolete LDLM_AST_FLAGS and LDLM_INHERIT_FLAGS defines.
Remove the obsolete LDLM_FL_HIDE_LOCK_MASK define.
Rename "local_only" mask to "off_wire" since it is confusingly similar
to a flag that (I think) means, "do not copy this lock over the wire."
The "local_only/off_wire" mask is also never used.  It's mostly
informational.

Wireshark output moved to "lustre_dlm_flags_wshark.c" and only
bits that can actually appear "on the wire" are emitted.
The "packet-lustre.c" code that references these bits now gets
emitted into that file.  e.g. the "local_only" bit is never put
on the wire, so references to it in wireshark are gone.

Signed-off-by: Bruce Korb <bruce.korb@gmail.com>
Reviewed-by: Keith Mannthey <Keith.Mannthey@intel.com>
Change-Id: I2527c46835e434f1009cf83919a203a358b04737
Reviewed-on: http://review.whamcloud.com/7963
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Doug Oucharek <doug.s.oucharek@intel.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
25 files changed:
contrib/bit-masks/.gitignore
contrib/bit-masks/Makefile
contrib/bit-masks/lustre_dlm_flags.def
contrib/bit-masks/lustre_dlm_flags.tpl
lustre/contrib/wireshark/Makefile
lustre/contrib/wireshark/lustre_dlm_flags_wshark.c [new file with mode: 0644]
lustre/contrib/wireshark/packet-lnet.c
lustre/contrib/wireshark/packet-lustre.c
lustre/contrib/wireshark/wsconfig.sh
lustre/include/lustre_dlm_flags.h
lustre/ldlm/l_lock.c
lustre/ldlm/ldlm_extent.c
lustre/ldlm/ldlm_flock.c
lustre/ldlm/ldlm_internal.h
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_request.c
lustre/ldlm/ldlm_resource.c
lustre/llite/dcache.c
lustre/llite/file.c
lustre/llite/namei.c
lustre/osc/osc_lock.c
lustre/osc/osc_request.c
lustre/quota/qmt_handler.c
lustre/quota/qsd_lock.c

index 43ab8d7..900b4fe 100644 (file)
@@ -1,2 +1,2 @@
-lustre_dlm_flags.[ch]
+lustre_dlm_flags*.[ch]
 !Makefile
index a520f5b..f415ce1 100644 (file)
@@ -14,6 +14,8 @@ $(targ) : $(src)
                rm -f $(top_builddir)/lustre/include/$@ ; \
                sed '/It has been AutoGen-ed/s/-ed.*/-ed/;s/ *$$//' \
                        $@ > $(top_builddir)/lustre/include/$@ ; \
+               cp -fp lustre_dlm_flags_wshark.c \
+                       $(top_builddir)/lustre/contrib/wireshark/. ; \
        else cp $(top_builddir)/lustre/include/$@ . ; fi
 
 install : $(targ) install.sh
index 828cc23..ef2174e 100644 (file)
@@ -9,8 +9,10 @@ flag[ 0] = {
 flag[ 1] = {
     f-name  = block_granted;
     f-mask  = on_wire, blocked;
-    f-desc  = 'Server placed lock on granted list, or a recovering client wants '
-              'the lock added to the granted list, no questions asked.';
+    f-desc  = <<- _EOF_
+       Server placed lock on granted list, or a recovering client wants
+       the lock added to the granted list, no questions asked.
+       _EOF_;
 };
 
 flag[ 2] = {
@@ -132,7 +134,7 @@ flag[31] = {
 
 flag[32] = {
     f-name  = fail_loc;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = <<- _EOF_
        Used for marking lock as a target for -EINTR while cp_ast sleep
        emulation + race with upcoming bl_ast.
@@ -141,7 +143,7 @@ flag[32] = {
 
 flag[33] = {
     f-name  = skipped;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = <<- _EOF_
        Used while processing the unused list to know that we have already
        handled this lock and decided to skip it.
@@ -151,49 +153,49 @@ flag[33] = {
 
 flag[34] = {
     f-name  = cbpending;
-    f-mask  = local_only, hide_lock;
+    f-mask  = off_wire /* , hide_lock */;
     f-desc  = 'this lock is being destroyed';
 };
 
 flag[35] = {
     f-name  = wait_noreproc;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = 'not a real flag, not saved in lock';
 };
 
 flag[36] = {
     f-name  = cancel;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = 'cancellation callback already run';
 };
 
 flag[37] = {
     f-name  = local_only;
-    f-mask  = local_only, hide_lock;
-    f-desc  = 'whatever it might mean';
+    f-mask  = off_wire /* , hide_lock */;
+    f-desc  = 'whatever it might mean -- never transmitted?';
 };
 
 flag[38] = {
     f-name  = failed;
-    f-mask  = local_only, gone, hide_lock;
+    f-mask  = off_wire, gone /* , hide_lock */;
     f-desc  = "don't run the cancel callback under ldlm_cli_cancel_unused";
 };
 
 flag[39] = {
     f-name  = canceling;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = 'lock cancel has already been sent';
 };
 
 flag[40] = {
     f-name  = local;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = 'local lock (ie, no srv/cli split)';
 };
 
 flag[41] = {
     f-name  = lvb_ready;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = <<- _EOF_
        XXX FIXME: This is being added to b_size as a low-risk fix to the
        fact that the LVB filling happens _after_ the lock has been granted,
@@ -211,7 +213,7 @@ flag[41] = {
 
 flag[42] = {
     f-name  = kms_ignore;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = <<- _EOF_
        A lock contributes to the known minimum size (KMS) calculation until
        it has finished the part of its cancelation that performs write back
@@ -224,19 +226,19 @@ flag[42] = {
 
 flag[43] = {
     f-name  = cp_reqd;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = 'completion AST to be executed';
 };
 
 flag[44] = {
     f-name  = cleaned;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = 'cleanup_resource has already handled the lock';
 };
 
 flag[45] = {
     f-name  = atomic_cb;
-    f-mask  = local_only, hide_lock;
+    f-mask  = off_wire /* , hide_lock */;
     f-desc  = <<- _EOF_
        optimization hint: LDLM can run blocking callback from current context
        w/o involving separate thread. in order to decrease cs rate
@@ -245,7 +247,7 @@ flag[45] = {
 
 flag[46] = {
     f-name  = bl_ast;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = <<- _EOF_
        It may happen that a client initiates two operations, e.g. unlink
        and mkdir, such that the server sends a blocking AST for conflicting
@@ -254,23 +256,23 @@ flag[46] = {
        is taken by the first operation. LDLM_FL_BL_AST is set by
        ldlm_callback_handler() in the lock to prevent the Early Lock Cancel
        (ELC) code from cancelling it.
-
-       LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock
-       cache is dropped to let ldlm_callback_handler() return EINVAL to the
-       server. It is used when ELC RPC is already prepared and is waiting
-       for rpc_lock, too late to send a separate CANCEL RPC.
        _EOF_;
 };
 
 flag[47] = {
     f-name  = bl_done;
-    f-mask  = local_only;
-    f-desc  = 'whatever it might mean';
+    f-mask  = off_wire;
+    f-desc  = <<- _EOF_
+       Set by ldlm_cancel_callback() when lock cache is dropped to let
+       ldlm_callback_handler() return EINVAL to the server. It is used when
+       ELC RPC is already prepared and is waiting for rpc_lock, too late to
+       send a separate CANCEL RPC.
+       _EOF_;
 };
 
 flag[48] = {
     f-name  = no_lru;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = <<- _EOF_
        Don't put lock into the LRU list, so that it is not canceled due to
        aging.  Used by MGC locks, they are cancelled only at unmount or by
@@ -280,7 +282,7 @@ flag[48] = {
 
 flag[49] = {
     f-name  = fail_notified;
-    f-mask  = local_only, gone;
+    f-mask  = off_wire, gone;
     f-desc  = <<- _EOF_
        Set for locks that failed and where the server has been notified.
 
@@ -290,7 +292,7 @@ flag[49] = {
 
 flag[50] = {
     f-name  = destroyed;
-    f-mask  = local_only, gone;
+    f-mask  = off_wire, gone;
     f-desc  = <<- _EOF_
        Set for locks that were removed from class hash table and will be
        destroyed when last reference to them is released. Set by
@@ -302,13 +304,13 @@ flag[50] = {
 
 flag[51] = {
     f-name  = server_lock;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = 'flag whether this is a server namespace lock';
 };
 
 flag[52] = {
     f-name  = res_locked;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = <<- _EOF_
        It's set in lock_res_and_lock() and unset in unlock_res_and_lock().
 
@@ -321,7 +323,7 @@ flag[52] = {
 
 flag[53] = {
     f-name  = waited;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = <<- _EOF_
        It's set once we call ldlm_add_waiting_lock_res_locked()
        to start the lock-timeout timer and it will never be reset.
@@ -332,12 +334,12 @@ flag[53] = {
 
 flag[54] = {
     f-name  = ns_srv;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = 'Flag whether this is a server namespace lock.';
 };
 
 flag[55] = {
     f-name  = excl;
-    f-mask  = local_only;
+    f-mask  = off_wire;
     f-desc  = 'Flag whether this lock can be reused. Used by exclusive open.';
 };
index 5a5cc26..b2014b4 100644 (file)
@@ -34,7 +34,7 @@ fmt='#define LDLM_FL_%-16s        0x%016XULL // bit  %2u
 #define ldlm_clear_%-20s LDLM_CLEAR_FLAG((_l), 1ULL << %2u)\n'
 acc_fmt=''
 tmpfile=[=(base-name)=]-$$.tmp
-exec 8>&1 1> $tmpfile
+exec 7>&1 1> $tmpfile
 [=
 
 FOR flag
@@ -73,7 +73,7 @@ mask_[= f-mask =]=$(( ${mask_[= f-mask =]:-0} + bitval ))[=
 ENDFOR flag
 
 =]
-exec 1>&8 8>&-
+exec 1>&7 7>&-
 fmt='\n/** l_flags bits marked as "%s" bits */
 #define LDLM_FL_%-22s  0x%016XULL\n'
 printf "$fmt" all_flags ALL_FLAGS_MASK $allbits
@@ -97,39 +97,120 @@ rm -f $tmpfile script.sh[=
 =]
 
 /** test for ldlm_lock flag bit set */
-#define LDLM_TEST_FLAG(_l, _b)        (((_l)->l_flags & (_b)) != 0)
+#define LDLM_TEST_FLAG(_l, _b)    (((_l)->l_flags & (_b)) != 0)
+
+/** multi-bit test: are any of mask bits set? */
+#define LDLM_HAVE_MASK(_l, _m)    (((_l)->l_flags & LDLM_FL_##_m##_MASK) != 0)
 
 /** set a ldlm_lock flag bit */
-#define LDLM_SET_FLAG(_l, _b)         (((_l)->l_flags |= (_b))
+#define LDLM_SET_FLAG(_l, _b)     ((_l)->l_flags |= (_b))
 
 /** clear a ldlm_lock flag bit */
-#define LDLM_CLEAR_FLAG(_l, _b)       (((_l)->l_flags &= ~(_b))
-
-/** Mask of flags inherited from parent lock when doing intents. */
-#define LDLM_INHERIT_FLAGS            LDLM_FL_INHERIT_MASK
-
-/** Mask of Flags sent in AST lock_flags to map into the receiving lock. */
-#define LDLM_AST_FLAGS                LDLM_FL_AST_MASK
+#define LDLM_CLEAR_FLAG(_l, _b)   ((_l)->l_flags &= ~(_b))
 
 /** @} subgroup */
 /** @} group */
-#ifdef WIRESHARK_COMPILE[=
+#endif /* LDLM_ALL_FLAGS_MASK */
+[=
+(out-push-new (string-append (base-name) "_wshark.c"))
+(define flags-vals "")
+(define dissect    "")
+(define init-text  "")
+
+(define up-name    "")
+(define down-name  "")
+
+(define dissect-fmt
+    "  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_%s);\n")
+(out-push-new)     \=]
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_%1$s,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_%2$s",
+      /* abbrev  */ "lustre.ldlm_fl_%1$s",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_%2$s,
+      /* blurb   */ %3$s,
+      /* id      */ HFILL
+    }
+  },
+[= (define init-fmt (out-pop #t)) \=]
+/**
+ * \file [=(out-name)=]
+ *
+ * wireshark definitions.  This file contains the ldlm lock flag bits
+ * that can be transmitted over the wire.  There are many other bits,
+ * but they are not transmitted and not handled here.
+ */
+#ifdef WSHARK_HEAD
+[=
+
 FOR flag                       =][=
-  (sprintf "\nstatic int hf_lustre_ldlm_fl_%-20s= -1;"
-           (string-downcase! (get "f-name")) ) =][=
-ENDFOR flag                    =]
 
-const value_string lustre_ldlm_flags_vals[] = {[=
+  (if (match-value? = "f-mask" "on_wire") (begin
+      (set! temp-txt  (get "f-name"))
+      (set! up-name   (string-upcase (string->c-name! temp-txt)))
+      (set! down-name (string-downcase temp-txt))
 
-FOR flag                       =][=
-   (define up-name (string-upcase! (string->c-name! (get "f-name"))))
-   (sprintf "\n  {LDLM_FL_%-20s \"LDLM_FL_%s\"}," (string-append up-name ",")
-            up-name)           =][=
+      (set! flags-vals (string-append flags-vals (sprintf
+            "\n  {LDLM_FL_%-20s \"LDLM_FL_%s\"},"
+            (string-append up-name ",") up-name )))
+
+      (set! dissect (string-append dissect (sprintf dissect-fmt
+            down-name)))
+
+      (set! init-text (string-append init-text (sprintf init-fmt
+            down-name up-name (c-string (get "f-desc")) )))
+
+      (ag-fprintf 0 "\nstatic int hf_lustre_ldlm_fl_%-20s= -1;"
+           down-name)
+  )  )                         =][=
 ENDFOR flag                    =]
+
+const value_string lustre_ldlm_flags_vals[] = {[= (. flags-vals) =]
   { 0, NULL }
 };
-#endif /*  WIRESHARK_COMPILE */
-[= #
+
+/* IDL: struct ldlm_reply { */
+/* IDL:        uint32 lock_flags; */
+/* IDL:        uint32 lock_padding; */
+/* IDL:        struct ldlm_lock_desc { */
+/* IDL: } lock_desc; */
+/* IDL:        struct lustre_handle { */
+/* IDL: } lock_handle; */
+/* IDL:        uint64 lock_policy_res1; */
+/* IDL:        uint64 lock_policy_res2; */
+/* IDL: } */
+
+static int
+lustre_dissect_element_ldlm_lock_flags(
+       tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_,
+       proto_tree *parent_tree _U_, int hf_index _U_)
+{
+  proto_item *item = NULL;
+  proto_tree *tree = NULL;
+
+  if (parent_tree) {
+    item = proto_tree_add_item(parent_tree,hf_index, tvb, offset, 4, TRUE);
+    tree = proto_item_add_subtree(item, ett_lustre_ldlm_lock_flags);
+  }
+[= (shell
+"sed '$s/^/  /;$i\\
+  return' <<- \\_EOF_\n" dissect "_EOF_"
+) =]
+}
+#endif /* WSHARK_HEAD */
+
+#ifdef WSHARK_INIT_DATA
+[=
+
+(emit init-text
+"\n#endif /* WSHARK_INIT_DATA */\n")
+(out-pop)
+
+=][= #
 
 // TEST CODE                    =][=
 IF  (getenv "TESTING")          =][=
@@ -317,5 +398,4 @@ ENDIF TESTING
  * indent-tabs-mode: t
  * End:
 
-\=]
-#endif /* LDLM_ALL_FLAGS_MASK */
+=]
index aad0224..885319f 100644 (file)
@@ -32,20 +32,25 @@ WS_HOME =
 #    Note: When using the cbuild script leave the macro undefined
 #    (default: /usr/lib/wireshark/plugins/$(WS_VERSION))
 #
-#    For non-root and  non-rpmbuilds you might want to set the value to ${HOME}/.wireshark/plugins
+#    For non-root and  non-rpmbuilds you might want to set the value to
+#        ${HOME}/.wireshark/plugins
 PLUGIN_DIR =
 
-CFLAGS = -DINET6 -D_U_=__attribute__\(\(unused\)\) -Wall -Wpointer-arith -g -DXTHREADS -D_REENTRANT -DXUSE_MTSAFE_API -fPIC -DPIC
+CFLAGS = -DINET6 -D_U_=__attribute__\(\(unused\)\) -Wall -Wpointer-arith -g \
+       -DXTHREADS -D_REENTRANT -DXUSE_MTSAFE_API -fPIC -DPIC
 
 ifdef WS_HOME
-#INCS = $(shell echo "-I${WS_HOME} `pkg-config --libs --cflags glib-2.0`")
-INCS = $(shell echo "-I${WS_HOME} $(PLUGIN_COMPILE_FLAGS)") -DHAVE_CONFIG_H 
+#INCS = $(shell echo "-I${WS_HOME} `./wsconfig.sh --libs --cflags glib-2.0`")
+INCS := $(shell echo "-I${WS_HOME} $(PLUGIN_COMPILE_FLAGS)") -DHAVE_CONFIG_H 
 
 else
-INCS = $(shell pkg-config --libs --cflags wireshark) $(shell echo "$(PLUGIN_COMPILE_FLAGS)") -DHAVE_CONFIG_H
-WS_VERSION = $(shell pkg-config --modversion wireshark)
-LIBDIR = $(shell pkg-config --variable=libdir wireshark)
-CHECK=pkg-config --atleast-version=${MIN_WIRESHARK_VERSION} wireshark
+INCS := $(shell ./wsconfig.sh --libs --cflags wireshark) \
+       $(shell ./wsconfig.sh --libs --cflags glib-2.0) \
+       -I../../include \
+       $(shell echo "$(PLUGIN_COMPILE_FLAGS)") -DHAVE_CONFIG_H
+WS_VERSION := $(shell ./wsconfig.sh --modversion wireshark)
+LIBDIR := $(shell ./wsconfig.sh --variable=libdir wireshark)
+CHECK=./wsconfig.sh --atleast-version=${MIN_WIRESHARK_VERSION} wireshark
 endif
 
 CFLAGS += $(INCS)
@@ -55,8 +60,8 @@ SRCS_LUSTRE = packet-lustre.c
 
 CC   = gcc
 
-OBJS_LNET = $(foreach src, $(SRCS_LNET), $(src:.c=.o))
-OBJS_LUSTRE = $(foreach src, $(SRCS_LUSTRE), $(src:.c=.o))
+OBJS_LNET := $(foreach src, $(SRCS_LNET), $(src:.c=.o))
+OBJS_LUSTRE := $(foreach src, $(SRCS_LUSTRE), $(src:.c=.o))
 
 PLUGINS=lnet.so lustre.so
 
@@ -89,7 +94,12 @@ ifndef PLUGIN_DIR
 endif
 
 
-all: check $(PLUGINS)
+all: check $(PLUGINS) lustre_dlm_flags_wshark.c
+
+lustre_dlm_flags_wshark.c :
+       cd ../../../contrib/bit-masks ; \
+       make
+       test -f lustre_dlm_flags_wshark.c
 
 check:
        @if ! ${CHECK}; then\
diff --git a/lustre/contrib/wireshark/lustre_dlm_flags_wshark.c b/lustre/contrib/wireshark/lustre_dlm_flags_wshark.c
new file mode 100644 (file)
index 0000000..0428d8b
--- /dev/null
@@ -0,0 +1,309 @@
+/**
+ * \file lustre_dlm_flags_wshark.c
+ *
+ * wireshark definitions.  This file contains the ldlm lock flag bits
+ * that can be transmitted over the wire.  There are many other bits,
+ * but they are not transmitted and not handled here.
+ */
+#ifdef WSHARK_HEAD
+
+static int hf_lustre_ldlm_fl_lock_changed        = -1;
+static int hf_lustre_ldlm_fl_block_granted       = -1;
+static int hf_lustre_ldlm_fl_block_conv          = -1;
+static int hf_lustre_ldlm_fl_block_wait          = -1;
+static int hf_lustre_ldlm_fl_ast_sent            = -1;
+static int hf_lustre_ldlm_fl_replay              = -1;
+static int hf_lustre_ldlm_fl_intent_only         = -1;
+static int hf_lustre_ldlm_fl_has_intent          = -1;
+static int hf_lustre_ldlm_fl_flock_deadlock      = -1;
+static int hf_lustre_ldlm_fl_discard_data        = -1;
+static int hf_lustre_ldlm_fl_no_timeout          = -1;
+static int hf_lustre_ldlm_fl_block_nowait        = -1;
+static int hf_lustre_ldlm_fl_test_lock           = -1;
+static int hf_lustre_ldlm_fl_cancel_on_block     = -1;
+static int hf_lustre_ldlm_fl_deny_on_contention  = -1;
+static int hf_lustre_ldlm_fl_ast_discard_data    = -1;
+
+const value_string lustre_ldlm_flags_vals[] = {
+  {LDLM_FL_LOCK_CHANGED,        "LDLM_FL_LOCK_CHANGED"},
+  {LDLM_FL_BLOCK_GRANTED,       "LDLM_FL_BLOCK_GRANTED"},
+  {LDLM_FL_BLOCK_CONV,          "LDLM_FL_BLOCK_CONV"},
+  {LDLM_FL_BLOCK_WAIT,          "LDLM_FL_BLOCK_WAIT"},
+  {LDLM_FL_AST_SENT,            "LDLM_FL_AST_SENT"},
+  {LDLM_FL_REPLAY,              "LDLM_FL_REPLAY"},
+  {LDLM_FL_INTENT_ONLY,         "LDLM_FL_INTENT_ONLY"},
+  {LDLM_FL_HAS_INTENT,          "LDLM_FL_HAS_INTENT"},
+  {LDLM_FL_FLOCK_DEADLOCK,      "LDLM_FL_FLOCK_DEADLOCK"},
+  {LDLM_FL_DISCARD_DATA,        "LDLM_FL_DISCARD_DATA"},
+  {LDLM_FL_NO_TIMEOUT,          "LDLM_FL_NO_TIMEOUT"},
+  {LDLM_FL_BLOCK_NOWAIT,        "LDLM_FL_BLOCK_NOWAIT"},
+  {LDLM_FL_TEST_LOCK,           "LDLM_FL_TEST_LOCK"},
+  {LDLM_FL_CANCEL_ON_BLOCK,     "LDLM_FL_CANCEL_ON_BLOCK"},
+  {LDLM_FL_DENY_ON_CONTENTION,  "LDLM_FL_DENY_ON_CONTENTION"},
+  {LDLM_FL_AST_DISCARD_DATA,    "LDLM_FL_AST_DISCARD_DATA"},
+  { 0, NULL }
+};
+
+/* IDL: struct ldlm_reply { */
+/* IDL:        uint32 lock_flags; */
+/* IDL:        uint32 lock_padding; */
+/* IDL:        struct ldlm_lock_desc { */
+/* IDL: } lock_desc; */
+/* IDL:        struct lustre_handle { */
+/* IDL: } lock_handle; */
+/* IDL:        uint64 lock_policy_res1; */
+/* IDL:        uint64 lock_policy_res2; */
+/* IDL: } */
+
+static int
+lustre_dissect_element_ldlm_lock_flags(
+       tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_,
+       proto_tree *parent_tree _U_, int hf_index _U_)
+{
+  proto_item *item = NULL;
+  proto_tree *tree = NULL;
+
+  if (parent_tree) {
+    item = proto_tree_add_item(parent_tree,hf_index, tvb, offset, 4, TRUE);
+    tree = proto_item_add_subtree(item, ett_lustre_ldlm_lock_flags);
+  }
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_lock_changed);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_granted);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_conv);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_wait);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_ast_sent);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_replay);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_intent_only);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_has_intent);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_flock_deadlock);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_discard_data);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_no_timeout);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_nowait);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_test_lock);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_cancel_on_block);
+  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_deny_on_contention);
+  return
+    dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_ast_discard_data);
+}
+#endif /* WSHARK_HEAD */
+
+#ifdef WSHARK_INIT_DATA
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_lock_changed,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_LOCK_CHANGED",
+      /* abbrev  */ "lustre.ldlm_fl_lock_changed",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_LOCK_CHANGED,
+      /* blurb   */ "extent, mode, or resource changed",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_block_granted,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_BLOCK_GRANTED",
+      /* abbrev  */ "lustre.ldlm_fl_block_granted",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_BLOCK_GRANTED,
+      /* blurb   */ "Server placed lock on granted list, or a recovering client wants\n"
+       "the lock added to the granted list, no questions asked.",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_block_conv,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_BLOCK_CONV",
+      /* abbrev  */ "lustre.ldlm_fl_block_conv",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_BLOCK_CONV,
+      /* blurb   */ "Server placed lock on conv list, or a recovering client wants the lock\n"
+       "added to the conv list, no questions asked.",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_block_wait,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_BLOCK_WAIT",
+      /* abbrev  */ "lustre.ldlm_fl_block_wait",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_BLOCK_WAIT,
+      /* blurb   */ "Server placed lock on wait list, or a recovering client wants\n"
+       "the lock added to the wait list, no questions asked.",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_ast_sent,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_AST_SENT",
+      /* abbrev  */ "lustre.ldlm_fl_ast_sent",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_AST_SENT,
+      /* blurb   */ "blocking or cancel packet was queued for sending.",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_replay,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_REPLAY",
+      /* abbrev  */ "lustre.ldlm_fl_replay",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_REPLAY,
+      /* blurb   */ "Lock is being replayed.  This could probably be implied by the fact that\n"
+       "one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous.",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_intent_only,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_INTENT_ONLY",
+      /* abbrev  */ "lustre.ldlm_fl_intent_only",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_INTENT_ONLY,
+      /* blurb   */ "Don't grant lock, just do intent.",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_has_intent,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_HAS_INTENT",
+      /* abbrev  */ "lustre.ldlm_fl_has_intent",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_HAS_INTENT,
+      /* blurb   */ "lock request has intent",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_flock_deadlock,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_FLOCK_DEADLOCK",
+      /* abbrev  */ "lustre.ldlm_fl_flock_deadlock",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_FLOCK_DEADLOCK,
+      /* blurb   */ "flock deadlock detected",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_discard_data,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_DISCARD_DATA",
+      /* abbrev  */ "lustre.ldlm_fl_discard_data",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_DISCARD_DATA,
+      /* blurb   */ "discard (no writeback) on cancel",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_no_timeout,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_NO_TIMEOUT",
+      /* abbrev  */ "lustre.ldlm_fl_no_timeout",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_NO_TIMEOUT,
+      /* blurb   */ "Blocked by group lock - wait indefinitely",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_block_nowait,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_BLOCK_NOWAIT",
+      /* abbrev  */ "lustre.ldlm_fl_block_nowait",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_BLOCK_NOWAIT,
+      /* blurb   */ "Server told not to wait if blocked. For AGL, OST will not send\n"
+       "glimpse callback.",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_test_lock,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_TEST_LOCK",
+      /* abbrev  */ "lustre.ldlm_fl_test_lock",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_TEST_LOCK,
+      /* blurb   */ "return blocking lock",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_cancel_on_block,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_CANCEL_ON_BLOCK",
+      /* abbrev  */ "lustre.ldlm_fl_cancel_on_block",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_CANCEL_ON_BLOCK,
+      /* blurb   */ "Immediatelly cancel such locks when they block some other locks. Send\n"
+       "cancel notification to original lock holder, but expect no reply. This is\n"
+       "for clients (like liblustre) that cannot be expected to reliably response\n"
+       "to blocking AST.",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_deny_on_contention,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_DENY_ON_CONTENTION",
+      /* abbrev  */ "lustre.ldlm_fl_deny_on_contention",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_DENY_ON_CONTENTION,
+      /* blurb   */ "measure lock contention and return -EUSERS if locking contention is high",
+      /* id      */ HFILL
+    }
+  },
+  {
+    /* p_id    */ &hf_lustre_ldlm_fl_ast_discard_data,
+    /* hfinfo  */ {
+      /* name    */ "LDLM_FL_AST_DISCARD_DATA",
+      /* abbrev  */ "lustre.ldlm_fl_ast_discard_data",
+      /* type    */ FT_BOOLEAN,
+      /* display */ 32,
+      /* strings */ TFS(&lnet_flags_set_truth),
+      /* bitmask */ LDLM_FL_AST_DISCARD_DATA,
+      /* blurb   */ "These are flags that are mapped into the flags and ASTs of blocking locks\n"
+       "Add FL_DISCARD to blocking ASTs",
+      /* id      */ HFILL
+    }
+  },
+
+#endif /* WSHARK_INIT_DATA */
index 39b09e4..3e14eda 100644 (file)
@@ -692,10 +692,10 @@ dissect_lnet_message(tvbuff_t * tvb, packet_info *pinfo, proto_tree *tree)
 
        guint64 match;
        guint32 msg_type;
-
+/*
        lnet_request_val_t* conversation_val ;
 
-
+*/
        if (check_col(pinfo->cinfo, COL_PROTOCOL)) {
                col_set_str(pinfo->cinfo, COL_PROTOCOL, "Lnet");
        }
@@ -805,8 +805,12 @@ dissect_lnet_message(tvbuff_t * tvb, packet_info *pinfo, proto_tree *tree)
                }
 
 
-               conversation_val = get_lnet_conv(pinfo , lnet_request_hash, match );
-               /*      proto_tree_add_text(tree, tvb, 0 , 0, "match = %" G_GINT64_MODIFIER "u parent = %d", conversation_val -> match_bits , conversation_val -> packet_num_parent); */
+               /* conversation_val = */
+                get_lnet_conv(pinfo , lnet_request_hash, match );
+               /*      proto_tree_add_text(tree, tvb, 0 , 0, "match = %"
+                        G_GINT64_MODIFIER "u parent = %d",
+                        conversation_val -> match_bits ,
+                        conversation_val -> packet_num_parent); */
 
 
                /* padding */
index f53c85f..c2b7ab7 100644 (file)
@@ -37,6 +37,7 @@
 #include <epan/packet.h>
 
 #include <epan/dissectors/packet-windows-common.h>
+#include "lustre_dlm_flags.h"
 
 const true_false_string lnet_flags_set_truth = { "Set", "Unset" };
 
@@ -279,9 +280,6 @@ enum fld_rpc_opc {
   FLD_FIRST_OPC    = FLD_QUERY
 };
 
-#define  WIRESHARK_COMPILE
-#include "lustre_dlm_flags.h"
-
 #define LDLM_ENQUEUE (101)
 #define LDLM_CONVERT (102)
 #define LDLM_CANCEL (103)
@@ -1030,11 +1028,17 @@ static int hf_lustre_llog_hdr_llh_flag_is_play = -1;
 /* proto declaration */
 static gint proto_lustre = -1;
 
+typedef int (dissect_func)(
+    tvbuff_t *tvb, gint offset, packet_info *pinfo _U_, proto_tree *tree,
+    int hfindex);
 
+static dissect_func dissect_uint64, dissect_uint32, dissect_uint16, dissect_uint8;
 
+#define  WSHARK_HEAD
+#include "lustre_dlm_flags_wshark.c"
+#undef   WSHARK_HEAD
 
 static int ldlm_opcode_process(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree * tree _U_, guint64 intent_opc _U_) ;
-static int lustre_dissect_element_ldlm_lock_flags(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *parent_tree _U_, int hf_index _U_);
 static int add_extra_padding(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree * tree _U_) ;
 
 
@@ -5669,62 +5673,6 @@ lustre_dissect_struct_ldlm_request(tvbuff_t *tvb _U_, int offset _U_, packet_inf
   return offset;
 }
 
-
-/* IDL: struct ldlm_reply { */
-/* IDL:        uint32 lock_flags; */
-/* IDL:        uint32 lock_padding; */
-/* IDL:        struct ldlm_lock_desc { */
-/* IDL: } lock_desc; */
-/* IDL:        struct lustre_handle { */
-/* IDL: } lock_handle; */
-/* IDL:        uint64 lock_policy_res1; */
-/* IDL:        uint64 lock_policy_res2; */
-/* IDL: } */
-
-static int
-lustre_dissect_element_ldlm_lock_flags(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *parent_tree _U_, int hf_index _U_)
-{
-  proto_item *item = NULL;
-  proto_tree *tree = NULL;
-
-  if (parent_tree) {
-    item = proto_tree_add_item(parent_tree,hf_index, tvb, offset, 4, TRUE);
-    tree = proto_item_add_subtree(item, ett_lustre_ldlm_lock_flags);
-  }
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_ast_discard_data);
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_deny_on_contention);
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_bl_done           );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_bl_ast            );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_atomic_cb         );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_cleaned           );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_cp_reqd           );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_cancel_on_block   );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_no_lru            );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_kms_ignore        );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_lvb_ready         );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_test_lock         );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_nowait      );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_no_timeout        );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_discard_data      );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_warn              );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_local             );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_canceling         );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_has_intent        );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_failed            );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_local_only        );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_intent_only       );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_replay            );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_cancel            );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_wait_noreproc     );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_ast_sent          );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_cbpending         );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_wait        );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_conv        );
-  dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_block_granted     );
-  offset=dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_lock_changed      );
-  return offset;
-}
-
 static int
 lustre_dissect_element_ldlm_reply_lock_padding(tvbuff_t *tvb _U_, int offset _U_, packet_info *pinfo _U_, proto_tree *tree _U_)
 {
@@ -10478,37 +10426,9 @@ void proto_register_dcerpc_lustre(void)
     { &hf_lustre_ldlm_reply_lock_flags,
       { "Lock Flags", "lustre.ldlm_reply.lock_flags", FT_UINT32,BASE_HEX, NULL, 0, "", HFILL }},
 
-    {&hf_lustre_ldlm_fl_lock_changed, {"LDLM_FL_LOCK_CHANGED", "lustre.ldlm_fl_lock_changed", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_LOCK_CHANGED, "", HFILL } },
-    {&hf_lustre_ldlm_fl_block_granted, {"LDLM_FL_BLOCK_GRANTED", "lustre.ldlm_fl_block_granted", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BLOCK_GRANTED, "", HFILL } },
-    {&hf_lustre_ldlm_fl_block_conv, {"LDLM_FL_BLOCK_CONV", "lustre.ldlm_fl_block_conv", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BLOCK_CONV, "", HFILL } },
-    {&hf_lustre_ldlm_fl_block_wait, {"LDLM_FL_BLOCK_WAIT", "lustre.ldlm_fl_block_wait", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BLOCK_WAIT, "", HFILL } },
-    {&hf_lustre_ldlm_fl_cbpending, {"LDLM_FL_CBPENDING", "lustre.ldlm_fl_cbpending", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_CBPENDING, "", HFILL } },
-    {&hf_lustre_ldlm_fl_ast_sent, {"LDLM_FL_AST_SENT", "lustre.ldlm_fl_ast_sent", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_AST_SENT, "", HFILL } },
-    {&hf_lustre_ldlm_fl_wait_noreproc, {"LDLM_FL_WAIT_NOREPROC", "lustre.ldlm_fl_wait_noreproc", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_WAIT_NOREPROC, "", HFILL } },
-    {&hf_lustre_ldlm_fl_cancel, {"LDLM_FL_CANCEL", "lustre.ldlm_fl_cancel", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_CANCEL, "", HFILL } },
-    {&hf_lustre_ldlm_fl_replay, {"LDLM_FL_REPLAY", "lustre.ldlm_fl_replay", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_REPLAY, "", HFILL } },
-    {&hf_lustre_ldlm_fl_intent_only, {"LDLM_FL_INTENT_ONLY", "lustre.ldlm_fl_intent_only", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_INTENT_ONLY, "", HFILL } },
-    {&hf_lustre_ldlm_fl_local_only, {"LDLM_FL_LOCAL_ONLY", "lustre.ldlm_fl_local_only", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_LOCAL_ONLY, "", HFILL } },
-    {&hf_lustre_ldlm_fl_failed, {"LDLM_FL_FAILED", "lustre.ldlm_fl_failed", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_FAILED, "", HFILL } },
-    {&hf_lustre_ldlm_fl_has_intent, {"LDLM_FL_HAS_INTENT", "lustre.ldlm_fl_has_intent", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_HAS_INTENT, "", HFILL } },
-    {&hf_lustre_ldlm_fl_canceling, {"LDLM_FL_CANCELING", "lustre.ldlm_fl_canceling", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_CANCELING, "", HFILL } },
-    {&hf_lustre_ldlm_fl_local, {"LDLM_FL_LOCAL", "lustre.ldlm_fl_local", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_LOCAL, "", HFILL } },
-    {&hf_lustre_ldlm_fl_warn, {"LDLM_FL_WARN", "lustre.ldlm_fl_warn", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_WARN, "", HFILL } },
-    {&hf_lustre_ldlm_fl_discard_data, {"LDLM_FL_DISCARD_DATA", "lustre.ldlm_fl_discard_data", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_DISCARD_DATA, "", HFILL } },
-    {&hf_lustre_ldlm_fl_no_timeout, {"LDLM_FL_NO_TIMEOUT", "lustre.ldlm_fl_no_timeout", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_NO_TIMEOUT, "", HFILL } },
-    {&hf_lustre_ldlm_fl_block_nowait, {"LDLM_FL_BLOCK_NOWAIT", "lustre.ldlm_fl_block_nowait", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BLOCK_NOWAIT, "", HFILL } },
-    {&hf_lustre_ldlm_fl_test_lock, {"LDLM_FL_TEST_LOCK", "lustre.ldlm_fl_test_lock", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_TEST_LOCK, "", HFILL } },
-    {&hf_lustre_ldlm_fl_lvb_ready, {"LDLM_FL_LVB_READY", "lustre.ldlm_fl_lvb_ready", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_LVB_READY, "", HFILL } },
-    {&hf_lustre_ldlm_fl_kms_ignore, {"LDLM_FL_KMS_IGNORE", "lustre.ldlm_fl_kms_ignore", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_KMS_IGNORE, "", HFILL } },
-    {&hf_lustre_ldlm_fl_no_lru, {"LDLM_FL_NO_LRU", "lustre.ldlm_fl_no_lru", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_NO_LRU, "", HFILL } },
-    {&hf_lustre_ldlm_fl_cancel_on_block, {"LDLM_FL_CANCEL_ON_BLOCK", "lustre.ldlm_fl_cancel_on_block", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_CANCEL_ON_BLOCK, "", HFILL } },
-    {&hf_lustre_ldlm_fl_cp_reqd, {"LDLM_FL_CP_REQD", "lustre.ldlm_fl_cp_reqd", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_CP_REQD, "", HFILL } },
-    {&hf_lustre_ldlm_fl_cleaned, {"LDLM_FL_CLEANED", "lustre.ldlm_fl_cleaned", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_CLEANED, "", HFILL } },
-    {&hf_lustre_ldlm_fl_atomic_cb, {"LDLM_FL_ATOMIC_CB", "lustre.ldlm_fl_atomic_cb", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_ATOMIC_CB, "", HFILL } },
-    {&hf_lustre_ldlm_fl_bl_ast, {"LDLM_FL_BL_AST", "lustre.ldlm_fl_bl_ast", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BL_AST, "", HFILL } },
-    {&hf_lustre_ldlm_fl_bl_done, {"LDLM_FL_BL_DONE", "lustre.ldlm_fl_bl_done", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BL_DONE, "", HFILL } },
-    {&hf_lustre_ldlm_fl_deny_on_contention, {"LDLM_FL_DENY_ON_CONTENTION", "lustre.ldlm_fl_deny_on_contention", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_DENY_ON_CONTENTION, "", HFILL } },
-    {&hf_lustre_ldlm_fl_ast_discard_data, {"LDLM_AST_DISCARD_DATA", "lustre.ldlm_ast_discard_data", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_AST_DISCARD_DATA, "", HFILL } },
+#define  WSHARK_INIT_DATA
+#include "lustre_dlm_flags_wshark.c"
+#undef   WSHARK_INIT_DATA
 
     { &hf_lustre_obdo_o_misc,
       { "O Misc", "lustre.obdo.o_misc", FT_UINT32, BASE_DEC, NULL, 0, "", HFILL }},
index 78c4904..7fbe6de 100644 (file)
@@ -1,33 +1,35 @@
 # This file contain all configuration information to build
 # `lustre-release/lustre/contrib/wireshark'
 
-###########################################################################
-#                                                                         #
-#    DOWNLOAD CONFIGURATION
-#                                                                         #
-###########################################################################
+[[ $1 =~ --.* ]] || {
+    ###########################################################################
+    #                                                                         #
+    #    DOWNLOAD CONFIGURATION
+    #                                                                         #
+    ###########################################################################
 
-##   BEGIN: -can-edit   ##
+    ##   BEGIN: -can-edit   ##
     # URL of directory containing all source tar balls
-export WS_DOWNLOAD_BASE_URL='http://wiresharkdownloads.riverbed.com/wireshark/src/all-versions'
+    export WS_DOWNLOAD_BASE_URL='http://wiresharkdownloads.riverbed.com'
+    WS_DOWNLOAD_BASE_URL+='/wireshark/src/all-versions'
 
     # wireshark verion to be used
-export WS_VERSION='1.6.8'
-##   END  : -can-edit   ##
+    export WS_VERSION='1.6.8'
+    ##   END  : -can-edit   ##
 
     # URL of the wireshark source code tarball
     # Implicit assumption: Wireshark release names follow the nameing
     # convention coded in the content of the following varialble
-export WS_SOURCE_URL="${WS_DOWNLOAD_BASE_URL}/wireshark-${WS_VERSION}.tar.bz2"
+    export WS_SOURCE_URL="${WS_DOWNLOAD_BASE_URL}/wireshark-${WS_VERSION}.tar.bz2"
 
 
-###########################################################################
-#                                                                         #
-#                   BUILD ENVIRONMENT                                     #
-#                                                                         #
-###########################################################################
+    ###########################################################################
+    #                                                                         #
+    #                   BUILD ENVIRONMENT                                     #
+    #                                                                         #
+    ###########################################################################
 
-##   BEGIN: -can-edit   ##
+    ##   BEGIN: -can-edit   ##
     # Space separate list of RPMs needed to be installed for 
     # compilation of wireshark
 
@@ -35,31 +37,155 @@ export WS_SOURCE_URL="${WS_DOWNLOAD_BASE_URL}/wireshark-${WS_VERSION}.tar.bz2"
     # If distributions 'marked' by same release file, content has to
     # parsed and variable PREREQUISITE_RPMS has to be set accoringly to
     # package name(s) used for each distro.
-if [ -r /etc/redhat-release ] ; then
-    export PREREQUISITE_RPMS='gtk2 gtk2-devel glib2 libpcap libpcap-devel perl'
-elif [ -r /etc/SuSE-release ] ; then
-    export PREREQUISITE_RPMS='gtk2 gtk2-devel glib2 libpcap0 libpcap-devel perl'
-fi
+    if [ -r /etc/redhat-release ] ; then
+        export PREREQUISITE_RPMS='gtk2 gtk2-devel glib2 libpcap libpcap-devel perl'
+    elif [ -r /etc/SuSE-release ] ; then
+        export PREREQUISITE_RPMS='gtk2 gtk2-devel glib2 libpcap0 libpcap-devel perl'
+    fi
 
     # Include and linker flags needed to Lustre/LNet
     # Only version indepent information should be added here
     # (Back ticked expression will be evaluated by make command)
-export PLUGIN_COMPILE_FLAGS='`pkg-config --libs --cflags glib-2.0`'
-##   END  : -can-edit   ##
+    export PLUGIN_COMPILE_FLAGS='`pkg-config --libs --cflags glib-2.0`'
+    ##   END  : -can-edit   ##
 
-    # Top-level directory to be used to unpack/compile/install wireshark/lustre-git-repo
-export BUILD_DIR=`pwd`
+    # Top-level directory to be used to unpack/compile/install
+    # wireshark/lustre-git-repo
+    export BUILD_DIR=`pwd`
 
     # Directory location of wireshark source code
-export WS_HOME="${BUILD_DIR}/wireshark-${WS_VERSION}"
+    export WS_HOME="${BUILD_DIR}/wireshark-${WS_VERSION}"
 
     # (Relative) path of the wireshark contribution directory
-export LUSTRE_WS_DIR='lustre-release/lustre/contrib/wireshark'
+    export LUSTRE_WS_DIR='lustre-release/lustre/contrib/wireshark'
 
     # RPM internal name for the Lustre/LNet plugins
-export PLUGIN_RPM_NAME='lustre-wireshark-plugins'
+    export PLUGIN_RPM_NAME='lustre-wireshark-plugins'
 
     # TAR command + options to be used to create a bzip2 tarball
-export TAR='/bin/tar jcpf '
+    export TAR='/bin/tar jcpf '
     # TAR command + options to be used to unpack a bzip2 tarball
-export UNTAR='/bin/tar jxpf '
+    export UNTAR='/bin/tar jxpf '
+    exit 0
+}
+
+die() {
+    echo "wsconfig error:  $*"
+    exit 1
+} 1>&2
+
+# arg1: complete package name, with version
+# arg2: the minimum version
+#
+chk_ver() {
+    act_ver=${1#*-devel-} ; shift
+    act_ver=${act_ver%%-*}
+
+    declare low_ver=$(
+        printf "${act_ver}\n$1\n" | sort -V | head -n1 )
+    test "X$low_ver" = "X$1" || \
+        die "wireshark too old: $act_ver is before $1"
+}
+
+set_var() {
+    case "X$2" in
+    Xlibdir )
+        txt=$(echo $(rpm -q --list $1 | \
+            sed -n '\@/libwire@s@/libwire[^/]*$@@p' | \
+            sort -u) )
+        ;;
+    * )
+        die "unknown variable: $2"
+        ;;
+    esac
+}
+
+set_cflags() {
+    dlst=$(rpm -q --list $pkg | \
+        grep '/usr.*/include.*/wireshark$' | \
+        while read f ; do test -d $f && echo "$f" ; done)
+    rm -f config.h
+    for f in $dlst XX
+    do test -f $f/config.h && ln -s ${f}/config.h .
+        txt+=" -I$f"
+    done
+    test -f config.h || die "cannot find config header"
+}
+
+parse_wireshark() {
+    declare pkg=$(rpm -qa | sed -n '/wireshark-devel/{;p;q;}')
+    declare dlst=
+
+    while test $# -gt 1
+    do
+        txt=
+        case "$1" in
+        --libs )
+            txt=$(rpm -q --list $pkg | \
+                sed -n 's@\.so$@@p' | \
+                sed 's@.*/lib@-l@')
+            ;;
+
+        --cflags )
+            set_cflags
+            ;;
+
+        --modversion )
+            txt=${pkg#wireshark-devel-}
+            txt=${txt%%-*}
+            ;;
+
+        --atleast-version=* )
+            chk_ver ${pkg} ${1#*=}
+            ;;
+
+        --atleast-version )
+            shift
+            chk_ver ${pkg} ${1}
+            ;;
+
+        --variable=* )
+            set_var ${pkg} ${1#*=}
+            ;;
+
+        --variable )
+            shift
+            set_var ${pkg} ${1}
+            ;;
+
+        * )
+            die "unknown option: $1"
+            ;;
+        esac
+        test ${#txt} -gt 0 && \
+            printf "%s" "$(echo ' '$txt)"
+        shift
+    done
+    echo
+}
+
+pkg-config "$@" 2>/dev/null && exit 0
+
+pkg=$#
+case ${!pkg} in
+glib* )
+    fullpkg=$(rpm -qa | grep -E '^glib[2-9].*-devel' | head -n1)
+    dirs=$(rpm -q --list $fullpkg | \
+        while read f ; do test -d $f && echo $f ; done | \
+        grep -F /include)
+    for f in $dirs ; do printf "-I$f " ; done
+    rpm -q --list $fullpkg | \
+        sed -n 's@^.*/libglib@-lglib@p' | \
+        sed -n 's/\.so$//p' | \
+        head -n 1
+    ;;
+
+wireshark )
+    parse_wireshark "$@"
+    ;;
+
+* )
+    echo huh?
+    exit 1
+    ;;
+esac
index 855e18f..283546d 100644 (file)
 /** l_flags bits marked as "gone" bits */
 #define LDLM_FL_GONE_MASK               0x0006004000000000ULL
 
-/** l_flags bits marked as "hide_lock" bits */
-#define LDLM_FL_HIDE_LOCK_MASK          0x0000206400000000ULL
-
 /** l_flags bits marked as "inherit" bits */
 #define LDLM_FL_INHERIT_MASK            0x0000000000800000ULL
 
-/** l_flags bits marked as "local_only" bits */
-#define LDLM_FL_LOCAL_ONLY_MASK         0x00FFFFFF00000000ULL
+/** l_flags bits marked as "off_wire" bits */
+#define LDLM_FL_OFF_WIRE_MASK           0x00FFFFFF00000000ULL
 
 /** l_flags bits marked as "on_wire" bits */
 #define LDLM_FL_ON_WIRE_MASK            0x00000000C08F932FULL
 #define ldlm_set_cancel(_l)             LDLM_SET_FLAG((  _l), 1ULL << 36)
 #define ldlm_clear_cancel(_l)           LDLM_CLEAR_FLAG((_l), 1ULL << 36)
 
-/** whatever it might mean */
+/** whatever it might mean -- never transmitted? */
 #define LDLM_FL_LOCAL_ONLY              0x0000002000000000ULL // bit  37
 #define ldlm_is_local_only(_l)          LDLM_TEST_FLAG(( _l), 1ULL << 37)
 #define ldlm_set_local_only(_l)         LDLM_SET_FLAG((  _l), 1ULL << 37)
  * to this client for the first operation, whereas the second operation
  * has canceled this lock and is waiting for rpc_lock which is taken by
  * the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in
- * the lock to prevent the Early Lock Cancel (ELC) code from cancelling it.
- *
- * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
- * dropped to let ldlm_callback_handler() return EINVAL to the server. It
- * is used when ELC RPC is already prepared and is waiting for rpc_lock,
- * too late to send a separate CANCEL RPC. */
+ * the lock to prevent the Early Lock Cancel (ELC) code from cancelling it. */
 #define LDLM_FL_BL_AST                  0x0000400000000000ULL // bit  46
 #define ldlm_is_bl_ast(_l)              LDLM_TEST_FLAG(( _l), 1ULL << 46)
 #define ldlm_set_bl_ast(_l)             LDLM_SET_FLAG((  _l), 1ULL << 46)
 #define ldlm_clear_bl_ast(_l)           LDLM_CLEAR_FLAG((_l), 1ULL << 46)
 
-/** whatever it might mean */
+/**
+ * Set by ldlm_cancel_callback() when lock cache is dropped to let
+ * ldlm_callback_handler() return EINVAL to the server. It is used when
+ * ELC RPC is already prepared and is waiting for rpc_lock, too late to
+ * send a separate CANCEL RPC. */
 #define LDLM_FL_BL_DONE                 0x0000800000000000ULL // bit  47
 #define ldlm_is_bl_done(_l)             LDLM_TEST_FLAG(( _l), 1ULL << 47)
 #define ldlm_set_bl_done(_l)            LDLM_SET_FLAG((  _l), 1ULL << 47)
 #define ldlm_clear_excl(_l)             LDLM_CLEAR_FLAG((_l), 1ULL << 55)
 
 /** test for ldlm_lock flag bit set */
-#define LDLM_TEST_FLAG(_l, _b)        (((_l)->l_flags & (_b)) != 0)
+#define LDLM_TEST_FLAG(_l, _b)    (((_l)->l_flags & (_b)) != 0)
+
+/** multi-bit test: are any of mask bits set? */
+#define LDLM_HAVE_MASK(_l, _m)    (((_l)->l_flags & LDLM_FL_##_m##_MASK) != 0)
 
 /** set a ldlm_lock flag bit */
-#define LDLM_SET_FLAG(_l, _b)         (((_l)->l_flags |= (_b))
+#define LDLM_SET_FLAG(_l, _b)     ((_l)->l_flags |= (_b))
 
 /** clear a ldlm_lock flag bit */
-#define LDLM_CLEAR_FLAG(_l, _b)       (((_l)->l_flags &= ~(_b))
-
-/** Mask of flags inherited from parent lock when doing intents. */
-#define LDLM_INHERIT_FLAGS            LDLM_FL_INHERIT_MASK
-
-/** Mask of Flags sent in AST lock_flags to map into the receiving lock. */
-#define LDLM_AST_FLAGS                LDLM_FL_AST_MASK
+#define LDLM_CLEAR_FLAG(_l, _b)   ((_l)->l_flags &= ~(_b))
 
 /** @} subgroup */
 /** @} group */
-#ifdef WIRESHARK_COMPILE
-static int hf_lustre_ldlm_fl_lock_changed        = -1;
-static int hf_lustre_ldlm_fl_block_granted       = -1;
-static int hf_lustre_ldlm_fl_block_conv          = -1;
-static int hf_lustre_ldlm_fl_block_wait          = -1;
-static int hf_lustre_ldlm_fl_ast_sent            = -1;
-static int hf_lustre_ldlm_fl_replay              = -1;
-static int hf_lustre_ldlm_fl_intent_only         = -1;
-static int hf_lustre_ldlm_fl_has_intent          = -1;
-static int hf_lustre_ldlm_fl_flock_deadlock      = -1;
-static int hf_lustre_ldlm_fl_discard_data        = -1;
-static int hf_lustre_ldlm_fl_no_timeout          = -1;
-static int hf_lustre_ldlm_fl_block_nowait        = -1;
-static int hf_lustre_ldlm_fl_test_lock           = -1;
-static int hf_lustre_ldlm_fl_cancel_on_block     = -1;
-static int hf_lustre_ldlm_fl_deny_on_contention  = -1;
-static int hf_lustre_ldlm_fl_ast_discard_data    = -1;
-static int hf_lustre_ldlm_fl_fail_loc            = -1;
-static int hf_lustre_ldlm_fl_skipped             = -1;
-static int hf_lustre_ldlm_fl_cbpending           = -1;
-static int hf_lustre_ldlm_fl_wait_noreproc       = -1;
-static int hf_lustre_ldlm_fl_cancel              = -1;
-static int hf_lustre_ldlm_fl_local_only          = -1;
-static int hf_lustre_ldlm_fl_failed              = -1;
-static int hf_lustre_ldlm_fl_canceling           = -1;
-static int hf_lustre_ldlm_fl_local               = -1;
-static int hf_lustre_ldlm_fl_lvb_ready           = -1;
-static int hf_lustre_ldlm_fl_kms_ignore          = -1;
-static int hf_lustre_ldlm_fl_cp_reqd             = -1;
-static int hf_lustre_ldlm_fl_cleaned             = -1;
-static int hf_lustre_ldlm_fl_atomic_cb           = -1;
-static int hf_lustre_ldlm_fl_bl_ast              = -1;
-static int hf_lustre_ldlm_fl_bl_done             = -1;
-static int hf_lustre_ldlm_fl_no_lru              = -1;
-static int hf_lustre_ldlm_fl_fail_notified       = -1;
-static int hf_lustre_ldlm_fl_destroyed           = -1;
-static int hf_lustre_ldlm_fl_server_lock         = -1;
-static int hf_lustre_ldlm_fl_res_locked          = -1;
-static int hf_lustre_ldlm_fl_waited              = -1;
-static int hf_lustre_ldlm_fl_ns_srv              = -1;
-static int hf_lustre_ldlm_fl_excl                = -1;
-
-const value_string lustre_ldlm_flags_vals[] = {
-  {LDLM_FL_LOCK_CHANGED,        "LDLM_FL_LOCK_CHANGED"},
-  {LDLM_FL_BLOCK_GRANTED,       "LDLM_FL_BLOCK_GRANTED"},
-  {LDLM_FL_BLOCK_CONV,          "LDLM_FL_BLOCK_CONV"},
-  {LDLM_FL_BLOCK_WAIT,          "LDLM_FL_BLOCK_WAIT"},
-  {LDLM_FL_AST_SENT,            "LDLM_FL_AST_SENT"},
-  {LDLM_FL_REPLAY,              "LDLM_FL_REPLAY"},
-  {LDLM_FL_INTENT_ONLY,         "LDLM_FL_INTENT_ONLY"},
-  {LDLM_FL_HAS_INTENT,          "LDLM_FL_HAS_INTENT"},
-  {LDLM_FL_FLOCK_DEADLOCK,      "LDLM_FL_FLOCK_DEADLOCK"},
-  {LDLM_FL_DISCARD_DATA,        "LDLM_FL_DISCARD_DATA"},
-  {LDLM_FL_NO_TIMEOUT,          "LDLM_FL_NO_TIMEOUT"},
-  {LDLM_FL_BLOCK_NOWAIT,        "LDLM_FL_BLOCK_NOWAIT"},
-  {LDLM_FL_TEST_LOCK,           "LDLM_FL_TEST_LOCK"},
-  {LDLM_FL_CANCEL_ON_BLOCK,     "LDLM_FL_CANCEL_ON_BLOCK"},
-  {LDLM_FL_DENY_ON_CONTENTION,  "LDLM_FL_DENY_ON_CONTENTION"},
-  {LDLM_FL_AST_DISCARD_DATA,    "LDLM_FL_AST_DISCARD_DATA"},
-  {LDLM_FL_FAIL_LOC,            "LDLM_FL_FAIL_LOC"},
-  {LDLM_FL_SKIPPED,             "LDLM_FL_SKIPPED"},
-  {LDLM_FL_CBPENDING,           "LDLM_FL_CBPENDING"},
-  {LDLM_FL_WAIT_NOREPROC,       "LDLM_FL_WAIT_NOREPROC"},
-  {LDLM_FL_CANCEL,              "LDLM_FL_CANCEL"},
-  {LDLM_FL_LOCAL_ONLY,          "LDLM_FL_LOCAL_ONLY"},
-  {LDLM_FL_FAILED,              "LDLM_FL_FAILED"},
-  {LDLM_FL_CANCELING,           "LDLM_FL_CANCELING"},
-  {LDLM_FL_LOCAL,               "LDLM_FL_LOCAL"},
-  {LDLM_FL_LVB_READY,           "LDLM_FL_LVB_READY"},
-  {LDLM_FL_KMS_IGNORE,          "LDLM_FL_KMS_IGNORE"},
-  {LDLM_FL_CP_REQD,             "LDLM_FL_CP_REQD"},
-  {LDLM_FL_CLEANED,             "LDLM_FL_CLEANED"},
-  {LDLM_FL_ATOMIC_CB,           "LDLM_FL_ATOMIC_CB"},
-  {LDLM_FL_BL_AST,              "LDLM_FL_BL_AST"},
-  {LDLM_FL_BL_DONE,             "LDLM_FL_BL_DONE"},
-  {LDLM_FL_NO_LRU,              "LDLM_FL_NO_LRU"},
-  {LDLM_FL_FAIL_NOTIFIED,       "LDLM_FL_FAIL_NOTIFIED"},
-  {LDLM_FL_DESTROYED,           "LDLM_FL_DESTROYED"},
-  {LDLM_FL_SERVER_LOCK,         "LDLM_FL_SERVER_LOCK"},
-  {LDLM_FL_RES_LOCKED,          "LDLM_FL_RES_LOCKED"},
-  {LDLM_FL_WAITED,              "LDLM_FL_WAITED"},
-  {LDLM_FL_NS_SRV,              "LDLM_FL_NS_SRV"},
-  {LDLM_FL_EXCL,                "LDLM_FL_EXCL"},
-  { 0, NULL }
-};
-#endif /*  WIRESHARK_COMPILE */
 #endif /* LDLM_ALL_FLAGS_MASK */
+
index e4a688d..904ad30 100644 (file)
 struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
 {
        /* on server-side resource of lock doesn't change */
-       if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
+       if (!ldlm_is_ns_srv(lock))
                spin_lock(&lock->l_lock);
 
        lock_res(lock->l_resource);
 
-       lock->l_flags |= LDLM_FL_RES_LOCKED;
+       ldlm_set_res_locked(lock);
        return lock->l_resource;
 }
 EXPORT_SYMBOL(lock_res_and_lock);
@@ -71,10 +71,10 @@ EXPORT_SYMBOL(lock_res_and_lock);
 void unlock_res_and_lock(struct ldlm_lock *lock)
 {
        /* on server-side resource of lock doesn't change */
-       lock->l_flags &= ~LDLM_FL_RES_LOCKED;
+       ldlm_clear_res_locked(lock);
 
        unlock_res(lock->l_resource);
-       if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
+       if (!ldlm_is_ns_srv(lock))
                spin_unlock(&lock->l_lock);
 }
 EXPORT_SYMBOL(unlock_res_and_lock);
index dbcbc72..ab457c7 100644 (file)
@@ -502,27 +502,28 @@ ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
                                       req->l_policy_data.l_extent.start) &&
                                      (lock->l_policy_data.l_extent.end >=
                                       req->l_policy_data.l_extent.end))) {
-                                        /* If we met a PR lock just like us or wider,
-                                           and nobody down the list conflicted with
-                                           it, that means we can skip processing of
-                                           the rest of the list and safely place
-                                           ourselves at the end of the list, or grant
-                                           (dependent if we met an conflicting locks
-                                           before in the list).
-                                           In case of 1st enqueue only we continue
-                                           traversing if there is something conflicting
-                                           down the list because we need to make sure
-                                           that something is marked as AST_SENT as well,
-                                           in cse of empy worklist we would exit on
-                                           first conflict met. */
-                                        /* There IS a case where such flag is
-                                           not set for a lock, yet it blocks
-                                           something. Luckily for us this is
-                                           only during destroy, so lock is
-                                           exclusive. So here we are safe */
-                                        if (!(lock->l_flags & LDLM_FL_AST_SENT)) {
-                                                RETURN(compat);
-                                        }
+                                       /* If we met a PR lock just like us or
+                                          wider, and nobody down the list
+                                          conflicted with it, that means we
+                                          can skip processing of the rest of
+                                          the list and safely place ourselves
+                                          at the end of the list, or grant
+                                          (dependent if we met any conflicting
+                                          locks before in the list).  In case
+                                          of 1st enqueue only we continue
+                                          traversing if there is something
+                                          conflicting down the list because
+                                          we need to make sure that something
+                                          is marked as AST_SENT as well, in
+                                          case of empty worklist we would exit
+                                          on first conflict met. */
+                                       /* There IS a case where such flag is
+                                          not set for a lock, yet it blocks
+                                          something. Luckily for us this is
+                                          only during destroy, so lock is
+                                          exclusive. So here we are safe */
+                                       if (!ldlm_is_ast_sent(lock))
+                                               RETURN(compat);
                                 }
 
                                 /* non-group locks are compatible, overlap doesn't
@@ -656,8 +657,8 @@ static void discard_bl_list(cfs_list_t *bl_list)
                         cfs_list_entry(pos, struct ldlm_lock, l_bl_ast);
 
                 cfs_list_del_init(&lock->l_bl_ast);
-                LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
-                lock->l_flags &= ~LDLM_FL_AST_SENT;
+               LASSERT(ldlm_is_ast_sent(lock));
+               ldlm_clear_ast_sent(lock);
                 LASSERT(lock->l_bl_ast_run == 0);
                 LASSERT(lock->l_blocking_lock);
                 LDLM_LOCK_RELEASE(lock->l_blocking_lock);
@@ -694,7 +695,7 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
 
         LASSERT(cfs_list_empty(&res->lr_converting));
         LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
-               !(lock->l_flags & LDLM_FL_AST_DISCARD_DATA));
+               !ldlm_is_ast_discard_data(lock));
         check_res_locked(res);
         *err = ELDLM_OK;
 
@@ -766,7 +767,7 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
                         * in ldlm_lock_destroy. Anyway, this always happens
                         * when a client is being evicted. So it would be
                         * ok to return an error. -jay */
-                       if (lock->l_flags & LDLM_FL_DESTROYED) {
+                       if (ldlm_is_destroyed(lock)) {
                                *err = -EAGAIN;
                                GOTO(out, rc = -EAGAIN);
                        }
@@ -794,7 +795,7 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
        RETURN(0);
 out:
        if (!cfs_list_empty(&rpc_list)) {
-               LASSERT(!(lock->l_flags & LDLM_FL_AST_DISCARD_DATA));
+               LASSERT(!ldlm_is_ast_discard_data(lock));
                discard_bl_list(&rpc_list);
        }
        RETURN(rc);
@@ -817,12 +818,12 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
         /* don't let another thread in ldlm_extent_shift_kms race in
          * just after we finish and take our lock into account in its
          * calculation of the kms */
-        lock->l_flags |= LDLM_FL_KMS_IGNORE;
+       ldlm_set_kms_ignore(lock);
 
         cfs_list_for_each(tmp, &res->lr_granted) {
                 lck = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
 
-                if (lck->l_flags & LDLM_FL_KMS_IGNORE)
+               if (ldlm_is_kms_ignore(lck))
                         continue;
 
                 if (lck->l_policy_data.l_extent.end >= old_kms)
index f5e63fe..8e4d453 100644 (file)
@@ -137,7 +137,7 @@ static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
 static inline void
 ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
 {
-        ENTRY;
+       ENTRY;
 
        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
                   mode, flags);
@@ -145,11 +145,10 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
        /* Safe to not lock here, since it should be empty anyway */
        LASSERT(cfs_hlist_unhashed(&lock->l_exp_flock_hash));
 
-        cfs_list_del_init(&lock->l_res_link);
-        if (flags == LDLM_FL_WAIT_NOREPROC &&
-            !(lock->l_flags & LDLM_FL_FAILED)) {
-                /* client side - set a flag to prevent sending a CANCEL */
-                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
+       cfs_list_del_init(&lock->l_res_link);
+       if (flags == LDLM_FL_WAIT_NOREPROC && !ldlm_is_failed(lock)) {
+               /* client side - set a flag to prevent sending a CANCEL */
+               lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
 
                 /* when reaching here, it is under lock_res_and_lock(). Thus,
                    need call the nolock version of ldlm_lock_decref_internal*/
@@ -264,7 +263,7 @@ static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
                                "support flock canceliation\n");
        } else {
                LASSERT(lock->l_completion_ast);
-               LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0);
+               LASSERT(!ldlm_is_ast_sent(lock));
                lock->l_flags |= LDLM_FL_AST_SENT | LDLM_FL_CANCEL_ON_BLOCK |
                        LDLM_FL_FLOCK_DEADLOCK;
                ldlm_flock_blocking_unlink(lock);
@@ -646,7 +645,7 @@ ldlm_flock_interrupted_wait(void *data)
         ldlm_flock_blocking_unlink(lock);
 
        /* client side - set flag to prevent lock from being put on LRU list */
-        lock->l_flags |= LDLM_FL_CBPENDING;
+       ldlm_set_cbpending(lock);
         unlock_res_and_lock(lock);
 
         EXIT;
@@ -734,7 +733,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 granted:
         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
 
-        if (lock->l_flags & LDLM_FL_FAILED) {
+        if (ldlm_is_failed(lock)) {
                 LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
                 RETURN(-EIO);
         }
@@ -747,7 +746,7 @@ granted:
        /* Protect against race where lock could have been just destroyed
         * due to overlap in ldlm_process_flock_lock().
         */
-       if (lock->l_flags & LDLM_FL_DESTROYED) {
+       if (ldlm_is_destroyed(lock)) {
                unlock_res_and_lock(lock);
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(0);
@@ -759,7 +758,7 @@ granted:
         /* ldlm_lock_enqueue() has already placed lock on the granted list. */
         cfs_list_del_init(&lock->l_res_link);
 
-       if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) {
+       if (ldlm_is_flock_deadlock(lock)) {
                LDLM_DEBUG(lock, "client-side enqueue deadlock received");
                rc = -EDEADLK;
        } else if (flags & LDLM_FL_TEST_LOCK) {
index 6b96b40..5077072 100644 (file)
@@ -294,9 +294,10 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
         int ret = 0;
 
         lock_res_and_lock(lock);
-        if (((lock->l_req_mode == lock->l_granted_mode) &&
-             !(lock->l_flags & LDLM_FL_CP_REQD)) ||
-            (lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCEL)))
+       if ((lock->l_req_mode == lock->l_granted_mode) &&
+            !ldlm_is_cp_reqd(lock))
+               ret = 1;
+       else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
                 ret = 1;
         unlock_res_and_lock(lock);
 
index d7ad151..8853bc7 100644 (file)
@@ -219,7 +219,7 @@ void ldlm_lock_put(struct ldlm_lock *lock)
                            "final lock_put on destroyed lock, freeing it.");
 
                 res = lock->l_resource;
-               LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
+               LASSERT(ldlm_is_destroyed(lock));
                 LASSERT(cfs_list_empty(&lock->l_res_link));
                 LASSERT(cfs_list_empty(&lock->l_pending_chain));
 
@@ -256,8 +256,7 @@ int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
 
                 LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
                 cfs_list_del_init(&lock->l_lru);
-                if (lock->l_flags & LDLM_FL_SKIPPED)
-                        lock->l_flags &= ~LDLM_FL_SKIPPED;
+               ldlm_clear_skipped(lock);
                 LASSERT(ns->ns_nr_unused > 0);
                 ns->ns_nr_unused--;
                 rc = 1;
@@ -274,7 +273,7 @@ int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
        int rc;
 
        ENTRY;
-       if (lock->l_flags & LDLM_FL_NS_SRV) {
+       if (ldlm_is_ns_srv(lock)) {
                LASSERT(cfs_list_empty(&lock->l_lru));
                RETURN(0);
        }
@@ -325,7 +324,7 @@ void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 
        ENTRY;
-       if (lock->l_flags & LDLM_FL_NS_SRV) {
+       if (ldlm_is_ns_srv(lock)) {
                LASSERT(cfs_list_empty(&lock->l_lru));
                EXIT;
                return;
@@ -373,12 +372,12 @@ int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
                 LBUG();
         }
 
-       if (lock->l_flags & LDLM_FL_DESTROYED) {
+       if (ldlm_is_destroyed(lock)) {
                LASSERT(cfs_list_empty(&lock->l_lru));
                EXIT;
                return 0;
        }
-       lock->l_flags |= LDLM_FL_DESTROYED;
+       ldlm_set_destroyed(lock);
 
        if (lock->l_export && lock->l_export->exp_lock_hash) {
                /* NB: it's safe to call cfs_hash_del() even lock isn't
@@ -616,7 +615,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
 
        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
-       if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED)== 0)) {
+       if ((flags == 0) && !ldlm_is_destroyed(lock)) {
                lu_ref_add(&lock->l_reference, "handle", current);
                RETURN(lock);
        }
@@ -626,21 +625,23 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
        LASSERT(lock->l_resource != NULL);
 
        lu_ref_add_atomic(&lock->l_reference, "handle", current);
-       if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
+       if (unlikely(ldlm_is_destroyed(lock))) {
                unlock_res_and_lock(lock);
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                RETURN(NULL);
        }
 
-       if (flags && (lock->l_flags & flags)) {
-               unlock_res_and_lock(lock);
-               LDLM_LOCK_PUT(lock);
-               RETURN(NULL);
-       }
+       /* If we're setting flags, make sure none of them are already set. */
+       if (flags != 0) {
+               if ((lock->l_flags & flags) != 0) {
+                       unlock_res_and_lock(lock);
+                       LDLM_LOCK_PUT(lock);
+                       RETURN(NULL);
+               }
 
-       if (flags)
                lock->l_flags |= flags;
+       }
 
        unlock_res_and_lock(lock);
        RETURN(lock);
@@ -710,13 +711,13 @@ EXPORT_SYMBOL(ldlm_lock2desc);
 void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            cfs_list_t *work_list)
 {
-        if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
-                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
-                lock->l_flags |= LDLM_FL_AST_SENT;
-                /* If the enqueuing client said so, tell the AST recipient to
-                 * discard dirty data, rather than writing back. */
-               if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
-                        lock->l_flags |= LDLM_FL_DISCARD_DATA;
+       if (!ldlm_is_ast_sent(lock)) {
+               LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
+               ldlm_set_ast_sent(lock);
+               /* If the enqueuing client said so, tell the AST recipient to
+                * discard dirty data, rather than writing back. */
+               if (ldlm_is_ast_discard_data(new))
+                       ldlm_set_discard_data(lock);
                 LASSERT(cfs_list_empty(&lock->l_bl_ast));
                 cfs_list_add(&lock->l_bl_ast, work_list);
                 LDLM_LOCK_GET(lock);
@@ -730,8 +731,8 @@ void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
  */
 void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
 {
-        if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
-                lock->l_flags |= LDLM_FL_CP_REQD;
+       if (!ldlm_is_cp_reqd(lock)) {
+               ldlm_set_cp_reqd(lock);
                 LDLM_DEBUG(lock, "lock granted; sending completion AST.");
                 LASSERT(cfs_list_empty(&lock->l_cp_ast));
                 cfs_list_add(&lock->l_cp_ast, work_list);
@@ -814,7 +815,7 @@ int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
         if (lock != NULL) {
                 lock_res_and_lock(lock);
                 if (lock->l_readers != 0 || lock->l_writers != 0 ||
-                    !(lock->l_flags & LDLM_FL_CBPENDING)) {
+                   !ldlm_is_cbpending(lock)) {
                         ldlm_lock_addref_internal_nolock(lock, mode);
                         result = 0;
                 }
@@ -881,19 +882,19 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
 
         ldlm_lock_decref_internal_nolock(lock, mode);
 
-        if (lock->l_flags & LDLM_FL_LOCAL &&
+       if (ldlm_is_local(lock) &&
             !lock->l_readers && !lock->l_writers) {
                 /* If this is a local lock on a server namespace and this was
                  * the last reference, cancel the lock. */
                 CDEBUG(D_INFO, "forcing cancel of local lock\n");
-                lock->l_flags |= LDLM_FL_CBPENDING;
+               ldlm_set_cbpending(lock);
         }
 
         if (!lock->l_readers && !lock->l_writers &&
-            (lock->l_flags & LDLM_FL_CBPENDING)) {
+           ldlm_is_cbpending(lock)) {
                 /* If we received a blocked AST and this was the last reference,
                  * run the callback. */
-               if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export)
+               if (ldlm_is_ns_srv(lock) && lock->l_export)
                         CERROR("FL_CBPENDING set on non-local lock--just a "
                                "warning\n");
 
@@ -903,16 +904,16 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
                 ldlm_lock_remove_from_lru(lock);
                 unlock_res_and_lock(lock);
 
-                if (lock->l_flags & LDLM_FL_FAIL_LOC)
+               if (ldlm_is_fail_loc(lock))
                         OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
 
-                if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
+               if (ldlm_is_atomic_cb(lock) ||
                     ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
                         ldlm_handle_bl_callback(ns, NULL, lock);
         } else if (ns_is_client(ns) &&
                    !lock->l_readers && !lock->l_writers &&
-                   !(lock->l_flags & LDLM_FL_NO_LRU) &&
-                   !(lock->l_flags & LDLM_FL_BL_AST)) {
+                  !ldlm_is_no_lru(lock) &&
+                  !ldlm_is_bl_ast(lock)) {
 
                 LDLM_DEBUG(lock, "add lock into lru list");
 
@@ -921,7 +922,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
                 ldlm_lock_add_to_lru(lock);
                 unlock_res_and_lock(lock);
 
-                if (lock->l_flags & LDLM_FL_FAIL_LOC)
+               if (ldlm_is_fail_loc(lock))
                         OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
 
                 /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
@@ -966,7 +967,7 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
 
         LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
         lock_res_and_lock(lock);
-        lock->l_flags |= LDLM_FL_CBPENDING;
+       ldlm_set_cbpending(lock);
         unlock_res_and_lock(lock);
         ldlm_lock_decref_internal(lock, mode);
         LDLM_LOCK_PUT(lock);
@@ -1089,7 +1090,7 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
         ldlm_resource_dump(D_INFO, res);
         LDLM_DEBUG(lock, "About to add lock:");
 
-       if (lock->l_flags & LDLM_FL_DESTROYED) {
+       if (ldlm_is_destroyed(lock)) {
                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                 return;
         }
@@ -1198,10 +1199,10 @@ static struct ldlm_lock *search_queue(cfs_list_t *queue,
                  * this is generally only going to be used by children
                  * whose parents already hold a lock so forward progress
                  * can still happen. */
-                if (lock->l_flags & LDLM_FL_CBPENDING &&
+               if (ldlm_is_cbpending(lock) &&
                     !(flags & LDLM_FL_CBPENDING))
                         continue;
-                if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
+               if (!unref && ldlm_is_cbpending(lock) &&
                     lock->l_readers == 0 && lock->l_writers == 0)
                         continue;
 
@@ -1228,11 +1229,11 @@ static struct ldlm_lock *search_queue(cfs_list_t *queue,
                       policy->l_inodebits.bits))
                         continue;
 
-               if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
+               if (!unref && LDLM_HAVE_MASK(lock, GONE))
                         continue;
 
                 if ((flags & LDLM_FL_LOCAL_ONLY) &&
-                    !(lock->l_flags & LDLM_FL_LOCAL))
+                   !ldlm_is_local(lock))
                         continue;
 
                 if (flags & LDLM_FL_TEST_LOCK) {
@@ -1274,7 +1275,7 @@ EXPORT_SYMBOL(ldlm_lock_fail_match);
  */
 void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
 {
-       lock->l_flags |= LDLM_FL_LVB_READY;
+       ldlm_set_lvb_ready(lock);
        wake_up_all(&lock->l_waitq);
 }
 EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
@@ -1374,7 +1375,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
         if (lock) {
                 ldlm_lock2handle(lock, lockh);
                 if ((flags & LDLM_FL_LVB_READY) &&
-                    (!(lock->l_flags & LDLM_FL_LVB_READY))) {
+                   (!ldlm_is_lvb_ready(lock))) {
                        __u64 wait_flags = LDLM_FL_LVB_READY |
                                LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
                         struct l_wait_info lwi;
@@ -1400,7 +1401,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
                        l_wait_event(lock->l_waitq,
                                     lock->l_flags & wait_flags,
                                     &lwi);
-                        if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
+                       if (!ldlm_is_lvb_ready(lock)) {
                                 if (flags & LDLM_FL_TEST_LOCK)
                                         LDLM_LOCK_RELEASE(lock);
                                 else
@@ -1455,10 +1456,10 @@ ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
         lock = ldlm_handle2lock(lockh);
         if (lock != NULL) {
                 lock_res_and_lock(lock);
-               if (lock->l_flags & LDLM_FL_GONE_MASK)
+               if (LDLM_HAVE_MASK(lock, GONE))
                         GOTO(out, mode);
 
-                if (lock->l_flags & LDLM_FL_CBPENDING &&
+               if (ldlm_is_cbpending(lock) &&
                     lock->l_readers == 0 && lock->l_writers == 0)
                         GOTO(out, mode);
 
@@ -1607,7 +1608,7 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
        lock->l_ast_data = data;
        lock->l_pid = current_pid();
        if (ns_is_server(ns))
-               lock->l_flags |= LDLM_FL_NS_SRV;
+               ldlm_set_ns_srv(lock);
         if (cbs) {
                 lock->l_blocking_ast = cbs->lcs_blocking;
                 lock->l_completion_ast = cbs->lcs_completion;
@@ -1719,7 +1720,8 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
 
        /* Some flags from the enqueue want to make it into the AST, via the
         * lock's l_flags. */
-       lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
+       if (*flags & LDLM_FL_AST_DISCARD_DATA)
+               ldlm_set_ast_discard_data(lock);
 
        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
@@ -1832,7 +1834,7 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
        lock_res_and_lock(lock);
        cfs_list_del_init(&lock->l_bl_ast);
 
-       LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
+       LASSERT(ldlm_is_ast_sent(lock));
        LASSERT(lock->l_bl_ast_run == 0);
        LASSERT(lock->l_blocking_lock);
        lock->l_bl_ast_run++;
@@ -1879,11 +1881,11 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
        /* nobody should touch l_cp_ast */
        lock_res_and_lock(lock);
        cfs_list_del_init(&lock->l_cp_ast);
-       LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
+       LASSERT(ldlm_is_cp_reqd(lock));
        /* save l_completion_ast since it can be changed by
         * mds_intent_policy(), see bug 14225 */
        completion_callback = lock->l_completion_ast;
-       lock->l_flags &= ~LDLM_FL_CP_REQD;
+       ldlm_clear_cp_reqd(lock);
        unlock_res_and_lock(lock);
 
        if (completion_callback != NULL)
@@ -2106,8 +2108,8 @@ EXPORT_SYMBOL(ldlm_reprocess_all);
 void ldlm_cancel_callback(struct ldlm_lock *lock)
 {
        check_res_locked(lock->l_resource);
-       if (!(lock->l_flags & LDLM_FL_CANCEL)) {
-               lock->l_flags |= LDLM_FL_CANCEL;
+       if (!ldlm_is_cancel(lock)) {
+               ldlm_set_cancel(lock);
                if (lock->l_blocking_ast) {
                         unlock_res_and_lock(lock);
                         lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
@@ -2117,7 +2119,7 @@ void ldlm_cancel_callback(struct ldlm_lock *lock)
                         LDLM_DEBUG(lock, "no blocking ast");
                 }
         }
-        lock->l_flags |= LDLM_FL_BL_DONE;
+       ldlm_set_bl_done(lock);
 }
 
 /**
@@ -2154,7 +2156,7 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
                 LBUG();
         }
 
-       if (lock->l_flags & LDLM_FL_WAITED)
+       if (ldlm_is_waited(lock))
                ldlm_del_waiting_lock(lock);
 
         /* Releases cancel callback. */
@@ -2162,7 +2164,7 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
 
        /* Yes, second time, just in case it was added again while we were
         * running with no res lock in ldlm_cancel_callback */
-       if (lock->l_flags & LDLM_FL_WAITED)
+       if (ldlm_is_waited(lock))
                ldlm_del_waiting_lock(lock);
 
         ldlm_resource_unlink_lock(lock);
index 2118653..9b9df2b 100644 (file)
@@ -233,7 +233,7 @@ static int expired_lock_main(void *arg)
                                continue;
                        }
 
-                       if (lock->l_flags & LDLM_FL_DESTROYED) {
+                       if (ldlm_is_destroyed(lock)) {
                                /* release the lock refcount where
                                 * waiting_locks_callback() founds */
                                LDLM_LOCK_RELEASE(lock);
@@ -421,13 +421,13 @@ static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
        int timeout = ldlm_get_enq_timeout(lock);
 
        /* NB: must be called with hold of lock_res_and_lock() */
-       LASSERT(lock->l_flags & LDLM_FL_RES_LOCKED);
-       lock->l_flags |= LDLM_FL_WAITED;
+       LASSERT(ldlm_is_res_locked(lock));
+       ldlm_set_waited(lock);
 
-       LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
+       LASSERT(!ldlm_is_cancel_on_block(lock));
 
        spin_lock_bh(&waiting_locks_spinlock);
-       if (lock->l_flags & LDLM_FL_DESTROYED) {
+       if (ldlm_is_destroyed(lock)) {
                static cfs_time_t next;
                spin_unlock_bh(&waiting_locks_spinlock);
                 LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
@@ -572,8 +572,7 @@ int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
 # ifdef HAVE_SERVER_SUPPORT
 static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
 {
-       LASSERT((lock->l_flags & (LDLM_FL_RES_LOCKED|LDLM_FL_CANCEL_ON_BLOCK))
-               == LDLM_FL_RES_LOCKED);
+       LASSERT(ldlm_is_res_locked(lock) && !ldlm_is_cancel_on_block(lock));
        RETURN(1);
 }
 
@@ -627,7 +626,7 @@ static int ldlm_handle_ast_error(struct ldlm_lock *lock,
                                    libcfs_nid2str(peer.nid));
                         ldlm_lock_cancel(lock);
                         rc = -ERESTART;
-                } else if (lock->l_flags & LDLM_FL_CANCEL) {
+               } else if (ldlm_is_cancel(lock)) {
                         LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
                                    "cancel was received (AST reply lost?)",
                                    ast_type, libcfs_nid2str(peer.nid));
@@ -831,20 +830,20 @@ int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                RETURN(0);
        }
 
-       if (lock->l_flags & LDLM_FL_DESTROYED) {
+       if (ldlm_is_destroyed(lock)) {
                /* What's the point? */
                unlock_res_and_lock(lock);
                ptlrpc_req_finished(req);
                RETURN(0);
        }
 
-        if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
+       if (ldlm_is_cancel_on_block(lock))
                 instant_cancel = 1;
 
         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
         body->lock_handle[0] = lock->l_remote_handle;
         body->lock_desc = *desc;
-       body->lock_flags |= ldlm_flags_to_wire(lock->l_flags & LDLM_AST_FLAGS);
+       body->lock_flags |= ldlm_flags_to_wire(lock->l_flags & LDLM_FL_AST_MASK);
 
         LDLM_DEBUG(lock, "server preparing blocking AST");
 
@@ -979,11 +978,11 @@ int ldlm_server_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 
         /* We only send real blocking ASTs after the lock is granted */
         lock_res_and_lock(lock);
-        if (lock->l_flags & LDLM_FL_AST_SENT) {
+       if (ldlm_is_ast_sent(lock)) {
                body->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
                /* Copy AST flags like LDLM_FL_DISCARD_DATA. */
                body->lock_flags |= ldlm_flags_to_wire(lock->l_flags &
-                                                      LDLM_AST_FLAGS);
+                                                      LDLM_FL_AST_MASK);
 
                 /* We might get here prior to ldlm_handle_enqueue setting
                  * LDLM_FL_CANCEL_ON_BLOCK flag. Then we will put this lock
@@ -991,7 +990,7 @@ int ldlm_server_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
                  * ldlm_handle_enqueue will call ldlm_lock_cancel() still,
                  * that would not only cancel the lock, but will also remove
                  * it from waiting list */
-                if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
+               if (ldlm_is_cancel_on_block(lock)) {
                         unlock_res_and_lock(lock);
                         ldlm_lock_cancel(lock);
                         instant_cancel = 1;
@@ -1317,9 +1316,9 @@ existing_lock:
 
         /* Now take into account flags to be inherited from original lock
            request both in reply to client and in our own lock flags. */
-        dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
+       dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_FL_INHERIT_MASK;
        lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
-                                             LDLM_INHERIT_FLAGS);
+                                             LDLM_FL_INHERIT_MASK);
 
         /* Don't move a pending lock onto the export if it has already been
          * disconnected due to eviction (bug 5683) or server umount (bug 24324).
@@ -1328,7 +1327,7 @@ existing_lock:
                      OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
                 LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
                 rc = -ENOTCONN;
-        } else if (lock->l_flags & LDLM_FL_AST_SENT) {
+       } else if (ldlm_is_ast_sent(lock)) {
                dlm_rep->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
                 if (lock->l_granted_mode == lock->l_req_mode) {
                         /*
@@ -1351,7 +1350,7 @@ existing_lock:
         if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
             dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
              req->rq_export->exp_libclient) {
-                if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
+               if (unlikely(!ldlm_is_cancel_on_block(lock) ||
                              !(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
                         CERROR("Granting sync lock to libclient. "
                                "req fl %d, rep fl %d, lock fl "LPX64"\n",
@@ -1655,10 +1654,10 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
         LDLM_DEBUG(lock, "client blocking AST callback handler");
 
         lock_res_and_lock(lock);
-        lock->l_flags |= LDLM_FL_CBPENDING;
+       ldlm_set_cbpending(lock);
 
-        if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
-                lock->l_flags |= LDLM_FL_CANCEL;
+       if (ldlm_is_cancel_on_block(lock))
+               ldlm_set_cancel(lock);
 
         do_ast = (!lock->l_readers && !lock->l_writers);
         unlock_res_and_lock(lock);
@@ -1702,7 +1701,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                        schedule_timeout_and_set_state(
                                TASK_INTERRUPTIBLE, to);
                        if (lock->l_granted_mode == lock->l_req_mode ||
-                           lock->l_flags & LDLM_FL_DESTROYED)
+                           ldlm_is_destroyed(lock))
                                break;
                }
        }
@@ -1742,7 +1741,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
        }
 
        lock_res_and_lock(lock);
-       if ((lock->l_flags & LDLM_FL_DESTROYED) ||
+       if (ldlm_is_destroyed(lock) ||
            lock->l_granted_mode == lock->l_req_mode) {
                /* bug 11300: the lock has already been granted */
                unlock_res_and_lock(lock);
@@ -1785,7 +1784,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                /* BL_AST locks are not needed in LRU.
                 * Let ldlm_cancel_lru() be fast. */
                 ldlm_lock_remove_from_lru(lock);
-                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
+               lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
                 LDLM_DEBUG(lock, "completion AST includes blocking AST");
         }
 
@@ -1816,7 +1815,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
 out:
        if (rc < 0) {
                lock_res_and_lock(lock);
-               lock->l_flags |= LDLM_FL_FAILED;
+               ldlm_set_failed(lock);
                unlock_res_and_lock(lock);
                wake_up(&lock->l_waitq);
        }
@@ -1891,7 +1890,7 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
 
        spin_lock(&blp->blp_lock);
        if (blwi->blwi_lock &&
-           blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
+           ldlm_is_discard_data(blwi->blwi_lock)) {
                /* add LDLM_FL_DISCARD_DATA requests to the priority list */
                cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
        } else {
@@ -2197,22 +2196,21 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
                 RETURN(0);
         }
 
-        if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
+       if (ldlm_is_fail_loc(lock) &&
             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
                 OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
 
         /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
         lock_res_and_lock(lock);
        lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
-                                             LDLM_AST_FLAGS);
+                                             LDLM_FL_AST_MASK);
         if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
                 /* If somebody cancels lock and cache is already dropped,
                  * or lock is failed before cp_ast received on client,
                  * we can tell the server we have no lock. Otherwise, we
                  * should send cancel after dropping the cache. */
-                if (((lock->l_flags & LDLM_FL_CANCELING) &&
-                    (lock->l_flags & LDLM_FL_BL_DONE)) ||
-                    (lock->l_flags & LDLM_FL_FAILED)) {
+               if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
+                   ldlm_is_failed(lock)) {
                         LDLM_DEBUG(lock, "callback on lock "
                                    LPX64" - lock disappeared\n",
                                    dlm_req->lock_handle[0].cookie);
@@ -2226,7 +2224,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
                /* BL_AST locks are not needed in LRU.
                 * Let ldlm_cancel_lru() be fast. */
                 ldlm_lock_remove_from_lru(lock);
-                lock->l_flags |= LDLM_FL_BL_AST;
+               ldlm_set_bl_ast(lock);
         }
         unlock_res_and_lock(lock);
 
@@ -2243,7 +2241,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
         case LDLM_BL_CALLBACK:
                 CDEBUG(D_INODE, "blocking ast\n");
                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
-                if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
+               if (!ldlm_is_cancel_on_block(lock)) {
                         rc = ldlm_callback_reply(req, 0);
                         if (req->rq_no_reply || rc)
                                 ldlm_callback_errmsg(req, "Normal process", rc,
@@ -2384,7 +2382,7 @@ static int ldlm_cancel_hpreq_check(struct ptlrpc_request *req)
                 if (lock == NULL)
                         continue;
 
-                rc = !!(lock->l_flags & LDLM_FL_AST_SENT);
+               rc = ldlm_is_ast_sent(lock) ? 1 : 0;
                 if (rc)
                         LDLM_DEBUG(lock, "hpreq cancel lock");
                 LDLM_LOCK_PUT(lock);
@@ -2439,7 +2437,7 @@ int ldlm_revoke_lock_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                 return 0;
         }
 
-        if (lock->l_flags & LDLM_FL_AST_SENT) {
+       if (ldlm_is_ast_sent(lock)) {
                 unlock_res_and_lock(lock);
                 return 0;
         }
@@ -2447,7 +2445,7 @@ int ldlm_revoke_lock_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
         LASSERT(lock->l_blocking_ast);
         LASSERT(!lock->l_blocking_lock);
 
-        lock->l_flags |= LDLM_FL_AST_SENT;
+       ldlm_set_ast_sent(lock);
         if (lock->l_export && lock->l_export->exp_lock_hash) {
                /* NB: it's safe to call cfs_hash_del() even lock isn't
                 * in exp_lock_hash. */
index 1aa451b..74087e0 100644 (file)
@@ -161,7 +161,7 @@ static int ldlm_completion_tail(struct ldlm_lock *lock)
        long delay;
        int  result;
 
-       if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) {
+       if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
                LDLM_DEBUG(lock, "client-side enqueue: destroyed");
                result = -EIO;
        } else {
@@ -267,7 +267,7 @@ noreproc:
 
         lwd.lwd_lock = lock;
 
-        if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
+       if (ldlm_is_no_timeout(lock)) {
                 LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
                 lwi = LWI_INTR(interrupted_completion_wait, &lwd);
         } else {
@@ -285,7 +285,7 @@ noreproc:
         if (ns_is_client(ldlm_lock_to_ns(lock)) &&
             OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
                                  OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
-                lock->l_flags |= LDLM_FL_FAIL_LOC;
+               ldlm_set_fail_loc(lock);
                 rc = -EINTR;
         } else {
                 /* Go to sleep until the lock is granted or cancelled. */
@@ -319,7 +319,7 @@ int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
         int do_ast;
         ENTRY;
 
-        lock->l_flags |= LDLM_FL_CBPENDING;
+       ldlm_set_cbpending(lock);
         do_ast = (!lock->l_readers && !lock->l_writers);
         unlock_res_and_lock(lock);
 
@@ -443,9 +443,9 @@ int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
         /* NB: we don't have any lock now (lock_res_and_lock)
          * because it's a new lock */
         ldlm_lock_addref_internal_nolock(lock, mode);
-        lock->l_flags |= LDLM_FL_LOCAL;
+       ldlm_set_local(lock);
         if (*flags & LDLM_FL_ATOMIC_CB)
-                lock->l_flags |= LDLM_FL_ATOMIC_CB;
+               ldlm_set_atomic_cb(lock);
 
         if (policy != NULL)
                 lock->l_policy_data = *policy;
@@ -487,13 +487,13 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
         lock_res_and_lock(lock);
         /* Check that lock is not granted or failed, we might race. */
         if ((lock->l_req_mode != lock->l_granted_mode) &&
-            !(lock->l_flags & LDLM_FL_FAILED)) {
-                /* Make sure that this lock will not be found by raced
-                 * bl_ast and -EINVAL reply is sent to server anyways.
-                 * bug 17645 */
-                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
-                                 LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
-                need_cancel = 1;
+           !ldlm_is_failed(lock)) {
+               /* Make sure that this lock will not be found by raced
+                * bl_ast and -EINVAL reply is sent to server anyways.
+                * b=17645*/
+               lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
+                                LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
+               need_cancel = 1;
         }
         unlock_res_and_lock(lock);
 
@@ -604,7 +604,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
 
        *flags = ldlm_flags_from_wire(reply->lock_flags);
        lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
-                                             LDLM_INHERIT_FLAGS);
+                                             LDLM_FL_INHERIT_MASK);
         /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
          * to wait with no timeout as well */
        lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
@@ -657,7 +657,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
              * bug 7311). */
             (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
                 lock_res_and_lock(lock);
-                lock->l_flags |= LDLM_FL_CBPENDING |  LDLM_FL_BL_AST;
+               lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
                 unlock_res_and_lock(lock);
                 LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
         }
@@ -1127,13 +1127,13 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
                 LDLM_DEBUG(lock, "client-side cancel");
                 /* Set this flag to prevent others from getting new references*/
                 lock_res_and_lock(lock);
-                lock->l_flags |= LDLM_FL_CBPENDING;
+               ldlm_set_cbpending(lock);
                local_only = !!(lock->l_flags &
                                (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
-                ldlm_cancel_callback(lock);
-                rc = (lock->l_flags & LDLM_FL_BL_AST) ?
-                        LDLM_FL_BL_AST : LDLM_FL_CANCELING;
-                unlock_res_and_lock(lock);
+               ldlm_cancel_callback(lock);
+               rc = (ldlm_is_bl_ast(lock)) ?
+                       LDLM_FL_BL_AST : LDLM_FL_CANCELING;
+               unlock_res_and_lock(lock);
 
                 if (local_only) {
                         CDEBUG(D_DLMTRACE, "not sending request (at caller's "
@@ -1461,7 +1461,7 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
                                 break;
                 default:
                         result = LDLM_POLICY_SKIP_LOCK;
-                        lock->l_flags |= LDLM_FL_SKIPPED;
+                       ldlm_set_skipped(lock);
                         break;
         }
 
@@ -1661,16 +1661,16 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                 cfs_list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
                                             l_lru) {
                         /* No locks which got blocking requests. */
-                        LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+                       LASSERT(!ldlm_is_bl_ast(lock));
 
                         if (flags & LDLM_CANCEL_NO_WAIT &&
-                            lock->l_flags & LDLM_FL_SKIPPED)
+                           ldlm_is_skipped(lock))
                                 /* already processed */
                                 continue;
 
                        /* Somebody is already doing CANCEL. No need for this
                         * lock in LRU, do not traverse it again. */
-                        if (!(lock->l_flags & LDLM_FL_CANCELING))
+                       if (!ldlm_is_canceling(lock))
                                 break;
 
                         ldlm_lock_remove_from_lru_nolock(lock);
@@ -1713,15 +1713,14 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
 
                lock_res_and_lock(lock);
                /* Check flags again under the lock. */
-               if ((lock->l_flags & LDLM_FL_CANCELING) ||
+               if (ldlm_is_canceling(lock) ||
                    (ldlm_lock_remove_from_lru(lock) == 0)) {
                        /* Another thread is removing lock from LRU, or
                         * somebody is already doing CANCEL, or there
                         * is a blocking request which will send cancel
                         * by itself, or the lock is no longer unused. */
                        unlock_res_and_lock(lock);
-                       lu_ref_del(&lock->l_reference,
-                                  __FUNCTION__, current);
+                       lu_ref_del(&lock->l_reference, __FUNCTION__, current);
                        LDLM_LOCK_RELEASE(lock);
                        spin_lock(&ns->ns_lock);
                        continue;
@@ -1733,7 +1732,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                 * frees appropriate state. This might lead to a race
                 * where while we are doing cancel here, server is also
                 * silently cancelling this lock. */
-               lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
+               ldlm_clear_cancel_on_block(lock);
 
                /* Setting the CBPENDING flag is a little misleading,
                 * but prevents an important race; namely, once
@@ -1830,9 +1829,8 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
 
                /* If somebody is already doing CANCEL, or blocking AST came,
                 * skip this lock. */
-                if (lock->l_flags & LDLM_FL_BL_AST ||
-                    lock->l_flags & LDLM_FL_CANCELING)
-                        continue;
+               if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock))
+                       continue;
 
                 if (lockmode_compat(lock->l_granted_mode, mode))
                         continue;
@@ -1844,9 +1842,9 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
                       policy->l_inodebits.bits))
                         continue;
 
-                /* See CBPENDING comment in ldlm_cancel_lru */
-                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
-                                 lock_flags;
+               /* See CBPENDING comment in ldlm_cancel_lru */
+               lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
+                                lock_flags;
 
                 LASSERT(cfs_list_empty(&lock->l_bl_ast));
                 cfs_list_add(&lock->l_bl_ast, cancels);
@@ -2194,7 +2192,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
 
 
         /* Bug 11974: Do not replay a lock which is actively being canceled */
-        if (lock->l_flags & LDLM_FL_CANCELING) {
+       if (ldlm_is_canceling(lock)) {
                 LDLM_DEBUG(lock, "Not replaying canceled lock:");
                 RETURN(0);
         }
@@ -2202,7 +2200,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
         /* If this is reply-less callback lock, we cannot replay it, since
          * server might have long dropped it, but notification of that event was
          * lost by network. (and server granted conflicting lock already) */
-        if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
+       if (ldlm_is_cancel_on_block(lock)) {
                 LDLM_DEBUG(lock, "Not replaying reply-less lock:");
                 ldlm_lock_cancel(lock);
                 RETURN(0);
index 69f8e6d..559bfdb 100644 (file)
@@ -728,12 +728,12 @@ static void cleanup_resource(struct ldlm_resource *res, cfs_list_t *q,
                 cfs_list_for_each(tmp, q) {
                         lock = cfs_list_entry(tmp, struct ldlm_lock,
                                               l_res_link);
-                        if (lock->l_flags & LDLM_FL_CLEANED) {
+                       if (ldlm_is_cleaned(lock)) {
                                 lock = NULL;
                                 continue;
                         }
                         LDLM_LOCK_GET(lock);
-                        lock->l_flags |= LDLM_FL_CLEANED;
+                       ldlm_set_cleaned(lock);
                         break;
                 }
 
@@ -744,13 +744,13 @@ static void cleanup_resource(struct ldlm_resource *res, cfs_list_t *q,
 
                 /* Set CBPENDING so nothing in the cancellation path
                 * can match this lock. */
-                lock->l_flags |= LDLM_FL_CBPENDING;
-                lock->l_flags |= LDLM_FL_FAILED;
+               ldlm_set_cbpending(lock);
+               ldlm_set_failed(lock);
                 lock->l_flags |= flags;
 
                 /* ... without sending a CANCEL message for local_only. */
                 if (local_only)
-                        lock->l_flags |= LDLM_FL_LOCAL_ONLY;
+                       ldlm_set_local_only(lock);
 
                 if (local_only && (lock->l_readers || lock->l_writers)) {
                         /* This is a little bit gross, but much better than the
@@ -1315,7 +1315,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, cfs_list_t *head,
 
        LDLM_DEBUG(lock, "About to add this lock:\n");
 
-       if (lock->l_flags & LDLM_FL_DESTROYED) {
+       if (ldlm_is_destroyed(lock)) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                return;
        }
@@ -1340,7 +1340,7 @@ void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
         ldlm_resource_dump(D_INFO, res);
         LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);
 
-       if (new->l_flags & LDLM_FL_DESTROYED) {
+       if (ldlm_is_destroyed(new)) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                goto out;
        }
index 7ea2f73..2518b6a 100644 (file)
@@ -131,11 +131,8 @@ int ll_dcompare(struct dentry *parent, struct qstr *d_name, struct qstr *name)
 
 static inline int return_if_equal(struct ldlm_lock *lock, void *data)
 {
-        if ((lock->l_flags &
-             (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA)) ==
-            (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA))
-                return LDLM_ITER_CONTINUE;
-        return LDLM_ITER_STOP;
+       return (ldlm_is_canceling(lock) && ldlm_is_discard_data(lock)) ?
+               LDLM_ITER_CONTINUE : LDLM_ITER_STOP;
 }
 
 /* find any ldlm lock of the inode in mdc and lov
index 432f685..c98ead8 100644 (file)
@@ -3579,10 +3579,10 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
        ENTRY;
 
        CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
-              PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY),
+              PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
               lock->l_lvb_data, lock->l_lvb_len);
 
-       if ((lock->l_lvb_data != NULL) && (lock->l_flags & LDLM_FL_LVB_READY))
+       if ((lock->l_lvb_data != NULL) && ldlm_is_lvb_ready(lock))
                RETURN(0);
 
        /* if layout lock was granted right away, the layout is returned
@@ -3662,7 +3662,7 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
        md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL);
 
        lock_res_and_lock(lock);
-       lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY);
+       lvb_ready = ldlm_is_lvb_ready(lock);
        unlock_res_and_lock(lock);
        /* checking lvb_ready is racy but this is okay. The worst case is
         * that multi processes may configure the file on the same time. */
index aaf864b..883c873 100644 (file)
@@ -226,7 +226,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                if (inode == NULL)
                        break;
 
-               LASSERT(lock->l_flags & LDLM_FL_CANCELING);
+               LASSERT(ldlm_is_canceling(lock));
 
                if (bits & MDS_INODELOCK_XATTR)
                        ll_xattr_cache_destroy(inode);
index a03a1d9..839dfa5 100644 (file)
@@ -127,7 +127,7 @@ static int osc_lock_invariant(struct osc_lock *ols)
         * ast.
         */
        if (! ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
-                  ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
+                  !ldlm_is_destroyed(olock)))
                return 0;
 
        if (! ergo(ols->ols_state == OLS_GRANTED,
@@ -1321,42 +1321,41 @@ static int osc_lock_flush(struct osc_lock *ols, int discard)
 static void osc_lock_cancel(const struct lu_env *env,
                             const struct cl_lock_slice *slice)
 {
-        struct cl_lock   *lock    = slice->cls_lock;
-        struct osc_lock  *olck    = cl2osc_lock(slice);
-        struct ldlm_lock *dlmlock = olck->ols_lock;
-        int               result  = 0;
-        int               discard;
+       struct cl_lock   *lock    = slice->cls_lock;
+       struct osc_lock  *olck    = cl2osc_lock(slice);
+       struct ldlm_lock *dlmlock = olck->ols_lock;
 
-        LASSERT(cl_lock_is_mutexed(lock));
-        LINVRNT(osc_lock_invariant(olck));
+       LASSERT(cl_lock_is_mutexed(lock));
+       LINVRNT(osc_lock_invariant(olck));
 
-        if (dlmlock != NULL) {
-                int do_cancel;
+       if (dlmlock != NULL) {
+               bool do_cancel;
+               int  result = 0;
 
-                discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
                if (olck->ols_state >= OLS_GRANTED)
-                       result = osc_lock_flush(olck, discard);
-                osc_lock_unhold(olck);
-
-                lock_res_and_lock(dlmlock);
-                /* Now that we're the only user of dlm read/write reference,
-                 * mostly the ->l_readers + ->l_writers should be zero.
-                 * However, there is a corner case.
-                 * See bug 18829 for details.*/
-                do_cancel = (dlmlock->l_readers == 0 &&
-                             dlmlock->l_writers == 0);
-                dlmlock->l_flags |= LDLM_FL_CBPENDING;
-                unlock_res_and_lock(dlmlock);
-                if (do_cancel)
+                       result = osc_lock_flush(olck,
+                               ldlm_is_discard_data(dlmlock));
+               osc_lock_unhold(olck);
+
+               lock_res_and_lock(dlmlock);
+               /* Now that we're the only user of dlm read/write reference,
+                * mostly the ->l_readers + ->l_writers should be zero.
+                * However, there is a corner case.
+                * See b=18829 for details.*/
+               do_cancel = (dlmlock->l_readers == 0 &&
+                            dlmlock->l_writers == 0);
+               ldlm_set_cbpending(dlmlock);
+               unlock_res_and_lock(dlmlock);
+               if (do_cancel)
                        result = ldlm_cli_cancel(&olck->ols_handle, LCF_ASYNC);
-                if (result < 0)
-                        CL_LOCK_DEBUG(D_ERROR, env, lock,
-                                      "lock %p cancel failure with error(%d)\n",
-                                      lock, result);
-        }
-        olck->ols_state = OLS_CANCELLED;
-        olck->ols_flags &= ~LDLM_FL_LVB_READY;
-        osc_lock_detach(env, olck);
+               if (result < 0)
+                       CL_LOCK_DEBUG(D_ERROR, env, lock,
+                                     "lock %p cancel failure with error(%d)\n",
+                                     lock, result);
+       }
+       olck->ols_state = OLS_CANCELLED;
+       olck->ols_flags &= ~LDLM_FL_LVB_READY;
+       osc_lock_detach(env, olck);
 }
 
 #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
index 582ba19..44ff796 100644 (file)
@@ -2539,7 +2539,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
         if (mode) {
                 struct ldlm_lock *matched = ldlm_handle2lock(lockh);
 
-                if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
+               if ((agl != 0) && !ldlm_is_lvb_ready(matched)) {
                         /* For AGL, if enqueue RPC is sent but the lock is not
                          * granted, then skip to process this strpe.
                          * Return -ECANCELED to tell the caller. */
index 6e2a3ff..a3edbdc 100644 (file)
@@ -643,7 +643,7 @@ static int qmt_dqacq(const struct lu_env *env, struct lu_device *ld,
                        RETURN(-ENOLCK);
                }
 
-               if ((lock->l_flags & LDLM_FL_AST_SENT) != 0) {
+               if (ldlm_is_ast_sent(lock)) {
                        struct ptlrpc_service_part      *svc;
                        unsigned int                     timeout;
 
index 75ce7ea..f6790a9 100644 (file)
@@ -213,7 +213,7 @@ static int qsd_glb_blocking_ast(struct ldlm_lock *lock,
                /* kick off reintegration thread if not running already, if
                 * it's just local cancel (for stack clean up or eviction),
                 * don't re-trigger the reintegration. */
-               if ((lock->l_flags & LDLM_FL_LOCAL_ONLY) == 0)
+               if (!ldlm_is_local_only(lock))
                        qsd_start_reint_thread(qqi);
 
                lu_ref_del(&qqi->qqi_reference, "ast_data_get", lock);
@@ -321,7 +321,7 @@ static int qsd_id_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *de
 
                /* just local cancel (for stack clean up or eviction), don't
                 * release quota space in this case */
-               if ((lock->l_flags & LDLM_FL_LOCAL_ONLY) != 0) {
+               if (ldlm_is_local_only(lock)) {
                        lqe_putref(lqe);
                        break;
                }