.tmp_versions
.Xrefs
*~
+\.#*
+\#*
*.a
*.dSYM
*.i
# NOTE: Please avoid bashisms (bash specific syntax) in this script
+# die a horrible death. All output goes to stderr.
+#
+die()
+{
+ echo "bootstrap failure: $*"
+ echo Aborting
+ exit 1
+} 1>&2
+
+run_cmd()
+{
+	printf '%s' "Running $*"  # not "echo -n": -n is not portable POSIX sh
+	eval "$@" || die "command exited with code $?"
+	echo
+}
+
# install Lustre Git commit hooks by default - LU-2083
for HOOK in commit-msg prepare-commit-msg; do
if [ -d .git/hooks -a ! -e .git/hooks/$HOOK ]; then
CONFIGURE_DIRS="libsysio lustre-iokit ldiskfs"
for dir in $REQUIRED_DIRS ; do
- if [ ! -d "$dir" ] ; then
- cat >&2 <<EOF
-Your tree seems to be missing $dir.
-Please read README.lustrecvs for details.
-EOF
- exit 1
- fi
+ test -d "$dir" || \
+ die "Your tree seems to be missing $dir.
+Please read README.lustrecvs for details."
+
ACLOCAL_FLAGS="$ACLOCAL_FLAGS -I $PWD/$dir/autoconf"
done
# optional directories for Lustre
fi
done
-run_cmd()
-{
- cmd="$@"
- echo -n "Running $cmd"
- eval $cmd
- res=$?
- if [ $res -ne 0 ]; then
- echo " failed: $res"
- echo "Aborting"
- exit 1
- fi
- echo
-}
+PWD_SAVE=$PWD
run_cmd "aclocal -I $PWD/config $ACLOCAL_FLAGS"
run_cmd "autoheader"
run_cmd "automake -a -c"
run_cmd autoconf
-# Run autogen.sh in these directories
-PWD_SAVE=$PWD
+# bootstrap in these directories
for dir in $CONFIGURE_DIRS; do
if [ -d $dir ] ; then
cd $dir
- echo "Running autogen for $dir..."
+ echo "bootstrapping in $dir..."
run_cmd "sh autogen.sh"
fi
cd $PWD_SAVE
--- /dev/null
+lustre_dlm_flags.[ch]
+!Makefile
--- /dev/null
+
+top_builddir = ../..
+src = lustre_dlm_flags.def lustre_dlm_flags.tpl
+targ = lustre_dlm_flags.h
+cropt = -nostartfiles -shared -rdynamic -fPIC
+
+default : $(targ)
+all : $(targ) flags crash
+
+$(targ) : $(src)
+ @set -x ; \
+ if autogen --version >/dev/null 2>&1 ; then \
+ autogen -DTESTING=1 lustre_dlm_flags.def ; \
+ rm -f $(top_builddir)/lustre/include/$@ ; \
+ sed '/It has been AutoGen-ed/s/-ed.*/-ed/;s/ *$$//' \
+ $@ > $(top_builddir)/lustre/include/$@ ; \
+ else cp $(top_builddir)/lustre/include/$@ . ; fi
+
+install : $(targ) install.sh
+ DESTDIR="$(DESTDIR)" ./install.sh
+
+clean :
+ rm -f *~ *flags.[hc] ag-log*
+
+clobber :
+ git clean -f -x -d .
+
+flags : lustre_dlm_flags
+lustre_dlm_flags : $(targ)
+ $(CC) -o $@ -g3 -O0 -DLDLM_FLAGS_PROGRAM=1 lustre_dlm_flags.c
+
+crash : lustre-crash-ext.so
+lustre-crash-ext.so : $(targ)
+	$(CC) -g3 -O0 $(cropt) -o $@ lustre_dlm_flags.c
+
+.PHONY : default all install clean clobber crash flags
--- /dev/null
+AutoGen Definitions lustre_dlm_flags.tpl;
+
+flag[ 0] = {
+ f-name = lock_changed;
+ f-mask = on_wire;
+ f-desc = 'extent, mode, or resource changed';
+};
+
+flag[ 1] = {
+ f-name = block_granted;
+ f-mask = on_wire, blocked;
+ f-desc = 'Server placed lock on granted list, or a recovering client wants '
+ 'the lock added to the granted list, no questions asked.';
+};
+
+flag[ 2] = {
+ f-name = block_conv;
+ f-mask = on_wire, blocked;
+ f-desc = <<- _EOF_
+ Server placed lock on conv list, or a recovering client wants the lock
+ added to the conv list, no questions asked.
+ _EOF_;
+};
+
+flag[ 3] = {
+ f-name = block_wait;
+ f-mask = on_wire, blocked;
+ f-desc = <<- _EOF_
+ Server placed lock on wait list, or a recovering client wants
+ the lock added to the wait list, no questions asked.
+ _EOF_;
+};
+
+// Skipped bit 4
+
+flag[ 5] = {
+ f-name = ast_sent;
+ f-mask = on_wire;
+ f-desc = 'blocking or cancel packet was queued for sending.';
+};
+
+// Skipped bits 6 and 7
+
+flag[ 8] = {
+ f-name = replay;
+ f-mask = on_wire;
+ f-desc = <<- _EOF_
+ Lock is being replayed. This could probably be implied by the fact that
+ one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous.
+ _EOF_;
+};
+
+flag[ 9] = {
+ f-name = intent_only;
+ f-mask = on_wire;
+ f-desc = "Don't grant lock, just do intent.";
+};
+
+// Skipped bits 10 and 11
+
+flag[12] = {
+ f-name = has_intent;
+ f-mask = on_wire;
+ f-desc = 'lock request has intent';
+};
+
+// Skipped bits 13, 14 and 15
+
+flag[16] = {
+ f-name = discard_data;
+ f-mask = on_wire;
+ f-desc = 'discard (no writeback) on cancel';
+};
+
+flag[17] = {
+ f-name = no_timeout;
+ f-mask = on_wire;
+ f-desc = 'Blocked by group lock - wait indefinitely';
+};
+
+flag[18] = {
+ f-name = block_nowait;
+ f-mask = on_wire;
+ f-desc = <<- _EOF_
+ Server told not to wait if blocked. For AGL, OST will not send
+ glimpse callback.
+ _EOF_;
+};
+
+flag[19] = {
+ f-name = test_lock;
+ f-mask = on_wire;
+ f-desc = 'return blocking lock';
+};
+
+// Skipped bits 20, 21, and 22
+
+flag[23] = {
+ f-name = cancel_on_block;
+ f-mask = on_wire, inherit;
+ f-desc = <<- _EOF_
+	Immediately cancel such locks when they block some other locks. Send
+	cancel notification to original lock holder, but expect no reply. This is
+	for clients (like liblustre) that cannot be expected to reliably respond
+	to blocking AST.
+ _EOF_;
+};
+
+// Skipped bits 24 through 29
+
+flag[30] = {
+ f-name = deny_on_contention;
+ f-mask = on_wire;
+ f-desc = 'measure lock contention and return -EUSERS if locking contention '
+ 'is high';
+};
+
+flag[31] = {
+ f-name = ast_discard_data;
+ f-mask = on_wire, ast;
+ f-desc = <<- _EOF_
+ These are flags that are mapped into the flags and ASTs of blocking locks
+ Add FL_DISCARD to blocking ASTs
+ _EOF_;
+};
+
+flag[32] = {
+ f-name = fail_loc;
+ f-mask = local_only;
+ f-desc = <<- _EOF_
+ Used for marking lock as a target for -EINTR while cp_ast sleep
+ emulation + race with upcoming bl_ast.
+ _EOF_;
+};
+
+flag[33] = {
+ f-name = skipped;
+ f-mask = local_only;
+ f-desc = <<- _EOF_
+ Used while processing the unused list to know that we have already
+ handled this lock and decided to skip it.
+ _EOF_;
+};
+
+
+flag[34] = {
+ f-name = cbpending;
+ f-mask = local_only, hide_lock;
+ f-desc = 'this lock is being destroyed';
+};
+
+flag[35] = {
+ f-name = wait_noreproc;
+ f-mask = local_only;
+ f-desc = 'not a real flag, not saved in lock';
+};
+
+flag[36] = {
+ f-name = cancel;
+ f-mask = local_only;
+ f-desc = 'cancellation callback already run';
+};
+
+flag[37] = {
+ f-name = local_only;
+ f-mask = local_only, hide_lock;
+ f-desc = 'whatever it might mean';
+};
+
+flag[38] = {
+ f-name = failed;
+ f-mask = local_only, gone, hide_lock;
+ f-desc = "don't run the cancel callback under ldlm_cli_cancel_unused";
+};
+
+flag[39] = {
+ f-name = canceling;
+ f-mask = local_only;
+ f-desc = 'lock cancel has already been sent';
+};
+
+flag[40] = {
+ f-name = local;
+ f-mask = local_only;
+ f-desc = 'local lock (ie, no srv/cli split)';
+};
+
+flag[41] = {
+ f-name = lvb_ready;
+ f-mask = local_only;
+ f-desc = <<- _EOF_
+ XXX FIXME: This is being added to b_size as a low-risk fix to the
+ fact that the LVB filling happens _after_ the lock has been granted,
+ so another thread can match it before the LVB has been updated. As
+ a dirty hack, we set LDLM_FL_LVB_READY only after we've done the LVB
+ poop. this is only needed on LOV/OSC now, where LVB is actually
+ used and callers must set it in input flags.
+
+ The proper fix is to do the granting inside of the completion AST,
+ which can be replaced with a LVB-aware wrapping function for OSC
+ locks. That change is pretty high-risk, though, and would need a
+ lot more testing.
+ _EOF_;
+};
+
+flag[42] = {
+ f-name = kms_ignore;
+ f-mask = local_only;
+ f-desc = <<- _EOF_
+ A lock contributes to the known minimum size (KMS) calculation until
+ it has finished the part of its cancelation that performs write back
+ on its dirty pages. It can remain on the granted list during this
+ whole time. Threads racing to update the KMS after performing their
+ writeback need to know to exclude each other's locks from the
+ calculation as they walk the granted list.
+ _EOF_;
+};
+
+flag[43] = {
+ f-name = cp_reqd;
+ f-mask = local_only;
+ f-desc = 'completion AST to be executed';
+};
+
+flag[44] = {
+ f-name = cleaned;
+ f-mask = local_only;
+ f-desc = 'cleanup_resource has already handled the lock';
+};
+
+flag[45] = {
+ f-name = atomic_cb;
+ f-mask = local_only, hide_lock;
+ f-desc = <<- _EOF_
+ optimization hint: LDLM can run blocking callback from current context
+ w/o involving separate thread. in order to decrease cs rate
+ _EOF_;
+};
+
+flag[46] = {
+ f-name = bl_ast;
+ f-mask = local_only;
+ f-desc = <<- _EOF_
+ It may happen that a client initiates two operations, e.g. unlink
+ and mkdir, such that the server sends a blocking AST for conflicting
+ locks to this client for the first operation, whereas the second
+ operation has canceled this lock and is waiting for rpc_lock which
+ is taken by the first operation. LDLM_FL_BL_AST is set by
+ ldlm_callback_handler() in the lock to prevent the Early Lock Cancel
+ (ELC) code from cancelling it.
+
+ LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock
+ cache is dropped to let ldlm_callback_handler() return EINVAL to the
+ server. It is used when ELC RPC is already prepared and is waiting
+ for rpc_lock, too late to send a separate CANCEL RPC.
+ _EOF_;
+};
+
+flag[47] = {
+ f-name = bl_done;
+ f-mask = local_only;
+ f-desc = 'whatever it might mean';
+};
+
+flag[48] = {
+ f-name = no_lru;
+ f-mask = local_only;
+ f-desc = <<- _EOF_
+ Don't put lock into the LRU list, so that it is not canceled due to
+ aging. Used by MGC locks, they are cancelled only at unmount or by
+ callback.
+ _EOF_;
+};
+
+flag[49] = {
+ f-name = fail_notified;
+ f-mask = local_only, gone;
+ f-desc = <<- _EOF_
+ Set for locks that failed and where the server has been notified.
+
+ Protected by lock and resource locks.
+ _EOF_;
+};
+
+flag[50] = {
+ f-name = destroyed;
+ f-mask = local_only, gone;
+ f-desc = <<- _EOF_
+ Set for locks that were removed from class hash table and will be
+ destroyed when last reference to them is released. Set by
+ ldlm_lock_destroy_internal().
+
+ Protected by lock and resource locks.
+ _EOF_;
+};
+
+flag[51] = {
+ f-name = server_lock;
+ f-mask = local_only;
+ f-desc = 'flag whether this is a server namespace lock';
+};
+
+flag[52] = {
+ f-name = res_locked;
+ f-mask = local_only;
+ f-desc = <<- _EOF_
+ It's set in lock_res_and_lock() and unset in unlock_res_and_lock().
+
+ NB: compared with check_res_locked(), checking this bit is cheaper.
+ Also, spin_is_locked() is deprecated for kernel code; one reason is
+ because it works only for SMP so user needs to add extra macros like
+ LASSERT_SPIN_LOCKED for uniprocessor kernels.
+ _EOF_;
+};
+
+flag[53] = {
+ f-name = waited;
+ f-mask = local_only;
+ f-desc = <<- _EOF_
+ It's set once we call ldlm_add_waiting_lock_res_locked()
+ to start the lock-timeout timer and it will never be reset.
+
+ Protected by lock and resource locks.
+ _EOF_;
+};
+
+flag[54] = {
+ f-name = ns_srv;
+ f-mask = local_only;
+ f-desc = 'Flag whether this is a server namespace lock.';
+};
--- /dev/null
+[= AutoGen5 Template h -*- Mode: C -*- =]
+[= (dne " * " "/* ") =]
+ *
+[= (gpl "lustre" " * ") =]
+ */
+/**
+ * \file [= (out-name) =]
+ * The flags and collections of flags (masks) for \see struct ldlm_lock.
+ * This file is derived from flag definitions in [=(def-file)=].
+ * The format is defined in the [=(tpl-file)=] template file.
+ *
+ * \addtogroup LDLM Lustre Distributed Lock Manager
+ * @{
+ *
+ * \name flags
+ * The flags and collections of flags (masks) for \see struct ldlm_lock.
+ * @{
+ */
+#ifndef LDLM_ALL_FLAGS_MASK
+[=
+
+;; Guile is unable to handle 64 bit unsigned ints very easily.
+;; BASH does just fine. Construct a shell script to compute the
+;; bit masks and echo out the appropriate #defines.
+;;
+(out-push-new "script.sh")
+
+=]
+mask_list=
+allbits=0
+fmt='#define LDLM_FL_%-16s 0x%016XULL // bit %2u
+#define ldlm_is_%-20s LDLM_TEST_FLAG(( _l), 1ULL << %2u)
+#define ldlm_set_%-20s LDLM_SET_FLAG(( _l), 1ULL << %2u)
+#define ldlm_clear_%-20s LDLM_CLEAR_FLAG((_l), 1ULL << %2u)\n'
+acc_fmt=''
+tmpfile=[=(base-name)=]-$$.tmp
+exec 8>&1 1> $tmpfile
+[=
+
+FOR flag
+
+=]
+bitno=[=(define temp-txt (get "f-desc"))
+ (for-index)=]
+bitval=$(( 1 << $bitno ))
+echo[=
+
+ IF (< (string-length temp-txt) 72)=]
+echo '/**' [= (raw-shell-str temp-txt) =] '*/'[=
+ ELSE=]
+echo '/**'
+{ fmt -w 74 | sed 's/^/ * /;s/ *$//;$s@$@ */@'
+} <<_EOF_
+[=(. temp-txt)=]
+_EOF_[=
+ ENDIF
+
+=]
+dn_name=[= (string-downcase! (string->c-name! (get "f-name"))) =]'(_l)'
+up_name=[= (string-upcase! (string->c-name! (get "f-name"))) =]
+printf "$fmt" $up_name $bitval $bitno \
+ $dn_name $bitno \
+ $dn_name $bitno \
+ $dn_name $bitno
+
+(( allbits += bitval ))[=
+
+ FOR f-mask =]
+mask_list=${mask_list}[= f-mask =]$'\n'
+mask_[= f-mask =]=$(( ${mask_[= f-mask =]:-0} + bitval ))[=
+ ENDFOR f-mask =][=
+
+ENDFOR flag
+
+=]
+exec 1>&8 8>&-
+fmt='\n/** l_flags bits marked as "%s" bits */
+#define LDLM_FL_%-22s 0x%016XULL\n'
+printf "$fmt" all_flags ALL_FLAGS_MASK $allbits
+
+for f in $(echo "$mask_list" | sort -u)
+do
+ ucf=$(echo $f | tr a-z A-Z)_MASK
+ eval v=\$mask_$f
+ printf "$fmt" $f $ucf $v
+done
+
+cat $tmpfile
+rm -f $tmpfile script.sh[=
+
+;; The script is done. Pop off the temporary output, handing
+;; it to the shell for evaluation. stdout becomes the output text
+;;
+(out-pop)
+(shell ". script.sh")
+
+=]
+
+/** test for ldlm_lock flag bit set */
+#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
+
+/** set a ldlm_lock flag bit */
+#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
+
+/** clear a ldlm_lock flag bit */
+#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
+
+/** Mask of flags inherited from parent lock when doing intents. */
+#define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK
+
+/** Mask of Flags sent in AST lock_flags to map into the receiving lock. */
+#define LDLM_AST_FLAGS LDLM_FL_AST_MASK
+
+/** @} subgroup */
+/** @} group */
+#ifdef WIRESHARK_COMPILE[=
+FOR flag =][=
+ (sprintf "\nstatic int hf_lustre_ldlm_fl_%-20s= -1;"
+ (string-downcase! (get "f-name")) ) =][=
+ENDFOR flag =]
+
+const value_string lustre_ldlm_flags_vals[] = {[=
+
+FOR flag =][=
+ (define up-name (string-upcase! (string->c-name! (get "f-name"))))
+ (sprintf "\n {LDLM_FL_%-20s \"LDLM_FL_%s\"}," (string-append up-name ",")
+ up-name) =][=
+ENDFOR flag =]
+ { 0, NULL }
+};
+#endif /* WIRESHARK_COMPILE */
+[= #
+
+// TEST CODE =][=
+IF (getenv "TESTING") =][=
+
+FOR flag (define len-list "")
+ (define str-list "")
+ (define temp-str "")
+ (define header-name (out-name))
+ (out-push-new (string-append (base-name) ".c"))
+
+ (for-from 0) (for-by 1) =][=
+
+ (if (exist? "f-name")
+ (begin
+ (set! temp-str (string-upcase! (get "f-name")))
+ (set! len-list (string-append len-list (c-string
+ (sprintf "%%%us" (- 20 (string-length temp-str))) ) "\n" ))
+ (set! str-list (string-append str-list
+ (c-string temp-str) "\n" ))
+ )
+ (begin
+ (set! len-list (string-append len-list "NULL\n"))
+ (set! str-list (string-append str-list "NULL\n"))
+ ) )
+
+ =][=
+
+ENDFOR flag
+
+\=]
+#include "[=(. header-name)=]"
+extern char ** args;
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static char const * const fill_fmts[] = {
+[=(out-push-new)=]
+columns --spread=1 -I4 -S, --end=' };' <<_EOF_
+[=(. len-list)=]
+_EOF_
+
+printf '\nstatic char const * const flag_names[] = {\n'
+columns --spread=1 -I4 -S, --end=' };' <<_EOF_
+[=(. str-list)=]
+_EOF_
+[= (shell (out-pop #t)) =]
+
+static void
+print_bits(unsigned long long v)
+{
+ static char const new_line[] = "\n";
+ char const * space_fmt = new_line + 1;
+ int ix = 0;
+ int ct = 0;
+
+ if ((v & ~LDLM_FL_ALL_FLAGS_MASK) != 0) {
+ unsigned long long wrong = v & ~LDLM_FL_ALL_FLAGS_MASK;
+ printf("undefined bits: 0x%016llX\n", wrong);
+ v &= LDLM_FL_ALL_FLAGS_MASK;
+ }
+
+ for (ix = 0; v != 0ULL; ix++, v >>= 1) {
+ if ((v & 0x1ULL) == 0)
+ continue;
+
+ printf(space_fmt, "");
+ if ((++ct & 0x03) == 0)
+ space_fmt = new_line;
+ else
+ space_fmt = fill_fmts[ix];
+ fputs(flag_names[ix], stdout);
+ }
+ putc('\n', stdout);
+}
+
+void
+cmd_ldlm_lock_flags(void)
+{
+ char * p = args[1];
+ char * e;
+ unsigned long long v;
+ bool flip_val = false;
+
+ if (p == NULL) {
+ printf("no argument\n");
+ return;
+ }
+ if (*p == '~') {
+ flip_val = true;
+ p++;
+ }
+
+ v = strtoull(p, &e, 0);
+ if (*e != '\0') {
+ errno = 0;
+ v = strtoull(p, &e, 16);
+ if ((errno != 0) || (*e != '\0')) {
+ printf("invalid number: %s\n", p);
+ return;
+ }
+ }
+ if (flip_val) {
+ v ^= ~0ULL;
+ v &= LDLM_FL_ALL_FLAGS_MASK;
+ }
+
+ print_bits(v);
+}
+
+char * help_ldlm_lock_flags[] = {
+ "ldlm_lock_flags",
+ "flag bit names for ldlm_lock",
+ "<numeric-value>",
+ "The names of the bits that are set in the numeric value are printed.",
+ NULL
+};
+
+#ifdef LDLM_FLAGS_PROGRAM
+#include <ctype.h>
+
+char ** args = NULL;
+
+void
+usage(int ex_code, char const * msg)
+{
+ int ix = 3;
+ FILE * fp = (ex_code == EXIT_SUCCESS) ? stdout : stderr;
+ args = help_ldlm_lock_flags;
+ if (msg != NULL)
+ fprintf(fp, "%s usage error: %s\n", args[0], msg);
+ fprintf(fp, "%s - %s\n", args[0], args[1]);
+ fprintf(fp, "Usage: %s %s\n", args[0], args[2]);
+ for (;;) {
+ char * txt = args[ix++];
+ if (txt == NULL)
+ break;
+ fprintf(fp, "%s\n", txt);
+ }
+ exit(ex_code);
+}
+
+int
+main(int argc, char ** argv)
+{
+	int ix = 1;
+	char * av[3] = { argv[0], NULL, NULL };
+
+	args = av;
+	switch (argc) {
+	case 0: case 1:
+		usage(EXIT_FAILURE, "argument missing");
+
+	case 2:
+	{
+		char * arg = argv[1];
+		if (*arg != '-')
+			break;
+		switch (arg[1]) {
+		case '-': if (arg[2] != 'h') break; /* only "--h..." is help */
+			/* FALLTHROUGH */
+		case 'h': usage(EXIT_SUCCESS, NULL);
+		default: break;
+		}
+		break;
+	}
+	}
+
+	while (ix < argc) {
+		av[1] = argv[ix++];
+		cmd_ldlm_lock_flags();
+	}
+	return EXIT_SUCCESS;
+}
+#endif /* LDLM_FLAGS_PROGRAM */
+[= (out-pop) =][=
+
+ENDIF TESTING
+
+ * Local Variables:
+ * mode: C
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * End:
+
+\=]
+#endif /* LDLM_ALL_FLAGS_MASK */
FLD_FIRST_OPC = FLD_QUERY
};
-#define LDLM_FL_LOCK_CHANGED 0x000001
-#define LDLM_FL_BLOCK_GRANTED 0x000002
-#define LDLM_FL_BLOCK_CONV 0x000004
-#define LDLM_FL_BLOCK_WAIT 0x000008
-#define LDLM_FL_CBPENDING 0x000010
-#define LDLM_FL_AST_SENT 0x000020
-#define LDLM_FL_WAIT_NOREPROC 0x000040
-#define LDLM_FL_CANCEL 0x000080
-#define LDLM_FL_REPLAY 0x000100
-#define LDLM_FL_INTENT_ONLY 0x000200
-#define LDLM_FL_LOCAL_ONLY 0x000400
-#define LDLM_FL_FAILED 0x000800
-#define LDLM_FL_HAS_INTENT 0x001000
-#define LDLM_FL_CANCELING 0x002000
-#define LDLM_FL_LOCAL 0x004000
-#define LDLM_FL_WARN 0x008000
-#define LDLM_FL_DISCARD_DATA 0x010000
-#define LDLM_FL_NO_TIMEOUT 0x020000
-#define LDLM_FL_BLOCK_NOWAIT 0x040000
-#define LDLM_FL_TEST_LOCK 0x080000
-#define LDLM_FL_LVB_READY 0x100000
-#define LDLM_FL_KMS_IGNORE 0x200000
-#define LDLM_FL_NO_LRU 0x400000
-#define LDLM_FL_CANCEL_ON_BLOCK 0x800000
-#define LDLM_FL_CP_REQD 0x1000000
-#define LDLM_FL_CLEANED 0x2000000
-#define LDLM_FL_ATOMIC_CB 0x4000000
-#define LDLM_FL_BL_AST 0x10000000
-#define LDLM_FL_BL_DONE 0x20000000
-#define LDLM_FL_DENY_ON_CONTENTION 0x40000000
-#define LDLM_AST_DISCARD_DATA 0x80000000
-
+#define WIRESHARK_COMPILE
+#include "lustre_dlm_flags.h"
#define LDLM_ENQUEUE (101)
#define LDLM_CONVERT (102)
static int hf_lustre_ost_lvb = -1 ;
-static int hf_lustre_ldlm_fl_lock_changed = -1;
-static int hf_lustre_ldlm_fl_block_granted = -1;
-static int hf_lustre_ldlm_fl_block_conv = -1;
-static int hf_lustre_ldlm_fl_block_wait = -1;
-static int hf_lustre_ldlm_fl_cbpending = -1;
-static int hf_lustre_ldlm_fl_ast_sent = -1;
-static int hf_lustre_ldlm_fl_wait_noreproc = -1;
-static int hf_lustre_ldlm_fl_cancel = -1;
-static int hf_lustre_ldlm_fl_replay = -1;
-static int hf_lustre_ldlm_fl_intent_only = -1;
-static int hf_lustre_ldlm_fl_local_only = -1;
-static int hf_lustre_ldlm_fl_failed = -1;
-static int hf_lustre_ldlm_fl_has_intent = -1;
-static int hf_lustre_ldlm_fl_canceling = -1;
-static int hf_lustre_ldlm_fl_local = -1;
-static int hf_lustre_ldlm_fl_warn = -1;
-static int hf_lustre_ldlm_fl_discard_data = -1;
-static int hf_lustre_ldlm_fl_no_timeout = -1;
-static int hf_lustre_ldlm_fl_block_nowait = -1;
-static int hf_lustre_ldlm_fl_test_lock = -1;
-static int hf_lustre_ldlm_fl_lvb_ready = -1;
-static int hf_lustre_ldlm_fl_kms_ignore = -1;
-static int hf_lustre_ldlm_fl_no_lru = -1;
-static int hf_lustre_ldlm_fl_cancel_on_block = -1;
-static int hf_lustre_ldlm_fl_cp_reqd = -1;
-static int hf_lustre_ldlm_fl_cleaned = -1;
-static int hf_lustre_ldlm_fl_atomic_cb = -1;
-static int hf_lustre_ldlm_fl_bl_ast = -1;
-static int hf_lustre_ldlm_fl_bl_done = -1;
-static int hf_lustre_ldlm_fl_deny_on_contention = -1;
-static int hf_lustre_ldlm_ast_discard_data = -1;
+#define hf_lustre_ldlm_ast_discard_data hf_lustre_ldlm_fl_ast_discard_data
static int hf_lustre_mdt_body = -1 ;
static int hf_lustre_mdt_body_fid1 = -1;
{0, NULL}
};
-const value_string lustre_ldlm_flags_vals[] = {
- {0x000001 , "LDLM_FL_LOCK_CHANGED"},
- {0x000002 , "LDLM_FL_BLOCK_GRANTED"},
- {0x000004 , "LDLM_FL_BLOCK_CONV"},
- {0x000008 , "LDLM_FL_BLOCK_WAIT"},
- {0x000010 , "LDLM_FL_CBPENDING"},
- {0x000020 , "LDLM_FL_AST_SENT"},
- {0x000040 , "LDLM_FL_WAIT_NOREPROC"},
- {0x000080 , "LDLM_FL_CANCEL"},
- {0x000100 , "LDLM_FL_REPLAY"},
- {0x000200 , "LDLM_FL_INTENT_ONLY"},
- {0x000400 , "LDLM_FL_LOCAL_ONLY"},
- {0x000800 , "LDLM_FL_FAILED"},
- {0x001000 , "LDLM_FL_HAS_INTENT"},
- {0x002000 , "LDLM_FL_CANCELING"},
- {0x004000 , "LDLM_FL_LOCAL"},
- {0x008000 , "LDLM_FL_WARN"},
- {0x010000 , "LDLM_FL_DISCARD_DATA"},
- {0x020000 , "LDLM_FL_NO_TIMEOUT"},
- {0x040000 , "LDLM_FL_BLOCK_NOWAIT"},
- {0x080000 , "LDLM_FL_TEST_LOCK"},
- {0x100000 , "LDLM_FL_LVB_READY"},
- {0x200000 , "LDLM_FL_KMS_IGNORE"},
- {0x400000 , "LDLM_FL_NO_LRU"},
- {0x800000 , "LDLM_FL_CANCEL_ON_BLOCK"},
- {0x1000000 , "LDLM_FL_CP_REQD"},
- {0x2000000 , "LDLM_FL_CLEANED"},
- {0x4000000 , "LDLM_FL_ATOMIC_CB"},
- {0x10000000 , "LDLM_FL_BL_AST"},
- {0x20000000 , "LDLM_FL_BL_DONE"},
- {0x40000000 , "LDLM_FL_DENY_ON_CONTENTION"},
- {0x80000000 , "LDLM_AST_DISCARD_DATA"},
- { 0, NULL }
-};
-
const value_string lustre_llog_op_type[] = {
{LLOG_PAD_MAGIC ,"LLOG_PAD_MAGIC "},
{OST_SZ_REC ,"OST_SZ_REC "},
item = proto_tree_add_item(parent_tree,hf_index, tvb, offset, 4, TRUE);
tree = proto_item_add_subtree(item, ett_lustre_ldlm_lock_flags);
}
- dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_ast_discard_data);
+ dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_ast_discard_data);
dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_deny_on_contention);
dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_bl_done );
dissect_uint32(tvb, offset, pinfo, tree, hf_lustre_ldlm_fl_bl_ast );
{&hf_lustre_ldlm_fl_bl_ast, {"LDLM_FL_BL_AST", "lustre.ldlm_fl_bl_ast", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BL_AST, "", HFILL } },
{&hf_lustre_ldlm_fl_bl_done, {"LDLM_FL_BL_DONE", "lustre.ldlm_fl_bl_done", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_BL_DONE, "", HFILL } },
{&hf_lustre_ldlm_fl_deny_on_contention, {"LDLM_FL_DENY_ON_CONTENTION", "lustre.ldlm_fl_deny_on_contention", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_DENY_ON_CONTENTION, "", HFILL } },
- {&hf_lustre_ldlm_ast_discard_data, {"LDLM_AST_DISCARD_DATA", "lustre.ldlm_ast_discard_data", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_AST_DISCARD_DATA, "", HFILL } },
+ {&hf_lustre_ldlm_fl_ast_discard_data, {"LDLM_AST_DISCARD_DATA", "lustre.ldlm_ast_discard_data", FT_BOOLEAN, 32, TFS(&lnet_flags_set_truth), LDLM_FL_AST_DISCARD_DATA, "", HFILL } },
{ &hf_lustre_obdo_o_misc,
{ "O Misc", "lustre.obdo.o_misc", FT_UINT32, BASE_DEC, NULL, 0, "", HFILL }},
lustre_fsfilt.h lustre_ha.h lustre_handles.h lustre_import.h \
lustre_lib.h lustre_sec.h lustre_lite.h lustre_log.h lustre_mds.h \
lustre_mdc.h lustre_net.h lustre_quota.h lvfs.h \
- obd_cache.h obd_class.h obd.h obd_lov.h \
+ obd_cache.h obd_class.h obd.h obd_lov.h lustre_dlm_flags.h \
obd_ost.h obd_support.h lustre_ver.h lu_object.h \
md_object.h dt_object.h lustre_param.h lustre_mdt.h \
lustre_fid.h lustre_fld.h lustre_req_layout.h lustre_capa.h \
#include <interval_tree.h> /* for interval_node{}, ldlm_extent */
#include <lu_ref.h>
+#include "lustre_dlm_flags.h"
+
struct obd_ops;
struct obd_device;
} ldlm_side_t;
/**
- * Declaration of flags sent through the wire.
- **/
-#define LDLM_FL_LOCK_CHANGED 0x000001 /* extent, mode, or resource changed */
-
-/**
- * If the server returns one of these flags, then the lock was put on that list.
- * If the client sends one of these flags (during recovery ONLY!), it wants the
- * lock added to the specified list, no questions asked.
- */
-#define LDLM_FL_BLOCK_GRANTED 0x000002
-#define LDLM_FL_BLOCK_CONV 0x000004
-#define LDLM_FL_BLOCK_WAIT 0x000008
-
-/* Used to be LDLM_FL_CBPENDING 0x000010 moved to non-wire flags */
-
-#define LDLM_FL_AST_SENT 0x000020 /* blocking or cancel packet was
- * queued for sending. */
-/* Used to be LDLM_FL_WAIT_NOREPROC 0x000040 moved to non-wire flags */
-/* Used to be LDLM_FL_CANCEL 0x000080 moved to non-wire flags */
-
-/**
- * Lock is being replayed. This could probably be implied by the fact that one
- * of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous.
- */
-#define LDLM_FL_REPLAY 0x000100
-
-#define LDLM_FL_INTENT_ONLY 0x000200 /* Don't grant lock, just do intent. */
-
-/* Used to be LDLM_FL_LOCAL_ONLY 0x000400 moved to non-wire flags */
-/* Used to be LDLM_FL_FAILED 0x000800 moved to non-wire flags */
-
-#define LDLM_FL_HAS_INTENT 0x001000 /* lock request has intent */
-
-/* Used to be LDLM_FL_CANCELING 0x002000 moved to non-wire flags */
-/* Used to be LDLM_FL_LOCAL 0x004000 moved to non-wire flags */
-
-#define LDLM_FL_DISCARD_DATA 0x010000 /* discard (no writeback) on cancel */
-
-#define LDLM_FL_NO_TIMEOUT 0x020000 /* Blocked by group lock - wait
- * indefinitely */
-
-/** file & record locking */
-#define LDLM_FL_BLOCK_NOWAIT 0x040000 /* Server told not to wait if blocked.
- * For AGL, OST will not send glimpse
- * callback. */
-#define LDLM_FL_TEST_LOCK 0x080000 // return blocking lock
-
-/* Used to be LDLM_FL_LVB_READY 0x100000 moved to non-wire flags */
-/* Used to be LDLM_FL_KMS_IGNORE 0x200000 moved to non-wire flags */
-/* Used to be LDLM_FL_NO_LRU 0x400000 moved to non-wire flags */
-
-/* Immediatelly cancel such locks when they block some other locks. Send
- * cancel notification to original lock holder, but expect no reply. This is
- * for clients (like liblustre) that cannot be expected to reliably response
- * to blocking AST. */
-#define LDLM_FL_CANCEL_ON_BLOCK 0x800000
-
-/* Flags flags inherited from parent lock when doing intents. */
-#define LDLM_INHERIT_FLAGS (LDLM_FL_CANCEL_ON_BLOCK)
-
-/* Used to be LDLM_FL_CP_REQD 0x1000000 moved to non-wire flags */
-/* Used to be LDLM_FL_CLEANED 0x2000000 moved to non-wire flags */
-/* Used to be LDLM_FL_ATOMIC_CB 0x4000000 moved to non-wire flags */
-/* Used to be LDLM_FL_BL_AST 0x10000000 moved to non-wire flags */
-/* Used to be LDLM_FL_BL_DONE 0x20000000 moved to non-wire flags */
-
-/* measure lock contention and return -EUSERS if locking contention is high */
-#define LDLM_FL_DENY_ON_CONTENTION 0x40000000
-
-/* These are flags that are mapped into the flags and ASTs of blocking locks */
-#define LDLM_AST_DISCARD_DATA 0x80000000 /* Add FL_DISCARD to blocking ASTs */
-
-/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
-#define LDLM_AST_FLAGS (LDLM_FL_DISCARD_DATA)
-
-/*
- * --------------------------------------------------------------------------
- * NOTE! Starting from this point, that is, LDLM_FL_* flags with values above
- * 0x80000000 will not be sent over the wire.
- * --------------------------------------------------------------------------
- */
-
-/**
- * Declaration of flags not sent through the wire.
- **/
-
-/**
- * Used for marking lock as a target for -EINTR while cp_ast sleep
- * emulation + race with upcoming bl_ast.
- */
-#define LDLM_FL_FAIL_LOC 0x100000000ULL
-
-/**
- * Used while processing the unused list to know that we have already
- * handled this lock and decided to skip it.
- */
-#define LDLM_FL_SKIPPED 0x200000000ULL
-/* this lock is being destroyed */
-#define LDLM_FL_CBPENDING 0x400000000ULL
-/* not a real flag, not saved in lock */
-#define LDLM_FL_WAIT_NOREPROC 0x800000000ULL
-/* cancellation callback already run */
-#define LDLM_FL_CANCEL 0x1000000000ULL
-#define LDLM_FL_LOCAL_ONLY 0x2000000000ULL
-/* don't run the cancel callback under ldlm_cli_cancel_unused */
-#define LDLM_FL_FAILED 0x4000000000ULL
-/* lock cancel has already been sent */
-#define LDLM_FL_CANCELING 0x8000000000ULL
-/* local lock (ie, no srv/cli split) */
-#define LDLM_FL_LOCAL 0x10000000000ULL
-/* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
- * the LVB filling happens _after_ the lock has been granted, so another thread
- * can match it before the LVB has been updated. As a dirty hack, we set
- * LDLM_FL_LVB_READY only after we've done the LVB poop.
- * this is only needed on LOV/OSC now, where LVB is actually used and callers
- * must set it in input flags.
- *
- * The proper fix is to do the granting inside of the completion AST, which can
- * be replaced with a LVB-aware wrapping function for OSC locks. That change is
- * pretty high-risk, though, and would need a lot more testing. */
-#define LDLM_FL_LVB_READY 0x20000000000ULL
-/* A lock contributes to the known minimum size (KMS) calculation until it has
- * finished the part of its cancelation that performs write back on its dirty
- * pages. It can remain on the granted list during this whole time. Threads
- * racing to update the KMS after performing their writeback need to know to
- * exclude each other's locks from the calculation as they walk the granted
- * list. */
-#define LDLM_FL_KMS_IGNORE 0x40000000000ULL
-/* completion AST to be executed */
-#define LDLM_FL_CP_REQD 0x80000000000ULL
-/* cleanup_resource has already handled the lock */
-#define LDLM_FL_CLEANED 0x100000000000ULL
-/* optimization hint: LDLM can run blocking callback from current context
- * w/o involving separate thread. in order to decrease cs rate */
-#define LDLM_FL_ATOMIC_CB 0x200000000000ULL
-
-/* It may happen that a client initiates two operations, e.g. unlink and
- * mkdir, such that the server sends a blocking AST for conflicting
- * locks to this client for the first operation, whereas the second
- * operation has canceled this lock and is waiting for rpc_lock which is
- * taken by the first operation. LDLM_FL_BL_AST is set by
- * ldlm_callback_handler() in the lock to prevent the Early Lock Cancel
- * (ELC) code from cancelling it.
- *
- * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock
- * cache is dropped to let ldlm_callback_handler() return EINVAL to the
- * server. It is used when ELC RPC is already prepared and is waiting
- * for rpc_lock, too late to send a separate CANCEL RPC. */
-#define LDLM_FL_BL_AST 0x400000000000ULL
-#define LDLM_FL_BL_DONE 0x800000000000ULL
-/* Don't put lock into the LRU list, so that it is not canceled due to aging.
- * Used by MGC locks, they are cancelled only at unmount or by callback. */
-#define LDLM_FL_NO_LRU 0x1000000000000ULL
-
-/**
* The blocking callback is overloaded to perform two functions. These flags
* indicate which operation should be performed.
*/
void *data);
/** Type for glimpse callback function of a lock. */
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
-/** Type for weight callback function of a lock. */
-typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
/** Work list for sending GL ASTs to multiple locks. */
struct ldlm_glimpse_work {
*/
ldlm_glimpse_callback l_glimpse_ast;
- /** XXX apparently unused "weight" handler. To be removed? */
- ldlm_weigh_callback l_weigh_ast;
-
/**
* Lock export.
* This is a pointer to actual client export for locks that were granted
ldlm_policy_data_t l_policy_data;
/**
- * Lock state flags.
- * Like whenever we receive any blocking requests for this lock, etc.
- * Protected by lr_lock.
+ * Lock state flags. Protected by lr_lock.
+ * \see lustre_dlm_flags.h where the bits are defined.
*/
__u64 l_flags;
+
/**
* Lock r/w usage counters.
* Protected by lr_lock.
/** Originally requested extent for the extent lock. */
struct ldlm_extent l_req_extent;
- unsigned int l_failed:1,
- /**
- * Set for locks that were removed from class hash table and will be
- * destroyed when last reference to them is released. Set by
- * ldlm_lock_destroy_internal().
- *
- * Protected by lock and resource locks.
- */
- l_destroyed:1,
- /*
- * it's set in lock_res_and_lock() and unset in unlock_res_and_lock().
- *
- * NB: compared with check_res_locked(), checking this bit is cheaper.
- * Also, spin_is_locked() is deprecated for kernel code; one reason is
- * because it works only for SMP so user needs to add extra macros like
- * LASSERT_SPIN_LOCKED for uniprocessor kernels.
- */
- l_res_locked:1,
- /*
- * It's set once we call ldlm_add_waiting_lock_res_locked()
- * to start the lock-timeout timer and it will never be reset.
- *
- * Protected by lock_res_and_lock().
- */
- l_waited:1,
- /** Flag whether this is a server namespace lock. */
- l_ns_srv:1;
-
/*
* Client-side-only members.
*/
void *ei_cb_bl; /** blocking lock callback */
void *ei_cb_cp; /** lock completion callback */
void *ei_cb_gl; /** lock glimpse callback */
- void *ei_cb_wg; /** lock weigh callback */
void *ei_cbdata; /** Data to be passed into callbacks. */
};
ldlm_completion_callback lcs_completion;
ldlm_blocking_callback lcs_blocking;
ldlm_glimpse_callback lcs_glimpse;
- ldlm_weigh_callback lcs_weigh;
};
/* ldlm_lockd.c */
--- /dev/null
+/* -*- buffer-read-only: t -*- vi: set ro:
+ *
+ * DO NOT EDIT THIS FILE (lustre_dlm_flags.h)
+ *
+ * It has been AutoGen-ed
+ * From the definitions lustre_dlm_flags.def
+ * and the template file lustre_dlm_flags.tpl
+ *
+ * lustre is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * lustre is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+/**
+ * \file lustre_dlm_flags.h
+ * The flags and collections of flags (masks) for \see struct ldlm_lock.
+ * This file is derived from flag definitions in lustre_dlm_flags.def.
+ * The format is defined in the lustre_dlm_flags.tpl template file.
+ *
+ * \addtogroup LDLM Lustre Distributed Lock Manager
+ * @{
+ *
+ * \name flags
+ * The flags and collections of flags (masks) for \see struct ldlm_lock.
+ * @{
+ */
+#ifndef LDLM_ALL_FLAGS_MASK
+
+/** l_flags bits marked as "all_flags" bits */
+#define LDLM_FL_ALL_FLAGS_MASK 0x007FFFFFC08F132FULL
+
+/** l_flags bits marked as "ast" bits */
+#define LDLM_FL_AST_MASK 0x0000000080000000ULL
+
+/** l_flags bits marked as "blocked" bits */
+#define LDLM_FL_BLOCKED_MASK 0x000000000000000EULL
+
+/** l_flags bits marked as "gone" bits */
+#define LDLM_FL_GONE_MASK 0x0006004000000000ULL
+
+/** l_flags bits marked as "hide_lock" bits */
+#define LDLM_FL_HIDE_LOCK_MASK 0x0000206400000000ULL
+
+/** l_flags bits marked as "inherit" bits */
+#define LDLM_FL_INHERIT_MASK 0x0000000000800000ULL
+
+/** l_flags bits marked as "local_only" bits */
+#define LDLM_FL_LOCAL_ONLY_MASK 0x007FFFFF00000000ULL
+
+/** l_flags bits marked as "on_wire" bits */
+#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F132FULL
+
+/** extent, mode, or resource changed */
+#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL // bit 0
+#define ldlm_is_lock_changed(_l) LDLM_TEST_FLAG(( _l), 1ULL << 0)
+#define ldlm_set_lock_changed(_l) LDLM_SET_FLAG(( _l), 1ULL << 0)
+#define ldlm_clear_lock_changed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 0)
+
+/**
+ * Server placed lock on granted list, or a recovering client wants the
+ * lock added to the granted list, no questions asked. */
+#define LDLM_FL_BLOCK_GRANTED 0x0000000000000002ULL // bit 1
+#define ldlm_is_block_granted(_l) LDLM_TEST_FLAG(( _l), 1ULL << 1)
+#define ldlm_set_block_granted(_l) LDLM_SET_FLAG(( _l), 1ULL << 1)
+#define ldlm_clear_block_granted(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 1)
+
+/**
+ * Server placed lock on conv list, or a recovering client wants the lock
+ * added to the conv list, no questions asked. */
+#define LDLM_FL_BLOCK_CONV 0x0000000000000004ULL // bit 2
+#define ldlm_is_block_conv(_l) LDLM_TEST_FLAG(( _l), 1ULL << 2)
+#define ldlm_set_block_conv(_l) LDLM_SET_FLAG(( _l), 1ULL << 2)
+#define ldlm_clear_block_conv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 2)
+
+/**
+ * Server placed lock on wait list, or a recovering client wants the lock
+ * added to the wait list, no questions asked. */
+#define LDLM_FL_BLOCK_WAIT 0x0000000000000008ULL // bit 3
+#define ldlm_is_block_wait(_l) LDLM_TEST_FLAG(( _l), 1ULL << 3)
+#define ldlm_set_block_wait(_l) LDLM_SET_FLAG(( _l), 1ULL << 3)
+#define ldlm_clear_block_wait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 3)
+
+/** blocking or cancel packet was queued for sending. */
+#define LDLM_FL_AST_SENT 0x0000000000000020ULL // bit 5
+#define ldlm_is_ast_sent(_l) LDLM_TEST_FLAG(( _l), 1ULL << 5)
+#define ldlm_set_ast_sent(_l) LDLM_SET_FLAG(( _l), 1ULL << 5)
+#define ldlm_clear_ast_sent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 5)
+
+/**
+ * Lock is being replayed. This could probably be implied by the fact that
+ * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
+#define LDLM_FL_REPLAY 0x0000000000000100ULL // bit 8
+#define ldlm_is_replay(_l) LDLM_TEST_FLAG(( _l), 1ULL << 8)
+#define ldlm_set_replay(_l) LDLM_SET_FLAG(( _l), 1ULL << 8)
+#define ldlm_clear_replay(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 8)
+
+/** Don't grant lock, just do intent. */
+#define LDLM_FL_INTENT_ONLY 0x0000000000000200ULL // bit 9
+#define ldlm_is_intent_only(_l) LDLM_TEST_FLAG(( _l), 1ULL << 9)
+#define ldlm_set_intent_only(_l) LDLM_SET_FLAG(( _l), 1ULL << 9)
+#define ldlm_clear_intent_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 9)
+
+/** lock request has intent */
+#define LDLM_FL_HAS_INTENT 0x0000000000001000ULL // bit 12
+#define ldlm_is_has_intent(_l) LDLM_TEST_FLAG(( _l), 1ULL << 12)
+#define ldlm_set_has_intent(_l) LDLM_SET_FLAG(( _l), 1ULL << 12)
+#define ldlm_clear_has_intent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 12)
+
+/** discard (no writeback) on cancel */
+#define LDLM_FL_DISCARD_DATA 0x0000000000010000ULL // bit 16
+#define ldlm_is_discard_data(_l) LDLM_TEST_FLAG(( _l), 1ULL << 16)
+#define ldlm_set_discard_data(_l) LDLM_SET_FLAG(( _l), 1ULL << 16)
+#define ldlm_clear_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 16)
+
+/** Blocked by group lock - wait indefinitely */
+#define LDLM_FL_NO_TIMEOUT 0x0000000000020000ULL // bit 17
+#define ldlm_is_no_timeout(_l) LDLM_TEST_FLAG(( _l), 1ULL << 17)
+#define ldlm_set_no_timeout(_l) LDLM_SET_FLAG(( _l), 1ULL << 17)
+#define ldlm_clear_no_timeout(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 17)
+
+/**
+ * Server told not to wait if blocked. For AGL, OST will not send glimpse
+ * callback. */
+#define LDLM_FL_BLOCK_NOWAIT 0x0000000000040000ULL // bit 18
+#define ldlm_is_block_nowait(_l) LDLM_TEST_FLAG(( _l), 1ULL << 18)
+#define ldlm_set_block_nowait(_l) LDLM_SET_FLAG(( _l), 1ULL << 18)
+#define ldlm_clear_block_nowait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 18)
+
+/** return blocking lock */
+#define LDLM_FL_TEST_LOCK 0x0000000000080000ULL // bit 19
+#define ldlm_is_test_lock(_l) LDLM_TEST_FLAG(( _l), 1ULL << 19)
+#define ldlm_set_test_lock(_l) LDLM_SET_FLAG(( _l), 1ULL << 19)
+#define ldlm_clear_test_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 19)
+
+/**
+ * Immediately cancel such locks when they block some other locks. Send
+ * cancel notification to original lock holder, but expect no reply. This
+ * is for clients (like liblustre) that cannot be expected to reliably
+ * respond to blocking AST. */
+#define LDLM_FL_CANCEL_ON_BLOCK 0x0000000000800000ULL // bit 23
+#define ldlm_is_cancel_on_block(_l) LDLM_TEST_FLAG(( _l), 1ULL << 23)
+#define ldlm_set_cancel_on_block(_l) LDLM_SET_FLAG(( _l), 1ULL << 23)
+#define ldlm_clear_cancel_on_block(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 23)
+
+/**
+ * measure lock contention and return -EUSERS if locking contention is high */
+#define LDLM_FL_DENY_ON_CONTENTION 0x0000000040000000ULL // bit 30
+#define ldlm_is_deny_on_contention(_l) LDLM_TEST_FLAG(( _l), 1ULL << 30)
+#define ldlm_set_deny_on_contention(_l) LDLM_SET_FLAG(( _l), 1ULL << 30)
+#define ldlm_clear_deny_on_contention(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 30)
+
+/**
+ * These are flags that are mapped into the flags and ASTs of blocking
+ * locks Add FL_DISCARD to blocking ASTs */
+#define LDLM_FL_AST_DISCARD_DATA 0x0000000080000000ULL // bit 31
+#define ldlm_is_ast_discard_data(_l) LDLM_TEST_FLAG(( _l), 1ULL << 31)
+#define ldlm_set_ast_discard_data(_l) LDLM_SET_FLAG(( _l), 1ULL << 31)
+#define ldlm_clear_ast_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 31)
+
+/**
+ * Used for marking lock as a target for -EINTR while cp_ast sleep emulation
+ * + race with upcoming bl_ast. */
+#define LDLM_FL_FAIL_LOC 0x0000000100000000ULL // bit 32
+#define ldlm_is_fail_loc(_l) LDLM_TEST_FLAG(( _l), 1ULL << 32)
+#define ldlm_set_fail_loc(_l) LDLM_SET_FLAG(( _l), 1ULL << 32)
+#define ldlm_clear_fail_loc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 32)
+
+/**
+ * Used while processing the unused list to know that we have already
+ * handled this lock and decided to skip it. */
+#define LDLM_FL_SKIPPED 0x0000000200000000ULL // bit 33
+#define ldlm_is_skipped(_l) LDLM_TEST_FLAG(( _l), 1ULL << 33)
+#define ldlm_set_skipped(_l) LDLM_SET_FLAG(( _l), 1ULL << 33)
+#define ldlm_clear_skipped(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 33)
+
+/** this lock is being destroyed */
+#define LDLM_FL_CBPENDING 0x0000000400000000ULL // bit 34
+#define ldlm_is_cbpending(_l) LDLM_TEST_FLAG(( _l), 1ULL << 34)
+#define ldlm_set_cbpending(_l) LDLM_SET_FLAG(( _l), 1ULL << 34)
+#define ldlm_clear_cbpending(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 34)
+
+/** not a real flag, not saved in lock */
+#define LDLM_FL_WAIT_NOREPROC 0x0000000800000000ULL // bit 35
+#define ldlm_is_wait_noreproc(_l) LDLM_TEST_FLAG(( _l), 1ULL << 35)
+#define ldlm_set_wait_noreproc(_l) LDLM_SET_FLAG(( _l), 1ULL << 35)
+#define ldlm_clear_wait_noreproc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 35)
+
+/** cancellation callback already run */
+#define LDLM_FL_CANCEL 0x0000001000000000ULL // bit 36
+#define ldlm_is_cancel(_l) LDLM_TEST_FLAG(( _l), 1ULL << 36)
+#define ldlm_set_cancel(_l) LDLM_SET_FLAG(( _l), 1ULL << 36)
+#define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36)
+
+/** whatever it might mean */
+#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL // bit 37
+#define ldlm_is_local_only(_l) LDLM_TEST_FLAG(( _l), 1ULL << 37)
+#define ldlm_set_local_only(_l) LDLM_SET_FLAG(( _l), 1ULL << 37)
+#define ldlm_clear_local_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 37)
+
+/** don't run the cancel callback under ldlm_cli_cancel_unused */
+#define LDLM_FL_FAILED 0x0000004000000000ULL // bit 38
+#define ldlm_is_failed(_l) LDLM_TEST_FLAG(( _l), 1ULL << 38)
+#define ldlm_set_failed(_l) LDLM_SET_FLAG(( _l), 1ULL << 38)
+#define ldlm_clear_failed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 38)
+
+/** lock cancel has already been sent */
+#define LDLM_FL_CANCELING 0x0000008000000000ULL // bit 39
+#define ldlm_is_canceling(_l) LDLM_TEST_FLAG(( _l), 1ULL << 39)
+#define ldlm_set_canceling(_l) LDLM_SET_FLAG(( _l), 1ULL << 39)
+#define ldlm_clear_canceling(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 39)
+
+/** local lock (ie, no srv/cli split) */
+#define LDLM_FL_LOCAL 0x0000010000000000ULL // bit 40
+#define ldlm_is_local(_l) LDLM_TEST_FLAG(( _l), 1ULL << 40)
+#define ldlm_set_local(_l) LDLM_SET_FLAG(( _l), 1ULL << 40)
+#define ldlm_clear_local(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 40)
+
+/**
+ * XXX FIXME: This is being added to b_size as a low-risk fix to the
+ * fact that the LVB filling happens _after_ the lock has been granted,
+ * so another thread can match it before the LVB has been updated. As a
+ * dirty hack, we set LDLM_FL_LVB_READY only after we've done the LVB poop.
+ * this is only needed on LOV/OSC now, where LVB is actually used and
+ * callers must set it in input flags.
+ *
+ * The proper fix is to do the granting inside of the completion AST,
+ * which can be replaced with a LVB-aware wrapping function for OSC locks.
+ * That change is pretty high-risk, though, and would need a lot more
+ * testing. */
+#define LDLM_FL_LVB_READY 0x0000020000000000ULL // bit 41
+#define ldlm_is_lvb_ready(_l) LDLM_TEST_FLAG(( _l), 1ULL << 41)
+#define ldlm_set_lvb_ready(_l) LDLM_SET_FLAG(( _l), 1ULL << 41)
+#define ldlm_clear_lvb_ready(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 41)
+
+/**
+ * A lock contributes to the known minimum size (KMS) calculation until it
+ * has finished the part of its cancelation that performs write back on its
+ * dirty pages. It can remain on the granted list during this whole time.
+ * Threads racing to update the KMS after performing their writeback need
+ * to know to exclude each other's locks from the calculation as they walk
+ * the granted list. */
+#define LDLM_FL_KMS_IGNORE 0x0000040000000000ULL // bit 42
+#define ldlm_is_kms_ignore(_l) LDLM_TEST_FLAG(( _l), 1ULL << 42)
+#define ldlm_set_kms_ignore(_l) LDLM_SET_FLAG(( _l), 1ULL << 42)
+#define ldlm_clear_kms_ignore(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 42)
+
+/** completion AST to be executed */
+#define LDLM_FL_CP_REQD 0x0000080000000000ULL // bit 43
+#define ldlm_is_cp_reqd(_l) LDLM_TEST_FLAG(( _l), 1ULL << 43)
+#define ldlm_set_cp_reqd(_l) LDLM_SET_FLAG(( _l), 1ULL << 43)
+#define ldlm_clear_cp_reqd(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 43)
+
+/** cleanup_resource has already handled the lock */
+#define LDLM_FL_CLEANED 0x0000100000000000ULL // bit 44
+#define ldlm_is_cleaned(_l) LDLM_TEST_FLAG(( _l), 1ULL << 44)
+#define ldlm_set_cleaned(_l) LDLM_SET_FLAG(( _l), 1ULL << 44)
+#define ldlm_clear_cleaned(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 44)
+
+/**
+ * optimization hint: LDLM can run blocking callback from current context
+ * w/o involving separate thread. in order to decrease cs rate */
+#define LDLM_FL_ATOMIC_CB 0x0000200000000000ULL // bit 45
+#define ldlm_is_atomic_cb(_l) LDLM_TEST_FLAG(( _l), 1ULL << 45)
+#define ldlm_set_atomic_cb(_l) LDLM_SET_FLAG(( _l), 1ULL << 45)
+#define ldlm_clear_atomic_cb(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 45)
+
+/**
+ * It may happen that a client initiates two operations, e.g. unlink and
+ * mkdir, such that the server sends a blocking AST for conflicting locks
+ * to this client for the first operation, whereas the second operation
+ * has canceled this lock and is waiting for rpc_lock which is taken by
+ * the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in
+ * the lock to prevent the Early Lock Cancel (ELC) code from cancelling it.
+ *
+ * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
+ * dropped to let ldlm_callback_handler() return EINVAL to the server. It
+ * is used when ELC RPC is already prepared and is waiting for rpc_lock,
+ * too late to send a separate CANCEL RPC. */
+#define LDLM_FL_BL_AST 0x0000400000000000ULL // bit 46
+#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG(( _l), 1ULL << 46)
+#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG(( _l), 1ULL << 46)
+#define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46)
+
+/** set by ldlm_cancel_callback() when the lock cache is dropped, \see LDLM_FL_BL_AST */
+#define LDLM_FL_BL_DONE 0x0000800000000000ULL // bit 47
+#define ldlm_is_bl_done(_l) LDLM_TEST_FLAG(( _l), 1ULL << 47)
+#define ldlm_set_bl_done(_l) LDLM_SET_FLAG(( _l), 1ULL << 47)
+#define ldlm_clear_bl_done(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 47)
+
+/**
+ * Don't put lock into the LRU list, so that it is not canceled due
+ * to aging. Used by MGC locks, they are cancelled only at unmount or
+ * by callback. */
+#define LDLM_FL_NO_LRU 0x0001000000000000ULL // bit 48
+#define ldlm_is_no_lru(_l) LDLM_TEST_FLAG(( _l), 1ULL << 48)
+#define ldlm_set_no_lru(_l) LDLM_SET_FLAG(( _l), 1ULL << 48)
+#define ldlm_clear_no_lru(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 48)
+
+/**
+ * Set for locks that failed and where the server has been notified.
+ *
+ * Protected by lock and resource locks. */
+#define LDLM_FL_FAIL_NOTIFIED 0x0002000000000000ULL // bit 49
+#define ldlm_is_fail_notified(_l) LDLM_TEST_FLAG(( _l), 1ULL << 49)
+#define ldlm_set_fail_notified(_l) LDLM_SET_FLAG(( _l), 1ULL << 49)
+#define ldlm_clear_fail_notified(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 49)
+
+/**
+ * Set for locks that were removed from class hash table and will
+ * be destroyed when last reference to them is released. Set by
+ * ldlm_lock_destroy_internal().
+ *
+ * Protected by lock and resource locks. */
+#define LDLM_FL_DESTROYED 0x0004000000000000ULL // bit 50
+#define ldlm_is_destroyed(_l) LDLM_TEST_FLAG(( _l), 1ULL << 50)
+#define ldlm_set_destroyed(_l) LDLM_SET_FLAG(( _l), 1ULL << 50)
+#define ldlm_clear_destroyed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 50)
+
+/** flag whether this is a server namespace lock */
+#define LDLM_FL_SERVER_LOCK 0x0008000000000000ULL // bit 51
+#define ldlm_is_server_lock(_l) LDLM_TEST_FLAG(( _l), 1ULL << 51)
+#define ldlm_set_server_lock(_l) LDLM_SET_FLAG(( _l), 1ULL << 51)
+#define ldlm_clear_server_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 51)
+
+/**
+ * It's set in lock_res_and_lock() and unset in unlock_res_and_lock().
+ *
+ * NB: compared with check_res_locked(), checking this bit is cheaper.
+ * Also, spin_is_locked() is deprecated for kernel code; one reason is
+ * because it works only for SMP so user needs to add extra macros like
+ * LASSERT_SPIN_LOCKED for uniprocessor kernels. */
+#define LDLM_FL_RES_LOCKED 0x0010000000000000ULL // bit 52
+#define ldlm_is_res_locked(_l) LDLM_TEST_FLAG(( _l), 1ULL << 52)
+#define ldlm_set_res_locked(_l) LDLM_SET_FLAG(( _l), 1ULL << 52)
+#define ldlm_clear_res_locked(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 52)
+
+/**
+ * It's set once we call ldlm_add_waiting_lock_res_locked() to start the
+ * lock-timeout timer and it will never be reset.
+ *
+ * Protected by lock and resource locks. */
+#define LDLM_FL_WAITED 0x0020000000000000ULL // bit 53
+#define ldlm_is_waited(_l) LDLM_TEST_FLAG(( _l), 1ULL << 53)
+#define ldlm_set_waited(_l) LDLM_SET_FLAG(( _l), 1ULL << 53)
+#define ldlm_clear_waited(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 53)
+
+/** Flag whether this is a server namespace lock. */
+#define LDLM_FL_NS_SRV 0x0040000000000000ULL // bit 54
+#define ldlm_is_ns_srv(_l) LDLM_TEST_FLAG(( _l), 1ULL << 54)
+#define ldlm_set_ns_srv(_l) LDLM_SET_FLAG(( _l), 1ULL << 54)
+#define ldlm_clear_ns_srv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 54)
+
+/** test for ldlm_lock flag bit set */
+#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
+
+/** set a ldlm_lock flag bit */
+#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
+
+/** clear a ldlm_lock flag bit */
+#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
+
+/** Mask of flags inherited from parent lock when doing intents. */
+#define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK
+
+/** Mask of Flags sent in AST lock_flags to map into the receiving lock. */
+#define LDLM_AST_FLAGS LDLM_FL_AST_MASK
+
+/** @} subgroup */
+/** @} group */
+#ifdef WIRESHARK_COMPILE
+static int hf_lustre_ldlm_fl_lock_changed = -1;
+static int hf_lustre_ldlm_fl_block_granted = -1;
+static int hf_lustre_ldlm_fl_block_conv = -1;
+static int hf_lustre_ldlm_fl_block_wait = -1;
+static int hf_lustre_ldlm_fl_ast_sent = -1;
+static int hf_lustre_ldlm_fl_replay = -1;
+static int hf_lustre_ldlm_fl_intent_only = -1;
+static int hf_lustre_ldlm_fl_has_intent = -1;
+static int hf_lustre_ldlm_fl_discard_data = -1;
+static int hf_lustre_ldlm_fl_no_timeout = -1;
+static int hf_lustre_ldlm_fl_block_nowait = -1;
+static int hf_lustre_ldlm_fl_test_lock = -1;
+static int hf_lustre_ldlm_fl_cancel_on_block = -1;
+static int hf_lustre_ldlm_fl_deny_on_contention = -1;
+static int hf_lustre_ldlm_fl_ast_discard_data = -1;
+static int hf_lustre_ldlm_fl_fail_loc = -1;
+static int hf_lustre_ldlm_fl_skipped = -1;
+static int hf_lustre_ldlm_fl_cbpending = -1;
+static int hf_lustre_ldlm_fl_wait_noreproc = -1;
+static int hf_lustre_ldlm_fl_cancel = -1;
+static int hf_lustre_ldlm_fl_local_only = -1;
+static int hf_lustre_ldlm_fl_failed = -1;
+static int hf_lustre_ldlm_fl_canceling = -1;
+static int hf_lustre_ldlm_fl_local = -1;
+static int hf_lustre_ldlm_fl_lvb_ready = -1;
+static int hf_lustre_ldlm_fl_kms_ignore = -1;
+static int hf_lustre_ldlm_fl_cp_reqd = -1;
+static int hf_lustre_ldlm_fl_cleaned = -1;
+static int hf_lustre_ldlm_fl_atomic_cb = -1;
+static int hf_lustre_ldlm_fl_bl_ast = -1;
+static int hf_lustre_ldlm_fl_bl_done = -1;
+static int hf_lustre_ldlm_fl_no_lru = -1;
+static int hf_lustre_ldlm_fl_fail_notified = -1;
+static int hf_lustre_ldlm_fl_destroyed = -1;
+static int hf_lustre_ldlm_fl_server_lock = -1;
+static int hf_lustre_ldlm_fl_res_locked = -1;
+static int hf_lustre_ldlm_fl_waited = -1;
+static int hf_lustre_ldlm_fl_ns_srv = -1;
+
+const value_string lustre_ldlm_flags_vals[] = {
+ {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
+ {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
+ {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
+ {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
+ {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
+ {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
+ {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
+ {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
+ {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
+ {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
+ {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
+ {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
+ {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
+ {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
+ {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
+ {LDLM_FL_FAIL_LOC, "LDLM_FL_FAIL_LOC"},
+ {LDLM_FL_SKIPPED, "LDLM_FL_SKIPPED"},
+ {LDLM_FL_CBPENDING, "LDLM_FL_CBPENDING"},
+ {LDLM_FL_WAIT_NOREPROC, "LDLM_FL_WAIT_NOREPROC"},
+ {LDLM_FL_CANCEL, "LDLM_FL_CANCEL"},
+ {LDLM_FL_LOCAL_ONLY, "LDLM_FL_LOCAL_ONLY"},
+ {LDLM_FL_FAILED, "LDLM_FL_FAILED"},
+ {LDLM_FL_CANCELING, "LDLM_FL_CANCELING"},
+ {LDLM_FL_LOCAL, "LDLM_FL_LOCAL"},
+ {LDLM_FL_LVB_READY, "LDLM_FL_LVB_READY"},
+ {LDLM_FL_KMS_IGNORE, "LDLM_FL_KMS_IGNORE"},
+ {LDLM_FL_CP_REQD, "LDLM_FL_CP_REQD"},
+ {LDLM_FL_CLEANED, "LDLM_FL_CLEANED"},
+ {LDLM_FL_ATOMIC_CB, "LDLM_FL_ATOMIC_CB"},
+ {LDLM_FL_BL_AST, "LDLM_FL_BL_AST"},
+ {LDLM_FL_BL_DONE, "LDLM_FL_BL_DONE"},
+ {LDLM_FL_NO_LRU, "LDLM_FL_NO_LRU"},
+ {LDLM_FL_FAIL_NOTIFIED, "LDLM_FL_FAIL_NOTIFIED"},
+ {LDLM_FL_DESTROYED, "LDLM_FL_DESTROYED"},
+ {LDLM_FL_SERVER_LOCK, "LDLM_FL_SERVER_LOCK"},
+ {LDLM_FL_RES_LOCKED, "LDLM_FL_RES_LOCKED"},
+ {LDLM_FL_WAITED, "LDLM_FL_WAITED"},
+ {LDLM_FL_NS_SRV, "LDLM_FL_NS_SRV"},
+ { 0, NULL }
+};
+#endif /* WIRESHARK_COMPILE */
+#endif /* LDLM_ALL_FLAGS_MASK */
struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
{
/* on server-side resource of lock doesn't change */
- if (!lock->l_ns_srv)
+ if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
spin_lock(&lock->l_lock);
lock_res(lock->l_resource);
- lock->l_res_locked = 1;
+ lock->l_flags |= LDLM_FL_RES_LOCKED;
return lock->l_resource;
}
EXPORT_SYMBOL(lock_res_and_lock);
void unlock_res_and_lock(struct ldlm_lock *lock)
{
/* on server-side resource of lock doesn't change */
- lock->l_res_locked = 0;
+ lock->l_flags &= ~LDLM_FL_RES_LOCKED;
unlock_res(lock->l_resource);
- if (!lock->l_ns_srv)
+ if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
spin_unlock(&lock->l_lock);
}
EXPORT_SYMBOL(unlock_res_and_lock);
LASSERT(cfs_list_empty(&res->lr_converting));
LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
- !(lock->l_flags & LDLM_AST_DISCARD_DATA));
+ !(lock->l_flags & LDLM_FL_AST_DISCARD_DATA));
check_res_locked(res);
*err = ELDLM_OK;
!ns_is_client(ldlm_res_to_ns(res)))
class_fail_export(lock->l_export);
- lock_res(res);
- if (rc == -ERESTART) {
- /* 15715: The lock was granted and destroyed after
- * resource lock was dropped. Interval node was freed
- * in ldlm_lock_destroy. Anyway, this always happens
- * when a client is being evicted. So it would be
- * ok to return an error. -jay */
- if (lock->l_destroyed) {
- *err = -EAGAIN;
- GOTO(out, rc = -EAGAIN);
- }
-
- /* lock was granted while resource was unlocked. */
- if (lock->l_granted_mode == lock->l_req_mode) {
- /* bug 11300: if the lock has been granted,
- * break earlier because otherwise, we will go
- * to restart and ldlm_resource_unlink will be
- * called and it causes the interval node to be
- * freed. Then we will fail at
- * ldlm_extent_add_lock() */
- *flags &= ~(LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV |
- LDLM_FL_BLOCK_WAIT);
- GOTO(out, rc = 0);
- }
-
- GOTO(restart, -ERESTART);
- }
-
- *flags |= LDLM_FL_BLOCK_GRANTED;
- /* this way we force client to wait for the lock
- * endlessly once the lock is enqueued -bzzz */
- *flags |= LDLM_FL_NO_TIMEOUT;
-
- }
- RETURN(0);
+ lock_res(res);
+ if (rc == -ERESTART) {
+ /* 15715: The lock was granted and destroyed after
+ * resource lock was dropped. Interval node was freed
+ * in ldlm_lock_destroy. Anyway, this always happens
+ * when a client is being evicted. So it would be
+ * ok to return an error. -jay */
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
+ *err = -EAGAIN;
+ GOTO(out, rc = -EAGAIN);
+ }
+
+ /* lock was granted while resource was unlocked. */
+ if (lock->l_granted_mode == lock->l_req_mode) {
+ /* bug 11300: if the lock has been granted,
+ * break earlier because otherwise, we will go
+ * to restart and ldlm_resource_unlink will be
+ * called and it causes the interval node to be
+ * freed. Then we will fail at
+ * ldlm_extent_add_lock() */
+ *flags &= ~LDLM_FL_BLOCKED_MASK;
+ GOTO(out, rc = 0);
+ }
+
+ GOTO(restart, -ERESTART);
+ }
+
+ /* this way we force client to wait for the lock
+ * endlessly once the lock is enqueued -bzzz */
+ *flags |= LDLM_FL_BLOCK_GRANTED | LDLM_FL_NO_TIMEOUT;
+
+ }
+ RETURN(0);
out:
- if (!cfs_list_empty(&rpc_list)) {
- LASSERT(!(lock->l_flags & LDLM_AST_DISCARD_DATA));
- discard_bl_list(&rpc_list);
- }
- RETURN(rc);
+ if (!cfs_list_empty(&rpc_list)) {
+ LASSERT(!(lock->l_flags & LDLM_FL_AST_DISCARD_DATA));
+ discard_bl_list(&rpc_list);
+ }
+ RETURN(rc);
}
#endif /* HAVE_SERVER_SUPPORT */
granted:
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
- if (lock->l_destroyed) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
- RETURN(0);
- }
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
+ LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
+ RETURN(0);
+ }
if (lock->l_flags & LDLM_FL_FAILED) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
"final lock_put on destroyed lock, freeing it.");
res = lock->l_resource;
- LASSERT(lock->l_destroyed);
+ LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
LASSERT(cfs_list_empty(&lock->l_res_link));
LASSERT(cfs_list_empty(&lock->l_pending_chain));
*/
int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- int rc;
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+ int rc;
- ENTRY;
- if (lock->l_ns_srv) {
- LASSERT(cfs_list_empty(&lock->l_lru));
- RETURN(0);
- }
+ ENTRY;
+ if (lock->l_flags & LDLM_FL_NS_SRV) {
+ LASSERT(cfs_list_empty(&lock->l_lru));
+ RETURN(0);
+ }
spin_lock(&ns->ns_lock);
rc = ldlm_lock_remove_from_lru_nolock(lock);
*/
void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- ENTRY;
- if (lock->l_ns_srv) {
- LASSERT(cfs_list_empty(&lock->l_lru));
- EXIT;
- return;
- }
+ ENTRY;
+ if (lock->l_flags & LDLM_FL_NS_SRV) {
+ LASSERT(cfs_list_empty(&lock->l_lru));
+ EXIT;
+ return;
+ }
spin_lock(&ns->ns_lock);
if (!cfs_list_empty(&lock->l_lru)) {
LBUG();
}
- if (lock->l_destroyed) {
- LASSERT(cfs_list_empty(&lock->l_lru));
- EXIT;
- return 0;
- }
- lock->l_destroyed = 1;
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
+ LASSERT(cfs_list_empty(&lock->l_lru));
+ EXIT;
+ return 0;
+ }
+ lock->l_flags |= LDLM_FL_DESTROYED;
if (lock->l_export && lock->l_export->exp_lock_hash) {
/* NB: it's safe to call cfs_hash_del() even lock isn't
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it */
- if (flags == 0 && !lock->l_destroyed) {
+	if (flags == 0 && (lock->l_flags & LDLM_FL_DESTROYED) == 0) {
lu_ref_add(&lock->l_reference, "handle", cfs_current());
RETURN(lock);
}
LASSERT(lock->l_resource != NULL);
lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current());
- if (unlikely(lock->l_destroyed)) {
+ if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
unlock_res_and_lock(lock);
CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
LDLM_LOCK_PUT(lock);
lock->l_flags |= LDLM_FL_AST_SENT;
/* If the enqueuing client said so, tell the AST recipient to
* discard dirty data, rather than writing back. */
- if (new->l_flags & LDLM_AST_DISCARD_DATA)
+ if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
lock->l_flags |= LDLM_FL_DISCARD_DATA;
LASSERT(cfs_list_empty(&lock->l_bl_ast));
cfs_list_add(&lock->l_bl_ast, work_list);
(lock->l_flags & LDLM_FL_CBPENDING)) {
/* If we received a blocked AST and this was the last reference,
* run the callback. */
- if (lock->l_ns_srv && lock->l_export)
+ if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export)
CERROR("FL_CBPENDING set on non-local lock--just a "
"warning\n");
ldlm_resource_dump(D_INFO, res);
LDLM_DEBUG(lock, "About to add lock:");
- if (lock->l_destroyed) {
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
}
policy->l_inodebits.bits))
continue;
- if (!unref &&
- (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED ||
- lock->l_failed))
+ if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
continue;
if ((flags & LDLM_FL_LOCAL_ONLY) &&
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
{
- if (!lock->l_failed) {
- lock->l_failed = 1;
- cfs_waitq_broadcast(&lock->l_waitq);
- }
+ if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
+ lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
+ cfs_waitq_broadcast(&lock->l_waitq);
+ }
}
EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
ldlm_lock2handle(lock, lockh);
if ((flags & LDLM_FL_LVB_READY) &&
(!(lock->l_flags & LDLM_FL_LVB_READY))) {
+ __u64 wait_flags = LDLM_FL_LVB_READY |
+ LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
struct l_wait_info lwi;
if (lock->l_completion_ast) {
int err = lock->l_completion_ast(lock,
lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
NULL, LWI_ON_SIGNAL_NOOP, NULL);
- /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
- l_wait_event(lock->l_waitq,
- lock->l_flags & LDLM_FL_LVB_READY ||
- lock->l_destroyed || lock->l_failed,
- &lwi);
+ /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
+ l_wait_event(lock->l_waitq,
+ lock->l_flags & wait_flags,
+ &lwi);
if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
if (flags & LDLM_FL_TEST_LOCK)
LDLM_LOCK_RELEASE(lock);
lock = ldlm_handle2lock(lockh);
if (lock != NULL) {
lock_res_and_lock(lock);
- if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED ||
- lock->l_failed)
+ if (lock->l_flags & LDLM_FL_GONE_MASK)
GOTO(out, mode);
if (lock->l_flags & LDLM_FL_CBPENDING &&
lock->l_req_mode = mode;
lock->l_ast_data = data;
lock->l_pid = cfs_curproc_pid();
- lock->l_ns_srv = !!ns_is_server(ns);
+ if (ns_is_server(ns))
+ lock->l_flags |= LDLM_FL_NS_SRV;
if (cbs) {
lock->l_blocking_ast = cbs->lcs_blocking;
lock->l_completion_ast = cbs->lcs_completion;
lock->l_glimpse_ast = cbs->lcs_glimpse;
- lock->l_weigh_ast = cbs->lcs_weigh;
}
lock->l_tree_node = NULL;
node = NULL;
}
- /* Some flags from the enqueue want to make it into the AST, via the
- * lock's l_flags. */
- lock->l_flags |= *flags & LDLM_AST_DISCARD_DATA;
-
- /* This distinction between local lock trees is very important; a client
- * namespace only has information about locks taken by that client, and
- * thus doesn't have enough information to decide for itself if it can
- * be granted (below). In this case, we do exactly what the server
- * tells us to do, as dictated by the 'flags'.
- *
- * We do exactly the same thing during recovery, when the server is
- * more or less trusting the clients not to lie.
- *
- * FIXME (bug 268): Detect obvious lies by checking compatibility in
- * granted/converting queues. */
+ /* Some flags from the enqueue want to make it into the AST, via the
+ * lock's l_flags. */
+ lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
+
+ /* This distinction between local lock trees is very important; a client
+ * namespace only has information about locks taken by that client, and
+ * thus doesn't have enough information to decide for itself if it can
+ * be granted (below). In this case, we do exactly what the server
+ * tells us to do, as dictated by the 'flags'.
+ *
+ * We do exactly the same thing during recovery, when the server is
+ * more or less trusting the clients not to lie.
+ *
+ * FIXME (bug 268): Detect obvious lies by checking compatibility in
+ * granted/converting queues. */
if (local) {
if (*flags & LDLM_FL_BLOCK_CONV)
ldlm_resource_add_lock(res, &res->lr_converting, lock);
LBUG();
}
- if (lock->l_waited)
+ if (lock->l_flags & LDLM_FL_WAITED)
ldlm_del_waiting_lock(lock);
/* Releases cancel callback. */
ldlm_cancel_callback(lock);
- /* Yes, second time, just in case it was added again while we were
- running with no res lock in ldlm_cancel_callback */
- if (lock->l_waited)
+ /* Yes, second time, just in case it was added again while we were
+ * running with no res lock in ldlm_cancel_callback */
+ if (lock->l_flags & LDLM_FL_WAITED)
ldlm_del_waiting_lock(lock);
ldlm_resource_unlink_lock(lock);
continue;
}
- if (lock->l_destroyed) {
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
/* release the lock refcount where
* waiting_locks_callback() founds */
LDLM_LOCK_RELEASE(lock);
libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
cfs_list_del_init(&lock->l_pending_chain);
- if (lock->l_destroyed) {
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
/* relay the lock refcount decrease to
* expired lock thread */
cfs_list_add(&lock->l_pending_chain,
libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
cfs_list_del_init(&lock->l_pending_chain);
- if (lock->l_destroyed) {
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
/* relay the lock refcount decrease to
* expired lock thread */
cfs_list_add(&lock->l_pending_chain,
int timeout = ldlm_get_enq_timeout(lock);
/* NB: must be called with hold of lock_res_and_lock() */
- LASSERT(lock->l_res_locked);
- lock->l_waited = 1;
+ LASSERT(lock->l_flags & LDLM_FL_RES_LOCKED);
+ lock->l_flags |= LDLM_FL_WAITED;
LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
spin_lock_bh(&waiting_locks_spinlock);
- if (lock->l_destroyed) {
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
static cfs_time_t next;
spin_unlock_bh(&waiting_locks_spinlock);
LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
# ifdef HAVE_SERVER_SUPPORT
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
- LASSERT(lock->l_res_locked);
- LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
+ LASSERT((lock->l_flags & (LDLM_FL_RES_LOCKED|LDLM_FL_CANCEL_ON_BLOCK))
+ == LDLM_FL_RES_LOCKED);
RETURN(1);
}
RETURN(0);
}
- if (lock->l_destroyed) {
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
/* What's the point? */
unlock_res_and_lock(lock);
ptlrpc_req_finished(req);
struct ldlm_lock *lock)
{
int lvb_len;
- CFS_LIST_HEAD(ast_list);
+ CFS_LIST_HEAD(ast_list);
int rc = 0;
- ENTRY;
+ ENTRY;
- LDLM_DEBUG(lock, "client completion callback handler START");
+ LDLM_DEBUG(lock, "client completion callback handler START");
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
- int to = cfs_time_seconds(1);
- while (to > 0) {
- cfs_schedule_timeout_and_set_state(
- CFS_TASK_INTERRUPTIBLE, to);
- if (lock->l_granted_mode == lock->l_req_mode ||
- lock->l_destroyed)
- break;
- }
- }
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
+ int to = cfs_time_seconds(1);
+ while (to > 0) {
+ cfs_schedule_timeout_and_set_state(
+ CFS_TASK_INTERRUPTIBLE, to);
+ if (lock->l_granted_mode == lock->l_req_mode ||
+ lock->l_flags & LDLM_FL_DESTROYED)
+ break;
+ }
+ }
lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
if (lvb_len < 0) {
}
}
- lock_res_and_lock(lock);
- if (lock->l_destroyed ||
- lock->l_granted_mode == lock->l_req_mode) {
- /* bug 11300: the lock has already been granted */
- unlock_res_and_lock(lock);
- LDLM_DEBUG(lock, "Double grant race happened");
+ lock_res_and_lock(lock);
+ if ((lock->l_flags & LDLM_FL_DESTROYED) ||
+ lock->l_granted_mode == lock->l_req_mode) {
+ /* bug 11300: the lock has already been granted */
+ unlock_res_and_lock(lock);
+ LDLM_DEBUG(lock, "Double grant race happened");
GOTO(out, rc = 0);
- }
+ }
- /* If we receive the completion AST before the actual enqueue returned,
- * then we might need to switch lock modes, resources, or extents. */
- if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
- lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
- LDLM_DEBUG(lock, "completion AST, new lock mode");
- }
+ /* If we receive the completion AST before the actual enqueue returned,
+ * then we might need to switch lock modes, resources, or extents. */
+ if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
+ lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
+ LDLM_DEBUG(lock, "completion AST, new lock mode");
+ }
- if (lock->l_resource->lr_type != LDLM_PLAIN) {
- ldlm_convert_policy_to_local(req->rq_export,
- dlm_req->lock_desc.l_resource.lr_type,
- &dlm_req->lock_desc.l_policy_data,
- &lock->l_policy_data);
- LDLM_DEBUG(lock, "completion AST, new policy data");
- }
+ if (lock->l_resource->lr_type != LDLM_PLAIN) {
+ ldlm_convert_policy_to_local(req->rq_export,
+ dlm_req->lock_desc.l_resource.lr_type,
+ &dlm_req->lock_desc.l_policy_data,
+ &lock->l_policy_data);
+ LDLM_DEBUG(lock, "completion AST, new policy data");
+ }
ldlm_resource_unlink_lock(lock);
if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
*/
static int ldlm_completion_tail(struct ldlm_lock *lock)
{
- long delay;
- int result;
-
- if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
- LDLM_DEBUG(lock, "client-side enqueue: destroyed");
- result = -EIO;
- } else {
- delay = cfs_time_sub(cfs_time_current_sec(),
- lock->l_last_activity);
- LDLM_DEBUG(lock, "client-side enqueue: granted after "
- CFS_DURATION_T"s", delay);
-
- /* Update our time estimate */
- at_measured(ldlm_lock_to_ns_at(lock),
- delay);
- result = 0;
- }
- return result;
+ long delay;
+ int result;
+
+ if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) {
+ LDLM_DEBUG(lock, "client-side enqueue: destroyed");
+ result = -EIO;
+ } else {
+ delay = cfs_time_sub(cfs_time_current_sec(),
+ lock->l_last_activity);
+ LDLM_DEBUG(lock, "client-side enqueue: granted after "
+ CFS_DURATION_T"s", delay);
+
+ /* Update our time estimate */
+ at_measured(ldlm_lock_to_ns_at(lock),
+ delay);
+ result = 0;
+ }
+ return result;
}
/**
LDLM_DEBUG(lock, "client-side enqueue START");
LASSERT(exp == lock->l_conn_export);
} else {
- const struct ldlm_callback_suite cbs = {
- .lcs_completion = einfo->ei_cb_cp,
- .lcs_blocking = einfo->ei_cb_bl,
- .lcs_glimpse = einfo->ei_cb_gl,
- .lcs_weigh = einfo->ei_cb_wg
- };
- lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
- einfo->ei_mode, &cbs, einfo->ei_cbdata,
+ const struct ldlm_callback_suite cbs = {
+ .lcs_completion = einfo->ei_cb_cp,
+ .lcs_blocking = einfo->ei_cb_bl,
+ .lcs_glimpse = einfo->ei_cb_gl
+ };
+ lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
+ einfo->ei_mode, &cbs, einfo->ei_cbdata,
lvb_len, lvb_type);
- if (lock == NULL)
- RETURN(-ENOMEM);
+ if (lock == NULL)
+ RETURN(-ENOMEM);
/* for the local lock, add the reference */
ldlm_lock_addref_internal(lock, einfo->ei_mode);
ldlm_lock2handle(lock, lockh);
void ldlm_resource_add_lock(struct ldlm_resource *res, cfs_list_t *head,
struct ldlm_lock *lock)
{
- check_res_locked(res);
+ check_res_locked(res);
- LDLM_DEBUG(lock, "About to add this lock:\n");
+ LDLM_DEBUG(lock, "About to add this lock:\n");
- if (lock->l_destroyed) {
- CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
- return;
- }
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
+ CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
+ return;
+ }
- LASSERT(cfs_list_empty(&lock->l_res_link));
+ LASSERT(cfs_list_empty(&lock->l_res_link));
- cfs_list_add_tail(&lock->l_res_link, head);
+ cfs_list_add_tail(&lock->l_res_link, head);
}
/**
ldlm_resource_dump(D_INFO, res);
LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);
- if (new->l_destroyed) {
- CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
- goto out;
- }
+ if (new->l_flags & LDLM_FL_DESTROYED) {
+ CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
+ goto out;
+ }
LASSERT(cfs_list_empty(&new->l_res_link));
rc = md_lock_match(sbi->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
&lli->lli_fid, LDLM_IBITS, &policy, LCK_CR, &lockh);
if (!rc) {
- struct ldlm_enqueue_info einfo = {LDLM_IBITS, LCK_CR,
- llu_md_blocking_ast, ldlm_completion_ast, NULL, NULL,
- inode};
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = LCK_CR,
+ .ei_cb_bl = llu_md_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ .ei_cbdata = inode,
+ };
rc = md_enqueue(sbi->ll_md_exp, &einfo, &it,
&op_data, &lockh, NULL, 0, NULL,
int cmd,
struct file_lock *file_lock)
{
- struct llu_inode_info *lli = llu_i2info(ino);
- struct intnl_stat *st = llu_i2stat(ino);
- struct ldlm_res_id res_id =
- { .name = {fid_seq(&lli->lli_fid),
- fid_oid(&lli->lli_fid),
- fid_ver(&lli->lli_fid),
- LDLM_FLOCK} };
- struct ldlm_enqueue_info einfo = { LDLM_FLOCK, 0, NULL,
- ldlm_flock_completion_ast, NULL, NULL, file_lock };
-
- struct lustre_handle lockh = {0};
- ldlm_policy_data_t flock;
+ struct llu_inode_info *lli = llu_i2info(ino);
+ struct ldlm_res_id res_id =
+ { .name = {fid_seq(&lli->lli_fid),
+ fid_oid(&lli->lli_fid),
+ fid_ver(&lli->lli_fid),
+ LDLM_FLOCK} };
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_FLOCK,
+ .ei_mode = 0,
+ .ei_cb_cp = ldlm_flock_completion_ast,
+ .ei_cbdata = file_lock,
+ };
+ struct intnl_stat *st = llu_i2stat(ino);
+ struct lustre_handle lockh = {0};
+ ldlm_policy_data_t flock;
__u64 flags = 0;
- int rc;
+ int rc;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%llu file_lock=%p\n",
(unsigned long long)st->st_ino, file_lock);
}
static int llu_lov_setstripe_ea_info(struct inode *ino, int flags,
- struct lov_user_md *lum, int lum_size)
+ struct lov_user_md *lum, int lum_size)
{
- struct llu_sb_info *sbi = llu_i2sbi(ino);
- struct llu_inode_info *lli = llu_i2info(ino);
- struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
- struct ldlm_enqueue_info einfo = { LDLM_IBITS, LCK_CR,
- llu_md_blocking_ast, ldlm_completion_ast, NULL, NULL, NULL };
- struct ptlrpc_request *req = NULL;
- struct lustre_md md;
- struct md_op_data data = {{ 0 }};
- struct lustre_handle lockh;
- int rc = 0;
- ENTRY;
+ struct llu_sb_info *sbi = llu_i2sbi(ino);
+ struct llu_inode_info *lli = llu_i2info(ino);
+ struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = LCK_CR,
+ .ei_cb_bl = llu_md_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
+ struct ptlrpc_request *req = NULL;
+ struct lustre_md md;
+ struct md_op_data data = {{ 0 }};
+ struct lustre_handle lockh;
+ int rc = 0;
+ ENTRY;
if (lli->lli_has_smd) {
- CDEBUG(D_IOCTL, "stripe already exists for ino "DFID"\n",
- PFID(&lli->lli_fid));
- return -EEXIST;
- }
+ CDEBUG(D_IOCTL, "stripe already exists for ino "DFID"\n",
+ PFID(&lli->lli_fid));
+ return -EEXIST;
+ }
llu_prep_md_op_data(&data, NULL, ino, NULL, 0, O_RDWR,
LUSTRE_OPC_ANY);
rc = md_lock_match(ll_i2sbi(dir)->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
if (!rc) {
- struct ldlm_enqueue_info einfo = {.ei_type = LDLM_IBITS,
- .ei_mode = mode,
- .ei_cb_bl =
- ll_md_blocking_ast,
- .ei_cb_cp =
- ldlm_completion_ast,
- .ei_cb_gl = NULL,
- .ei_cb_wg = NULL,
- .ei_cbdata = NULL};
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = mode,
+ .ei_cb_bl = ll_md_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
struct lookup_intent it = { .it_op = IT_READDIR };
struct ptlrpc_request *request;
struct md_op_data *op_data;
int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
{
- struct inode *inode = file->f_dentry->d_inode;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ldlm_enqueue_info einfo = { .ei_type = LDLM_FLOCK,
- .ei_cb_cp =ldlm_flock_completion_ast,
- .ei_cbdata = file_lock };
- struct md_op_data *op_data;
- struct lustre_handle lockh = {0};
- ldlm_policy_data_t flock = {{0}};
- int flags = 0;
- int rc;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_FLOCK,
+ .ei_cb_cp = ldlm_flock_completion_ast,
+ .ei_cbdata = file_lock,
+ };
+ struct md_op_data *op_data;
+ struct lustre_handle lockh = {0};
+ ldlm_policy_data_t flock = {{0}};
+ int flags = 0;
+ int rc;
int rc2 = 0;
- ENTRY;
+ ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n",
inode->i_ino, file_lock);
struct lookup_intent it;
struct lustre_handle lockh;
ldlm_mode_t mode;
- struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS,
- .ei_mode = LCK_CR,
- .ei_cb_bl = ll_md_blocking_ast,
- .ei_cb_cp = ldlm_completion_ast,
- .ei_cbdata = NULL };
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = LCK_CR,
+ .ei_cb_bl = ll_md_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
int rc;
ENTRY;
* this and use the request from revalidate. In this case, revalidate
* never dropped its reference, so the refcounts are all OK */
if (!it_disposition(it, DISP_ENQ_COMPLETE)) {
- struct ldlm_enqueue_info einfo =
- { LDLM_IBITS, it_to_lock_mode(it), cb_blocking,
- ldlm_completion_ast, NULL, NULL, NULL };
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = it_to_lock_mode(it),
+ .ei_cb_bl = cb_blocking,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
/* For case if upper layer did not alloc fid, do it now. */
if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
void *data, __u32 lvb_len, void *lvb_swabber,
struct lustre_handle *lockh)
{
- struct config_llog_data *cld = (struct config_llog_data *)data;
- struct ldlm_enqueue_info einfo = { type, mode, mgc_blocking_ast,
- ldlm_completion_ast, NULL, NULL, NULL };
- struct ptlrpc_request *req;
- int short_limit = cld_is_sptlrpc(cld);
- int rc;
- ENTRY;
+ struct config_llog_data *cld = (struct config_llog_data *)data;
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = type,
+ .ei_mode = mode,
+ .ei_cb_bl = mgc_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
+ struct ptlrpc_request *req;
+ int short_limit = cld_is_sptlrpc(cld);
+ int rc;
+ ENTRY;
CDEBUG(D_MGC, "Enqueue for %s (res "LPX64")\n", cld->cld_logname,
cld->cld_resid.name[0]);
{
struct ofd_thread_info *info = ofd_info(env);
struct lustre_handle lockh;
- __u64 flags = LDLM_AST_DISCARD_DATA, rc = 0;
+ __u64 flags = LDLM_FL_AST_DISCARD_DATA;
+ __u64 rc = 0;
ldlm_policy_data_t policy = {
.l_extent = { 0, OBD_OBJECT_EOF }
};
*/
static int osc_lock_invariant(struct osc_lock *ols)
{
- struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
- struct ldlm_lock *olock = ols->ols_lock;
- int handle_used = lustre_handle_is_used(&ols->ols_handle);
-
- return
- ergo(osc_lock_is_lockless(ols),
- ols->ols_locklessable && ols->ols_lock == NULL) ||
- (ergo(olock != NULL, handle_used) &&
- ergo(olock != NULL,
- olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
- /*
- * Check that ->ols_handle and ->ols_lock are consistent, but
- * take into account that they are set at the different time.
- */
- ergo(handle_used,
- ergo(lock != NULL && olock != NULL, lock == olock) &&
- ergo(lock == NULL, olock == NULL)) &&
- ergo(ols->ols_state == OLS_CANCELLED,
- olock == NULL && !handle_used) &&
- /*
- * DLM lock is destroyed only after we have seen cancellation
- * ast.
- */
- ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
- !olock->l_destroyed) &&
- ergo(ols->ols_state == OLS_GRANTED,
- olock != NULL &&
- olock->l_req_mode == olock->l_granted_mode &&
- ols->ols_hold));
+ struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
+ struct ldlm_lock *olock = ols->ols_lock;
+ int handle_used = lustre_handle_is_used(&ols->ols_handle);
+
+ if (ergo(osc_lock_is_lockless(ols),
+ ols->ols_locklessable && ols->ols_lock == NULL))
+ return 1;
+
+ /*
+ * If all the following "ergo"s are true, return 1, otherwise 0
+ */
+ if (! ergo(olock != NULL, handle_used))
+ return 0;
+
+ if (! ergo(olock != NULL,
+ olock->l_handle.h_cookie == ols->ols_handle.cookie))
+ return 0;
+
+	/*
+	 * Check that ->ols_handle and ->ols_lock are consistent, but
+	 * take into account that they are set at different times.
+	 */
+	if (! ergo(handle_used,
+		   ergo(lock != NULL && olock != NULL, lock == olock) &&
+		   ergo(lock == NULL, olock == NULL)))
+		return 0;
+	if (! ergo(ols->ols_state == OLS_CANCELLED,
+		   olock == NULL && !handle_used))
+		return 0;
+ /*
+ * DLM lock is destroyed only after we have seen cancellation
+ * ast.
+ */
+ if (! ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
+ ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
+ return 0;
+
+ if (! ergo(ols->ols_state == OLS_GRANTED,
+ olock != NULL &&
+ olock->l_req_mode == olock->l_granted_mode &&
+ ols->ols_hold))
+ return 0;
+ return 1;
}
/*****************************************************************************
static __u64 osc_enq2ldlm_flags(__u32 enqflags)
{
- __u64 result = 0;
+ __u64 result = 0;
- LASSERT((enqflags & ~CEF_MASK) == 0);
+ LASSERT((enqflags & ~CEF_MASK) == 0);
- if (enqflags & CEF_NONBLOCK)
- result |= LDLM_FL_BLOCK_NOWAIT;
- if (enqflags & CEF_ASYNC)
- result |= LDLM_FL_HAS_INTENT;
- if (enqflags & CEF_DISCARD_DATA)
- result |= LDLM_AST_DISCARD_DATA;
- return result;
+ if (enqflags & CEF_NONBLOCK)
+ result |= LDLM_FL_BLOCK_NOWAIT;
+ if (enqflags & CEF_ASYNC)
+ result |= LDLM_FL_HAS_INTENT;
+ if (enqflags & CEF_DISCARD_DATA)
+ result |= LDLM_FL_AST_DISCARD_DATA;
+ return result;
}
/**
return cl_object_header(slice->cls_obj)->coh_pages;
}
-/**
- * Get the weight of dlm lock for early cancellation.
- *
- * XXX: it should return the pages covered by this \a dlmlock.
- */
-static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
-{
- struct cl_env_nest nest;
- struct lu_env *env;
- struct osc_lock *lock;
- struct cl_lock *cll;
- unsigned long weight;
- ENTRY;
-
- cfs_might_sleep();
- /*
- * osc_ldlm_weigh_ast has a complex context since it might be called
- * because of lock canceling, or from user's input. We have to make
- * a new environment for it. Probably it is implementation safe to use
- * the upper context because cl_lock_put don't modify environment
- * variables. But in case of ..
- */
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env))
- /* Mostly because lack of memory, tend to eliminate this lock*/
- RETURN(0);
-
- LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
- lock = osc_ast_data_get(dlmlock);
- if (lock == NULL) {
- /* cl_lock was destroyed because of memory pressure.
- * It is much reasonable to assign this type of lock
- * a lower cost.
- */
- GOTO(out, weight = 0);
- }
-
- cll = lock->ols_cl.cls_lock;
- cl_lock_mutex_get(env, cll);
- weight = cl_lock_weigh(env, cll);
- cl_lock_mutex_put(env, cll);
- osc_ast_data_put(env, lock);
- EXIT;
-
-out:
- cl_env_nested_put(&nest, env);
- return weight;
-}
-
static void osc_lock_build_einfo(const struct lu_env *env,
const struct cl_lock *clock,
struct osc_lock *lock,
einfo->ei_cb_bl = osc_ldlm_blocking_ast;
einfo->ei_cb_cp = osc_ldlm_completion_ast;
einfo->ei_cb_gl = osc_ldlm_glimpse_ast;
- einfo->ei_cb_wg = osc_ldlm_weigh_ast;
einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
}
/* standard truncate optimization: if file body is completely
* destroyed, don't send data back to the server. */
if (body->oa.o_size == 0)
- flags |= LDLM_AST_DISCARD_DATA;
+ flags |= LDLM_FL_AST_DISCARD_DATA;
repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
repbody->oa = body->oa;
{
LASSERT(lock->l_export == opd->opd_exp);
- if (lock->l_destroyed) /* lock already cancelled */
+ if (lock->l_flags & LDLM_FL_DESTROYED) /* lock already cancelled */
return;
/* XXX: never try to grab resource lock here because we're inside
#include "qsd_internal.h"
+typedef int (enqi_bl_cb_t)(struct ldlm_lock *lock,
+ struct ldlm_lock_desc *desc, void *data,
+ int flag);
+static enqi_bl_cb_t qsd_glb_blocking_ast, qsd_id_blocking_ast;
+
+typedef int (enqi_gl_cb_t)(struct ldlm_lock *lock, void *data);
+static enqi_gl_cb_t qsd_glb_glimpse_ast, qsd_id_glimpse_ast;
+
+struct ldlm_enqueue_info qsd_glb_einfo = {
+ .ei_type = LDLM_PLAIN,
+ .ei_mode = LCK_CR,
+ .ei_cb_bl = qsd_glb_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ .ei_cb_gl = qsd_glb_glimpse_ast,
+};
+
+struct ldlm_enqueue_info qsd_id_einfo = {
+ .ei_type = LDLM_PLAIN,
+ .ei_mode = LCK_CR,
+ .ei_cb_bl = qsd_id_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ .ei_cb_gl = qsd_id_glimpse_ast,
+};
+
/*
* Return qsd_qtype_info structure associated with a global lock
*
return rc;
}
-struct ldlm_enqueue_info qsd_glb_einfo = { LDLM_PLAIN,
- LCK_CR,
- qsd_glb_blocking_ast,
- ldlm_completion_ast,
- qsd_glb_glimpse_ast,
- NULL, NULL };
-/*
+/**
* Blocking callback handler for per-ID lock
*
* \param lock - is the lock for which ast occurred.
RETURN(rc);
}
-struct ldlm_enqueue_info qsd_id_einfo = { LDLM_PLAIN,
- LCK_CR,
- qsd_id_blocking_ast,
- ldlm_completion_ast,
- qsd_id_glimpse_ast,
- NULL, NULL };
-
-/*
+/**
* Check whether a slave already own a ldlm lock for the quota identifier \qid.
*
* \param lockh - is the local lock handle from lquota entry.