/* -*- buffer-read-only: t -*- vi: set ro:
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * \file lustre_dlm_flags.h
 * The flags and collections of flags (masks) for \see struct ldlm_lock.
 *
 * \addtogroup LDLM Lustre Distributed Lock Manager
 * @{
 *
 * \name flags
 * The flags and collections of flags (masks) for \see struct ldlm_lock.
 * @{
 */

#ifndef LDLM_ALL_FLAGS_MASK

/** l_flags bits marked as "all_flags" bits */
#define LDLM_FL_ALL_FLAGS_MASK 0x00FFFFFFC08F932FULL
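
/*
 * Example (illustrative sketch only, not part of the flag definitions):
 * the "all_flags" mask can be used to drop any bit that is not a known
 * l_flags bit, e.g. before storing a flag word received from the wire:
 *
 *	__u64 flags = incoming_flags;		// hypothetical input value
 *	flags &= LDLM_FL_ALL_FLAGS_MASK;	// keep only defined flag bits
 */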

/** extent, mode, or resource changed */
#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL /* bit 0 */
#define ldlm_is_lock_changed(_l) LDLM_TEST_FLAG(( _l), 1ULL << 0)
#define ldlm_set_lock_changed(_l) LDLM_SET_FLAG(( _l), 1ULL << 0)
#define ldlm_clear_lock_changed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 0)
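
/*
 * Example (illustrative sketch only): every flag below follows this same
 * pattern; the ldlm_is_, ldlm_set_ and ldlm_clear_ wrappers expand to the
 * generic LDLM_TEST_FLAG/LDLM_SET_FLAG/LDLM_CLEAR_FLAG macros defined at
 * the end of this file. For a hypothetical "struct ldlm_lock *lock":
 *
 *	ldlm_set_lock_changed(lock);		// lock->l_flags |= 1ULL << 0
 *	if (ldlm_is_lock_changed(lock))		// (lock->l_flags & (1ULL << 0)) != 0
 *		ldlm_clear_lock_changed(lock);	// lock->l_flags &= ~(1ULL << 0)
 */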

/**
 * Server placed lock on granted list, or a recovering client wants the
 * lock added to the granted list, no questions asked. */
#define LDLM_FL_BLOCK_GRANTED 0x0000000000000002ULL /* bit 1 */
#define ldlm_is_block_granted(_l) LDLM_TEST_FLAG(( _l), 1ULL << 1)
#define ldlm_set_block_granted(_l) LDLM_SET_FLAG(( _l), 1ULL << 1)
#define ldlm_clear_block_granted(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 1)

/**
 * Server placed lock on conv list, or a recovering client wants the lock
 * added to the conv list, no questions asked. */
#define LDLM_FL_BLOCK_CONV 0x0000000000000004ULL /* bit 2 */
#define ldlm_is_block_conv(_l) LDLM_TEST_FLAG(( _l), 1ULL << 2)
#define ldlm_set_block_conv(_l) LDLM_SET_FLAG(( _l), 1ULL << 2)
#define ldlm_clear_block_conv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 2)

/**
 * Server placed lock on wait list, or a recovering client wants the lock
 * added to the wait list, no questions asked. */
#define LDLM_FL_BLOCK_WAIT 0x0000000000000008ULL /* bit 3 */
#define ldlm_is_block_wait(_l) LDLM_TEST_FLAG(( _l), 1ULL << 3)
#define ldlm_set_block_wait(_l) LDLM_SET_FLAG(( _l), 1ULL << 3)
#define ldlm_clear_block_wait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 3)

/**
 * Lock request is speculative/asynchronous, and cannot wait for any reason.
 * Fail the lock request if any blocking locks are encountered. */
#define LDLM_FL_SPECULATIVE 0x0000000000000010ULL /* bit 4 */
#define ldlm_is_speculative(_l) LDLM_TEST_FLAG((_l), 1ULL << 4)
#define ldlm_set_speculative(_l) LDLM_SET_FLAG((_l), 1ULL << 4)
#define ldlm_clear_speculative(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 4)

/** blocking or cancel packet was queued for sending. */
#define LDLM_FL_AST_SENT 0x0000000000000020ULL /* bit 5 */
#define ldlm_is_ast_sent(_l) LDLM_TEST_FLAG(( _l), 1ULL << 5)
#define ldlm_set_ast_sent(_l) LDLM_SET_FLAG(( _l), 1ULL << 5)
#define ldlm_clear_ast_sent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 5)

/**
 * Lock is being replayed. This could probably be implied by the fact that
 * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
#define LDLM_FL_REPLAY 0x0000000000000100ULL /* bit 8 */
#define ldlm_is_replay(_l) LDLM_TEST_FLAG(( _l), 1ULL << 8)
#define ldlm_set_replay(_l) LDLM_SET_FLAG(( _l), 1ULL << 8)
#define ldlm_clear_replay(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 8)

/** Don't grant lock, just do intent. */
#define LDLM_FL_INTENT_ONLY 0x0000000000000200ULL /* bit 9 */
#define ldlm_is_intent_only(_l) LDLM_TEST_FLAG(( _l), 1ULL << 9)
#define ldlm_set_intent_only(_l) LDLM_SET_FLAG(( _l), 1ULL << 9)
#define ldlm_clear_intent_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 9)

/** lock request has intent */
#define LDLM_FL_HAS_INTENT 0x0000000000001000ULL /* bit 12 */
#define ldlm_is_has_intent(_l) LDLM_TEST_FLAG(( _l), 1ULL << 12)
#define ldlm_set_has_intent(_l) LDLM_SET_FLAG(( _l), 1ULL << 12)
#define ldlm_clear_has_intent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 12)

/** flock deadlock detected */
#define LDLM_FL_FLOCK_DEADLOCK 0x0000000000008000ULL /* bit 15 */
#define ldlm_is_flock_deadlock(_l) LDLM_TEST_FLAG(( _l), 1ULL << 15)
#define ldlm_set_flock_deadlock(_l) LDLM_SET_FLAG(( _l), 1ULL << 15)
#define ldlm_clear_flock_deadlock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 15)

/** discard (no writeback (PW locks) or page retention (PR locks)) on cancel */
#define LDLM_FL_DISCARD_DATA 0x0000000000010000ULL /* bit 16 */
#define ldlm_is_discard_data(_l) LDLM_TEST_FLAG(( _l), 1ULL << 16)
#define ldlm_set_discard_data(_l) LDLM_SET_FLAG(( _l), 1ULL << 16)
#define ldlm_clear_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 16)

/** Blocked by group lock - wait indefinitely */
#define LDLM_FL_NO_TIMEOUT 0x0000000000020000ULL /* bit 17 */
#define ldlm_is_no_timeout(_l) LDLM_TEST_FLAG(( _l), 1ULL << 17)
#define ldlm_set_no_timeout(_l) LDLM_SET_FLAG(( _l), 1ULL << 17)
#define ldlm_clear_no_timeout(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 17)

/**
 * Server told not to wait if blocked. For AGL, OST will not send glimpse
 * callback. */
#define LDLM_FL_BLOCK_NOWAIT 0x0000000000040000ULL /* bit 18 */
#define ldlm_is_block_nowait(_l) LDLM_TEST_FLAG(( _l), 1ULL << 18)
#define ldlm_set_block_nowait(_l) LDLM_SET_FLAG(( _l), 1ULL << 18)
#define ldlm_clear_block_nowait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 18)

/** return blocking lock */
#define LDLM_FL_TEST_LOCK 0x0000000000080000ULL /* bit 19 */
#define ldlm_is_test_lock(_l) LDLM_TEST_FLAG(( _l), 1ULL << 19)
#define ldlm_set_test_lock(_l) LDLM_SET_FLAG(( _l), 1ULL << 19)
#define ldlm_clear_test_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 19)

/** match lock only */
#define LDLM_FL_MATCH_LOCK 0x0000000000100000ULL /* bit 20 */

/**
 * Immediately cancel such locks when they block some other locks. Send
 * cancel notification to original lock holder, but expect no reply. This
 * is for clients (like liblustre) that cannot be expected to reliably
 * respond to blocking ASTs. */
#define LDLM_FL_CANCEL_ON_BLOCK 0x0000000000800000ULL /* bit 23 */
#define ldlm_is_cancel_on_block(_l) LDLM_TEST_FLAG(( _l), 1ULL << 23)
#define ldlm_set_cancel_on_block(_l) LDLM_SET_FLAG(( _l), 1ULL << 23)
#define ldlm_clear_cancel_on_block(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 23)

/**
 * Flag whether a lock is enqueued from a distributed transaction, and the
 * requesting lock mode is PW/EX; if so, it will be checked for compatibility
 * with COS locks. Unlike the original COS semantics, transactions from the
 * same client are also treated as lock conflicts. */
#define LDLM_FL_COS_INCOMPAT 0x0000000001000000ULL /* bit 24 */
#define ldlm_is_cos_incompat(_l) LDLM_TEST_FLAG((_l), 1ULL << 24)
#define ldlm_set_cos_incompat(_l) LDLM_SET_FLAG((_l), 1ULL << 24)
#define ldlm_clear_cos_incompat(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 24)

/**
 * Part of original lockahead implementation, OBD_CONNECT_LOCKAHEAD_OLD.
 * Reserved temporarily to allow those implementations to keep working.
 * Will be removed after 2.12 release. */
#define LDLM_FL_LOCKAHEAD_OLD_RESERVED 0x0000000010000000ULL /* bit 28 */
#define ldlm_is_do_not_expand_io(_l) LDLM_TEST_FLAG((_l), 1ULL << 28)
#define ldlm_set_do_not_expand_io(_l) LDLM_SET_FLAG((_l), 1ULL << 28)
#define ldlm_clear_do_not_expand_io(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 28)

/**
 * Do not expand this lock. Grant it only on the extent requested.
 * Used for manually requested locks from the client (LU_LADVISE_LOCKAHEAD). */
#define LDLM_FL_NO_EXPANSION 0x0000000020000000ULL /* bit 29 */
#define ldlm_is_do_not_expand(_l) LDLM_TEST_FLAG((_l), 1ULL << 29)
#define ldlm_set_do_not_expand(_l) LDLM_SET_FLAG((_l), 1ULL << 29)
#define ldlm_clear_do_not_expand(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 29)

/**
 * measure lock contention and return -EUSERS if locking contention is high */
#define LDLM_FL_DENY_ON_CONTENTION 0x0000000040000000ULL /* bit 30 */
#define ldlm_is_deny_on_contention(_l) LDLM_TEST_FLAG(( _l), 1ULL << 30)
#define ldlm_set_deny_on_contention(_l) LDLM_SET_FLAG(( _l), 1ULL << 30)
#define ldlm_clear_deny_on_contention(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 30)

/**
 * These are flags that are mapped into the flags and ASTs of blocking
 * locks. Add FL_DISCARD to blocking ASTs. */
#define LDLM_FL_AST_DISCARD_DATA 0x0000000080000000ULL /* bit 31 */
#define ldlm_is_ast_discard_data(_l) LDLM_TEST_FLAG(( _l), 1ULL << 31)
#define ldlm_set_ast_discard_data(_l) LDLM_SET_FLAG(( _l), 1ULL << 31)
#define ldlm_clear_ast_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 31)

/**
 * Used for marking lock as a target for -EINTR while cp_ast sleep emulation
 * + race with upcoming bl_ast. */
#define LDLM_FL_FAIL_LOC 0x0000000100000000ULL /* bit 32 */
#define ldlm_is_fail_loc(_l) LDLM_TEST_FLAG(( _l), 1ULL << 32)
#define ldlm_set_fail_loc(_l) LDLM_SET_FLAG(( _l), 1ULL << 32)
#define ldlm_clear_fail_loc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 32)

/**
 * Used while processing the unused list to know that we have already
 * handled this lock and decided to skip it. */
#define LDLM_FL_SKIPPED 0x0000000200000000ULL /* bit 33 */
#define ldlm_is_skipped(_l) LDLM_TEST_FLAG(( _l), 1ULL << 33)
#define ldlm_set_skipped(_l) LDLM_SET_FLAG(( _l), 1ULL << 33)
#define ldlm_clear_skipped(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 33)

/** this lock is being destroyed */
#define LDLM_FL_CBPENDING 0x0000000400000000ULL /* bit 34 */
#define ldlm_is_cbpending(_l) LDLM_TEST_FLAG(( _l), 1ULL << 34)
#define ldlm_set_cbpending(_l) LDLM_SET_FLAG(( _l), 1ULL << 34)
#define ldlm_clear_cbpending(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 34)

/** not a real flag, not saved in lock */
#define LDLM_FL_WAIT_NOREPROC 0x0000000800000000ULL /* bit 35 */
#define ldlm_is_wait_noreproc(_l) LDLM_TEST_FLAG(( _l), 1ULL << 35)
#define ldlm_set_wait_noreproc(_l) LDLM_SET_FLAG(( _l), 1ULL << 35)
#define ldlm_clear_wait_noreproc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 35)

/** cancellation callback already run */
#define LDLM_FL_CANCEL 0x0000001000000000ULL /* bit 36 */
#define ldlm_is_cancel(_l) LDLM_TEST_FLAG(( _l), 1ULL << 36)
#define ldlm_set_cancel(_l) LDLM_SET_FLAG(( _l), 1ULL << 36)
#define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36)

/** whatever it might mean -- never transmitted? */
#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL /* bit 37 */
#define ldlm_is_local_only(_l) LDLM_TEST_FLAG(( _l), 1ULL << 37)
#define ldlm_set_local_only(_l) LDLM_SET_FLAG(( _l), 1ULL << 37)
#define ldlm_clear_local_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 37)

/** don't run the cancel callback under ldlm_cli_cancel_unused */
#define LDLM_FL_FAILED 0x0000004000000000ULL /* bit 38 */
#define ldlm_is_failed(_l) LDLM_TEST_FLAG(( _l), 1ULL << 38)
#define ldlm_set_failed(_l) LDLM_SET_FLAG(( _l), 1ULL << 38)
#define ldlm_clear_failed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 38)

/** lock cancel has already been sent */
#define LDLM_FL_CANCELING 0x0000008000000000ULL /* bit 39 */
#define ldlm_is_canceling(_l) LDLM_TEST_FLAG(( _l), 1ULL << 39)
#define ldlm_set_canceling(_l) LDLM_SET_FLAG(( _l), 1ULL << 39)
#define ldlm_clear_canceling(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 39)

/** local lock (ie, no srv/cli split) */
#define LDLM_FL_LOCAL 0x0000010000000000ULL /* bit 40 */
#define ldlm_is_local(_l) LDLM_TEST_FLAG(( _l), 1ULL << 40)
#define ldlm_set_local(_l) LDLM_SET_FLAG(( _l), 1ULL << 40)
#define ldlm_clear_local(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 40)

/**
 * XXX FIXME: This is being added to b_size as a low-risk fix to the
 * fact that the LVB filling happens _after_ the lock has been granted,
 * so another thread can match it before the LVB has been updated. As a
 * dirty hack, we set LDLM_FL_LVB_READY only after we've done the LVB poop.
 * This is only needed on LOV/OSC now, where LVB is actually used and
 * callers must set it in input flags.
 *
 * The proper fix is to do the granting inside of the completion AST,
 * which can be replaced with a LVB-aware wrapping function for OSC locks.
 * That change is pretty high-risk, though, and would need a lot more
 * testing. */
#define LDLM_FL_LVB_READY 0x0000020000000000ULL /* bit 41 */
#define ldlm_is_lvb_ready(_l) LDLM_TEST_FLAG(( _l), 1ULL << 41)
#define ldlm_set_lvb_ready(_l) LDLM_SET_FLAG(( _l), 1ULL << 41)
#define ldlm_clear_lvb_ready(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 41)

/**
 * A lock contributes to the known minimum size (KMS) calculation until it
 * has finished the part of its cancelation that performs write back on its
 * dirty pages. It can remain on the granted list during this whole time.
 * Threads racing to update the KMS after performing their writeback need
 * to know to exclude each other's locks from the calculation as they walk
 * the granted list. */
#define LDLM_FL_KMS_IGNORE 0x0000040000000000ULL /* bit 42 */
#define ldlm_is_kms_ignore(_l) LDLM_TEST_FLAG(( _l), 1ULL << 42)
#define ldlm_set_kms_ignore(_l) LDLM_SET_FLAG(( _l), 1ULL << 42)
#define ldlm_clear_kms_ignore(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 42)
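
/*
 * Example (illustrative sketch only, with a hypothetical granted-list
 * cursor "lck"): a caller recomputing the KMS would skip locks carrying
 * this flag while walking the granted list:
 *
 *	if (ldlm_is_kms_ignore(lck))
 *		continue;	// exclude this lock from the KMS calculation
 */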

/** completion AST to be executed */
#define LDLM_FL_CP_REQD 0x0000080000000000ULL /* bit 43 */
#define ldlm_is_cp_reqd(_l) LDLM_TEST_FLAG(( _l), 1ULL << 43)
#define ldlm_set_cp_reqd(_l) LDLM_SET_FLAG(( _l), 1ULL << 43)
#define ldlm_clear_cp_reqd(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 43)

/** cleanup_resource has already handled the lock */
#define LDLM_FL_CLEANED 0x0000100000000000ULL /* bit 44 */
#define ldlm_is_cleaned(_l) LDLM_TEST_FLAG(( _l), 1ULL << 44)
#define ldlm_set_cleaned(_l) LDLM_SET_FLAG(( _l), 1ULL << 44)
#define ldlm_clear_cleaned(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 44)

/**
 * optimization hint: LDLM can run the blocking callback from the current
 * context without involving a separate thread, in order to decrease the
 * context-switch rate. */
#define LDLM_FL_ATOMIC_CB 0x0000200000000000ULL /* bit 45 */
#define ldlm_is_atomic_cb(_l) LDLM_TEST_FLAG(( _l), 1ULL << 45)
#define ldlm_set_atomic_cb(_l) LDLM_SET_FLAG(( _l), 1ULL << 45)
#define ldlm_clear_atomic_cb(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 45)

/**
 * It may happen that a client initiates two operations, e.g. unlink and
 * mkdir, such that the server sends a blocking AST for conflicting locks
 * to this client for the first operation, whereas the second operation
 * has canceled this lock and is waiting for rpc_lock which is taken by
 * the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in
 * the lock to prevent the Early Lock Cancel (ELC) code from cancelling it. */
#define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */
#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG(( _l), 1ULL << 46)
#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG(( _l), 1ULL << 46)
#define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46)

/**
 * Set by ldlm_cancel_callback() when lock cache is dropped to let
 * ldlm_callback_handler() return EINVAL to the server. It is used when
 * ELC RPC is already prepared and is waiting for rpc_lock, too late to
 * send a separate CANCEL RPC. */
#define LDLM_FL_BL_DONE 0x0000800000000000ULL /* bit 47 */
#define ldlm_is_bl_done(_l) LDLM_TEST_FLAG(( _l), 1ULL << 47)
#define ldlm_set_bl_done(_l) LDLM_SET_FLAG(( _l), 1ULL << 47)
#define ldlm_clear_bl_done(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 47)

/**
 * Don't put lock into the LRU list, so that it is not canceled due
 * to aging. Used by MGC locks, they are cancelled only at unmount or
 * by callback. */
#define LDLM_FL_NO_LRU 0x0001000000000000ULL /* bit 48 */
#define ldlm_is_no_lru(_l) LDLM_TEST_FLAG(( _l), 1ULL << 48)
#define ldlm_set_no_lru(_l) LDLM_SET_FLAG(( _l), 1ULL << 48)
#define ldlm_clear_no_lru(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 48)

/**
 * Set for locks that failed and where the server has been notified.
 *
 * Protected by lock and resource locks. */
#define LDLM_FL_FAIL_NOTIFIED 0x0002000000000000ULL /* bit 49 */
#define ldlm_is_fail_notified(_l) LDLM_TEST_FLAG(( _l), 1ULL << 49)
#define ldlm_set_fail_notified(_l) LDLM_SET_FLAG(( _l), 1ULL << 49)
#define ldlm_clear_fail_notified(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 49)

/**
 * Set for locks that were removed from class hash table and will
 * be destroyed when last reference to them is released. Set by
 * ldlm_lock_destroy_internal().
 *
 * Protected by lock and resource locks. */
#define LDLM_FL_DESTROYED 0x0004000000000000ULL /* bit 50 */
#define ldlm_is_destroyed(_l) LDLM_TEST_FLAG(( _l), 1ULL << 50)
#define ldlm_set_destroyed(_l) LDLM_SET_FLAG(( _l), 1ULL << 50)
#define ldlm_clear_destroyed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 50)

/** flag whether this is a server namespace lock */
#define LDLM_FL_SERVER_LOCK 0x0008000000000000ULL /* bit 51 */
#define ldlm_is_server_lock(_l) LDLM_TEST_FLAG(( _l), 1ULL << 51)
#define ldlm_set_server_lock(_l) LDLM_SET_FLAG(( _l), 1ULL << 51)
#define ldlm_clear_server_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 51)

/**
 * It's set in lock_res_and_lock() and unset in unlock_res_and_lock().
 *
 * NB: compared with check_res_locked(), checking this bit is cheaper.
 * Also, spin_is_locked() is deprecated for kernel code; one reason is
 * because it works only for SMP so user needs to add extra macros like
 * LASSERT_SPIN_LOCKED for uniprocessor kernels. */
#define LDLM_FL_RES_LOCKED 0x0010000000000000ULL /* bit 52 */
#define ldlm_is_res_locked(_l) LDLM_TEST_FLAG(( _l), 1ULL << 52)
#define ldlm_set_res_locked(_l) LDLM_SET_FLAG(( _l), 1ULL << 52)
#define ldlm_clear_res_locked(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 52)

/**
 * It's set once we call ldlm_add_waiting_lock_res_locked() to start the
 * lock-timeout timer and it will never be reset.
 *
 * Protected by lock and resource locks. */
#define LDLM_FL_WAITED 0x0020000000000000ULL /* bit 53 */
#define ldlm_is_waited(_l) LDLM_TEST_FLAG(( _l), 1ULL << 53)
#define ldlm_set_waited(_l) LDLM_SET_FLAG(( _l), 1ULL << 53)
#define ldlm_clear_waited(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 53)

/** Flag whether this is a server namespace lock. */
#define LDLM_FL_NS_SRV 0x0040000000000000ULL /* bit 54 */
#define ldlm_is_ns_srv(_l) LDLM_TEST_FLAG(( _l), 1ULL << 54)
#define ldlm_set_ns_srv(_l) LDLM_SET_FLAG(( _l), 1ULL << 54)
#define ldlm_clear_ns_srv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 54)

/** Flag whether this lock can be reused. Used by exclusive open. */
#define LDLM_FL_EXCL 0x0080000000000000ULL /* bit 55 */
#define ldlm_is_excl(_l) LDLM_TEST_FLAG(( _l), 1ULL << 55)
#define ldlm_set_excl(_l) LDLM_SET_FLAG(( _l), 1ULL << 55)
#define ldlm_clear_excl(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 55)

/** Flag whether a lock is found on server for re-sent RPC. */
#define LDLM_FL_RESENT 0x0100000000000000ULL /* bit 56 */

/**
 * Flag whether Commit-on-Sharing is enabled. If LDLM_FL_COS_INCOMPAT is set,
 * this flag may not be set, because once the former is set this flag is no
 * longer checked; for a cross-MDT lock COS_INCOMPAT is always set, but the
 * AST is handled in the LDLM context, which does not know whether COS is
 * enabled or not. */
#define LDLM_FL_COS_ENABLED 0x0200000000000000ULL /* bit 57 */
#define ldlm_is_cos_enabled(_l) LDLM_TEST_FLAG((_l), 1ULL << 57)
#define ldlm_set_cos_enabled(_l) LDLM_SET_FLAG((_l), 1ULL << 57)

/** l_flags bits marked as "ast" bits */
#define LDLM_FL_AST_MASK (LDLM_FL_FLOCK_DEADLOCK |\
			  LDLM_FL_AST_DISCARD_DATA)

/** l_flags bits marked as "blocked" bits */
#define LDLM_FL_BLOCKED_MASK (LDLM_FL_BLOCK_GRANTED |\
			      LDLM_FL_BLOCK_CONV |\
			      LDLM_FL_BLOCK_WAIT)

/** l_flags bits marked as "gone" bits */
#define LDLM_FL_GONE_MASK (LDLM_FL_DESTROYED |\
			   LDLM_FL_FAILED)

/** l_flags bits marked as "inherit" bits
 * Flags inherited from wire on enqueue/reply between client/server.
 * CANCEL_ON_BLOCK so server will not grant if a blocking lock is found
 * NO_TIMEOUT flag to force ldlm_lock_match() to wait with no timeout.
 * TEST_LOCK flag to not let TEST lock to be granted.
 * NO_EXPANSION to tell server not to expand extent of lock request */
#define LDLM_FL_INHERIT_MASK (LDLM_FL_CANCEL_ON_BLOCK |\
			      LDLM_FL_NO_TIMEOUT |\
			      LDLM_FL_TEST_LOCK |\
			      LDLM_FL_NO_EXPANSION)
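
/*
 * Example (illustrative sketch only, with a hypothetical "wire_flags"
 * value taken from an enqueue request or reply): only the inheritable
 * bits would be copied into the lock's own flag word:
 *
 *	lock->l_flags |= (wire_flags & LDLM_FL_INHERIT_MASK);
 */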

/** flags returned in @flags parameter on ldlm_lock_enqueue,
 * to be re-constructed on re-send */
#define LDLM_FL_SRV_ENQ_MASK (LDLM_FL_LOCK_CHANGED |\
			      LDLM_FL_BLOCKED_MASK |\
			      LDLM_FL_NO_TIMEOUT)

/** test for ldlm_lock flag bit set */
#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)

/** multi-bit test: are any of mask bits set? */
#define LDLM_HAVE_MASK(_l, _m) (((_l)->l_flags & LDLM_FL_##_m##_MASK) != 0)
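
/*
 * Example (illustrative only): LDLM_HAVE_MASK pastes its second argument
 * into a *_MASK name, so LDLM_HAVE_MASK(lock, GONE) expands to
 *
 *	(((lock)->l_flags & LDLM_FL_GONE_MASK) != 0)
 *
 * i.e. "is any of the gone-mask bits set on this lock?".
 */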

/** set a ldlm_lock flag bit */
#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))

/** clear a ldlm_lock flag bit */
#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))

/** @} */
/** @} */

#endif /* LDLM_ALL_FLAGS_MASK */