/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Whamcloud, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#ifndef _LUSTRE_DLM_H__
#define _LUSTRE_DLM_H__

/** \defgroup ldlm ldlm
 *
 * @{
 */
#if defined(__linux__)
#include <linux/lustre_dlm.h>
#elif defined(__APPLE__)
#include <darwin/lustre_dlm.h>
#elif defined(__WINNT__)
#include <winnt/lustre_dlm.h>
#else
#error Unsupported operating system.
#endif
#include <lustre_lib.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_handles.h>
#include <interval_tree.h> /* for interval_node{}, ldlm_extent */

#define OBD_LDLM_DEVICENAME  "ldlm"
#ifdef HAVE_BGL_SUPPORT
/* 1.5 times the maximum 128 tasks available in VN mode */
#define LDLM_DEFAULT_LRU_SIZE 196
#else
#define LDLM_DEFAULT_LRU_SIZE (100 * cfs_num_online_cpus())
#endif
#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
#define LDLM_CTIME_AGE_LIMIT (10)
#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
typedef enum {
        ELDLM_LOCK_CHANGED = 300,
        ELDLM_LOCK_ABORTED = 301,
        ELDLM_LOCK_REPLACED = 302,
        ELDLM_NO_LOCK_DATA = 303,

        ELDLM_NAMESPACE_EXISTS = 400,
        ELDLM_BAD_NAMESPACE = 401
} ldlm_error_t;

typedef enum {
        LDLM_NAMESPACE_SERVER = 1 << 0,
        LDLM_NAMESPACE_CLIENT = 1 << 1
} ldlm_side_t;
/**
 * Declaration of flags sent through the wire.
 */
#define LDLM_FL_LOCK_CHANGED   0x000001 /* extent, mode, or resource changed */

/* If the server returns one of these flags, then the lock was put on that
 * list. If the client sends one of these flags (during recovery ONLY!), it
 * wants the lock added to the specified list, no questions asked. -p */
#define LDLM_FL_BLOCK_GRANTED  0x000002
#define LDLM_FL_BLOCK_CONV     0x000004
#define LDLM_FL_BLOCK_WAIT     0x000008
/* Used to be LDLM_FL_CBPENDING     0x000010  moved to non-wire flags */

#define LDLM_FL_AST_SENT       0x000020 /* blocking or cancel packet was
                                         * queued for sending. */
/* Used to be LDLM_FL_WAIT_NOREPROC 0x000040  moved to non-wire flags */
/* Used to be LDLM_FL_CANCEL        0x000080  moved to non-wire flags */

/* Lock is being replayed. This could probably be implied by the fact that one
 * of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
#define LDLM_FL_REPLAY         0x000100

#define LDLM_FL_INTENT_ONLY    0x000200 /* don't grant lock, just do intent */

/* Used to be LDLM_FL_LOCAL_ONLY    0x000400  moved to non-wire flags */
/* Used to be LDLM_FL_FAILED        0x000800  moved to non-wire flags */

#define LDLM_FL_HAS_INTENT     0x001000 /* lock request has intent */

/* Used to be LDLM_FL_CANCELING     0x002000  moved to non-wire flags */
/* Used to be LDLM_FL_LOCAL         0x004000  moved to non-wire flags */

#define LDLM_FL_DISCARD_DATA   0x010000 /* discard (no writeback) on cancel */

#define LDLM_FL_NO_TIMEOUT     0x020000 /* blocked by group lock - wait
                                         * indefinitely */

/* file & record locking */
#define LDLM_FL_BLOCK_NOWAIT   0x040000 /* server told not to wait if blocked.
                                         * For AGL, OST will not send glimpse
                                         * callback. */
#define LDLM_FL_TEST_LOCK      0x080000 /* return blocking lock */

/* Used to be LDLM_FL_LVB_READY     0x100000  moved to non-wire flags */
/* Used to be LDLM_FL_KMS_IGNORE    0x200000  moved to non-wire flags */
/* Used to be LDLM_FL_NO_LRU        0x400000  moved to non-wire flags */
/* Immediately cancel such locks when they block some other locks. Send
 * cancel notification to the original lock holder, but expect no reply. This
 * is for clients (like liblustre) that cannot be expected to reliably
 * respond to blocking ASTs. */
#define LDLM_FL_CANCEL_ON_BLOCK 0x800000

/* Flags inherited from parent lock when doing intents. */
#define LDLM_INHERIT_FLAGS     (LDLM_FL_CANCEL_ON_BLOCK)

/* Used to be LDLM_FL_CP_REQD    0x1000000   moved to non-wire flags */
/* Used to be LDLM_FL_CLEANED    0x2000000   moved to non-wire flags */
/* Used to be LDLM_FL_ATOMIC_CB  0x4000000   moved to non-wire flags */
/* Used to be LDLM_FL_BL_AST     0x10000000  moved to non-wire flags */
/* Used to be LDLM_FL_BL_DONE    0x20000000  moved to non-wire flags */

/* measure lock contention and return -EUSERS if locking contention is high */
#define LDLM_FL_DENY_ON_CONTENTION 0x40000000

/* These are flags that are mapped into the flags and ASTs of blocking locks */
#define LDLM_AST_DISCARD_DATA  0x80000000 /* Add FL_DISCARD to blocking ASTs */

/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
#define LDLM_AST_FLAGS         (LDLM_FL_DISCARD_DATA)
/*
 * --------------------------------------------------------------------------
 * NOTE! Starting from this point, LDLM_FL_* flags with values above
 * 0x80000000 will not be sent over the wire.
 * --------------------------------------------------------------------------
 */

/**
 * Declaration of flags not sent through the wire.
 */
/* Used for marking lock as a target for -EINTR while cp_ast sleep
 * emulation + race with upcoming bl_ast. */
#define LDLM_FL_FAIL_LOC       0x100000000ULL
/* Used while processing the unused list to know that we have already
 * handled this lock and decided to skip it. */
#define LDLM_FL_SKIPPED        0x200000000ULL
/* this lock is being destroyed */
#define LDLM_FL_CBPENDING      0x400000000ULL
/* not a real flag, not saved in lock */
#define LDLM_FL_WAIT_NOREPROC  0x800000000ULL
/* cancellation callback already run */
#define LDLM_FL_CANCEL         0x1000000000ULL
#define LDLM_FL_LOCAL_ONLY     0x2000000000ULL
/* don't run the cancel callback under ldlm_cli_cancel_unused */
#define LDLM_FL_FAILED         0x4000000000ULL
/* lock cancel has already been sent */
#define LDLM_FL_CANCELING      0x8000000000ULL
/* local lock (ie, no srv/cli split) */
#define LDLM_FL_LOCAL          0x10000000000ULL
/* XXX FIXME: This is being added as a low-risk fix to the fact that the
 * LVB filling happens _after_ the lock has been granted, so another thread
 * can match it before the LVB has been updated. As a dirty hack, we set
 * LDLM_FL_LVB_READY only after we've done the LVB poop.
 * This is only needed on LOV/OSC now, where the LVB is actually used and
 * callers must set it in the input flags.
 *
 * The proper fix is to do the granting inside of the completion AST, which can
 * be replaced with a LVB-aware wrapping function for OSC locks. That change is
 * pretty high-risk, though, and would need a lot more testing. */
#define LDLM_FL_LVB_READY      0x20000000000ULL
/* A lock contributes to the KMS calculation until it has finished the part
 * of its cancellation that performs write back on its dirty pages. It
 * can remain on the granted list during this whole time. Threads racing
 * to update the KMS after performing their writeback need to know to
 * exclude each other's locks from the calculation as they walk the granted
 * list. */
#define LDLM_FL_KMS_IGNORE     0x40000000000ULL
/* completion AST to be executed */
#define LDLM_FL_CP_REQD        0x80000000000ULL
/* cleanup_resource has already handled the lock */
#define LDLM_FL_CLEANED        0x100000000000ULL
/* Optimization hint: LDLM can run the blocking callback from the current
 * context without involving a separate thread, in order to decrease the
 * context-switch rate. */
#define LDLM_FL_ATOMIC_CB      0x200000000000ULL
/* It may happen that a client initiates two operations, e.g. unlink and
 * mkdir, such that the server sends a blocking AST for conflicting locks
 * to this client for the first operation, whereas the second operation
 * has canceled this lock and is waiting for the rpc_lock which is taken
 * by the first operation.
 * LDLM_FL_BL_AST is set by ldlm_callback_handler() on the lock to prevent
 * the ELC code from cancelling it.
 * LDLM_FL_BL_DONE is set by ldlm_cancel_callback() when the lock cache is
 * dropped, to let ldlm_callback_handler() return EINVAL to the server. It
 * is used when the ELC RPC is already prepared and is waiting for the
 * rpc_lock; at that point it is too late to send a separate CANCEL RPC. */
#define LDLM_FL_BL_AST         0x400000000000ULL
#define LDLM_FL_BL_DONE        0x800000000000ULL
/* Don't put lock into the LRU list, so that it is not canceled due to aging.
 * Used by MGC locks, they are cancelled only at unmount or by callback. */
#define LDLM_FL_NO_LRU         0x1000000000000ULL
/* The blocking callback is overloaded to perform two functions. These flags
 * indicate which operation should be performed. */
#define LDLM_CB_BLOCKING       1
#define LDLM_CB_CANCELING      2
/* compatibility matrix */
#define LCK_COMPAT_EX  LCK_NL
#define LCK_COMPAT_PW  (LCK_COMPAT_EX | LCK_CR)
#define LCK_COMPAT_PR  (LCK_COMPAT_PW | LCK_PR)
#define LCK_COMPAT_CW  (LCK_COMPAT_PW | LCK_CW)
#define LCK_COMPAT_CR  (LCK_COMPAT_CW | LCK_PR | LCK_PW)
#define LCK_COMPAT_NL  (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
#define LCK_COMPAT_GROUP  (LCK_GROUP | LCK_NL)
#define LCK_COMPAT_COS (LCK_COS)
extern ldlm_mode_t lck_compat_array[];

static inline void lockmode_verify(ldlm_mode_t mode)
{
        LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
}

static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode)
{
        return (lck_compat_array[exist_mode] & new_mode);
}
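
/*
 * Usage sketch (illustrative): a granted PR lock is compatible with
 * another PR request, but not with an EX request:
 *
 *	lockmode_verify(new_mode);
 *	if (lockmode_compat(LCK_PR, LCK_PR))
 *		... grant immediately ...
 *	if (!lockmode_compat(LCK_PR, LCK_EX))
 *		... enqueue on the waiting list ...
 */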
/*
 * cluster name spaces
 */

#define DLM_OST_NAMESPACE 1
#define DLM_MDS_NAMESPACE 2

/* XXX
   - do we just separate this by security domains and use a prefix for
     multiple namespaces in the same domain?
 */
struct ldlm_resource;
struct ldlm_namespace;

struct ldlm_pool_ops {
        int (*po_recalc)(struct ldlm_pool *pl);
        int (*po_shrink)(struct ldlm_pool *pl, int nr,
                         unsigned int gfp_mask);
        int (*po_setup)(struct ldlm_pool *pl, int limit);
};
/*
 * One second for pools thread check interval. Each pool has its own period.
 */
#define LDLM_POOLS_THREAD_PERIOD (1)

/*
 * ~6% margin for modest pools. See ldlm_pool.c for details.
 */
#define LDLM_POOLS_MODEST_MARGIN_SHIFT (4)

/*
 * Default recalc period for server side pools in sec.
 */
#define LDLM_POOL_SRV_DEF_RECALC_PERIOD (1)

/*
 * Default recalc period for client side pools in sec.
 */
#define LDLM_POOL_CLI_DEF_RECALC_PERIOD (10)
struct ldlm_pool {
        /** Pool proc directory. */
        cfs_proc_dir_entry_t   *pl_proc_dir;
        /** Pool name, should be long enough to contain a compound proc
         *  entry name. */
        char                    pl_name[100];
        /** Lock for protecting SLV/CLV updates. */
        spinlock_t              pl_lock;
        /** Number of allowed locks in the pool, both client and server side. */
        cfs_atomic_t            pl_limit;
        /** Number of granted locks in the pool. */
        cfs_atomic_t            pl_granted;
        cfs_atomic_t            pl_grant_rate;
        cfs_atomic_t            pl_cancel_rate;
        /** Server lock volume. Protected by pl_lock. */
        __u64                   pl_server_lock_volume;
        /** Current biggest client lock volume. Protected by pl_lock. */
        __u64                   pl_client_lock_volume;
        /** Lock volume factor. SLV on the client is calculated as:
         *  server_slv * lock_volume_factor. */
        cfs_atomic_t            pl_lock_volume_factor;
        /** Time when the last SLV from the server was obtained. */
        time_t                  pl_recalc_time;
        /** Recalc period for the pool. */
        time_t                  pl_recalc_period;
        /** Recalc and shrink ops. */
        struct ldlm_pool_ops   *pl_ops;
        /** Number of planned locks for the next period. */
        int                     pl_grant_plan;
        struct lprocfs_stats   *pl_stats;
};
typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
                               void *req_cookie, ldlm_mode_t mode, __u64 flags,
                               void *data);

typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);
struct ldlm_valblock_ops {
        int (*lvbo_init)(struct ldlm_resource *res);
        int (*lvbo_update)(struct ldlm_resource *res,
                           struct ptlrpc_request *r,
                           int increase);
        int (*lvbo_free)(struct ldlm_resource *res);
        /* Return the size of the LVB data so that an appropriate RPC size
         * can be reserved. */
        int (*lvbo_size)(struct ldlm_lock *lock);
        /* Called to fill in the LVB data in the RPC buffer @buf. */
        int (*lvbo_fill)(struct ldlm_lock *lock, void *buf, int buflen);
};
typedef enum {
        LDLM_NAMESPACE_GREEDY = 1 << 0,
        LDLM_NAMESPACE_MODEST = 1 << 1
} ldlm_appetite_t;
/*
 * Default values for the "max_nolock_size", "contention_time" and
 * "contended_locks" namespace tunables.
 */
#define NS_DEFAULT_MAX_NOLOCK_BYTES 0
#define NS_DEFAULT_CONTENTION_SECONDS 2
#define NS_DEFAULT_CONTENDED_LOCKS 32
struct ldlm_ns_bucket {
        struct ldlm_namespace  *nsb_namespace;
        /** estimated lock callback time */
        struct adaptive_timeout nsb_at_estimate;
};

enum {
        /** ldlm namespace lock stats */
        LDLM_NSS_LOCKS = 0,
        LDLM_NSS_LAST
};

typedef enum {
        LDLM_NS_TYPE_UNKNOWN = 0,
        LDLM_NS_TYPE_MDC,
        LDLM_NS_TYPE_MDT,
        LDLM_NS_TYPE_OSC,
        LDLM_NS_TYPE_OST,
        LDLM_NS_TYPE_MGC,
        LDLM_NS_TYPE_MGT,
} ldlm_ns_type_t;
struct ldlm_namespace {
        /** Backward link to obd, required for ldlm pool to store new SLV. */
        struct obd_device      *ns_obd;

        /** Is this a client-side lock tree? */
        ldlm_side_t             ns_client;

        cfs_hash_t             *ns_rs_hash;

        /** big refcount (by bucket) */
        cfs_atomic_t            ns_bref;

        /** Namespace connect flags supported by the server (may be changed
         *  via proc; LRU resize may be disabled/enabled). */
        __u64                   ns_connect_flags;

        /** Client side original connect flags supported by the server. */
        __u64                   ns_orig_connect_flags;

        /** Position in global namespace list. */
        cfs_list_t              ns_list_chain;

        /** All root resources in namespace. */
        cfs_list_t              ns_unused_list;

        unsigned int            ns_max_unused;
        unsigned int            ns_max_age;
        unsigned int            ns_timeouts;

        unsigned int            ns_ctime_age_limit;

        /** Next debug dump, jiffies. */
        cfs_time_t              ns_next_dump;

        ldlm_res_policy         ns_policy;
        struct ldlm_valblock_ops *ns_lvbo;
        cfs_waitq_t             ns_waitq;
        struct ldlm_pool        ns_pool;
        ldlm_appetite_t         ns_appetite;

        /** If more than \a ns_contended_locks are found, the resource is
         *  considered contended. */
        unsigned                ns_contended_locks;

        /** The resource remembers contended state during
         *  \a ns_contention_time, in seconds. */
        unsigned                ns_contention_time;

        /** Limit size of nolock requests, in bytes. */
        unsigned                ns_max_nolock_size;

        /** Limit of parallel AST RPC count. */
        unsigned                ns_max_parallel_ast;

        /* callback to cancel locks before replaying them during recovery */
        ldlm_cancel_for_recovery ns_cancel_for_recovery;

        struct lprocfs_stats   *ns_stats;

        unsigned                ns_stopping:1;  /* namespace cleanup */
};
static inline int ns_is_client(struct ldlm_namespace *ns)
{
        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
                                    LDLM_NAMESPACE_SERVER)));
        LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
                ns->ns_client == LDLM_NAMESPACE_SERVER);
        return ns->ns_client == LDLM_NAMESPACE_CLIENT;
}

static inline int ns_is_server(struct ldlm_namespace *ns)
{
        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
                                    LDLM_NAMESPACE_SERVER)));
        LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
                ns->ns_client == LDLM_NAMESPACE_SERVER);
        return ns->ns_client == LDLM_NAMESPACE_SERVER;
}
static inline int ns_connect_cancelset(struct ldlm_namespace *ns)
{
        return !!(ns->ns_connect_flags & OBD_CONNECT_CANCELSET);
}

static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
{
        return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE);
}

static inline void ns_register_cancel(struct ldlm_namespace *ns,
                                      ldlm_cancel_for_recovery arg)
{
        ns->ns_cancel_for_recovery = arg;
}
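
/*
 * Sketch (hypothetical callback name): a client registers a policy that
 * decides which cached locks should be cancelled instead of replayed
 * during recovery:
 *
 *	static int my_cancel_for_recovery(struct ldlm_lock *lock)
 *	{
 *		return lock->l_granted_mode == LCK_PR;
 *	}
 *
 *	ns_register_cancel(ns, my_cancel_for_recovery);
 */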
typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
                                      struct ldlm_lock_desc *new, void *data,
                                      int flag);
typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, __u64 flags,
                                        void *data);
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
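
/*
 * Sketch of a blocking AST (hypothetical name): on LDLM_CB_BLOCKING the
 * lock conflicts with another request and should be cancelled; on
 * LDLM_CB_CANCELING the cancellation itself is in progress and any cached
 * state should be flushed. Compare ldlm_blocking_ast() declared below.
 *
 *	static int my_blocking_ast(struct ldlm_lock *lock,
 *				   struct ldlm_lock_desc *desc,
 *				   void *data, int flag)
 *	{
 *		struct lustre_handle lockh;
 *
 *		if (flag == LDLM_CB_BLOCKING) {
 *			ldlm_lock2handle(lock, &lockh);
 *			return ldlm_cli_cancel(&lockh);
 *		}
 *		return 0;
 *	}
 */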
struct ldlm_glimpse_work {
        struct ldlm_lock   *gl_lock;  /* lock to glimpse */
        cfs_list_t          gl_list;  /* linkage to other gl work structs */
        __u32               gl_flags; /* see LDLM_GL_WORK_* below */
        union ldlm_gl_desc *gl_desc;  /* glimpse descriptor to be packed in
                                       * glimpse callback request */
};

/* The ldlm_glimpse_work is allocated on the stack and should not be freed. */
#define LDLM_GL_WORK_NOFREE 0x1
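
/*
 * Sketch (server side): glimpse one lock by building a one-entry work
 * list; LDLM_GL_WORK_NOFREE matches the on-stack allocation:
 *
 *	struct ldlm_glimpse_work gl_work;
 *	CFS_LIST_HEAD(gl_list);
 *
 *	gl_work.gl_lock  = LDLM_LOCK_GET(lock);
 *	gl_work.gl_flags = LDLM_GL_WORK_NOFREE;
 *	gl_work.gl_desc  = NULL;
 *	cfs_list_add_tail(&gl_work.gl_list, &gl_list);
 *	rc = ldlm_glimpse_locks(res, &gl_list);
 */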
/* Interval node data for each LDLM_EXTENT lock */
struct ldlm_interval {
        struct interval_node li_node;  /* node for tree management */
        cfs_list_t           li_group; /* the locks which have the same
                                        * policy - group of the policy */
};
#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
/* The interval tree must be accessed inside the resource lock. */
struct ldlm_interval_tree {
        /* Tree size. This variable is used to count
         * granted PW locks in ldlm_extent_policy(). */
        int                   lit_size;
        ldlm_mode_t           lit_mode;  /* lock mode */
        struct interval_node *lit_root;  /* actually ldlm_interval */
};
#define LUSTRE_TRACKS_LOCK_EXP_REFS (0)

typedef enum {
        LCF_ASYNC  = 0x1, /* Cancel locks asynchronously. */
        LCF_LOCAL  = 0x2, /* Cancel locks locally, without notifying server. */
        LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
                           * in the same RPC. */
} ldlm_cancel_flags_t;
struct ldlm_flock {
        __u64              start;
        __u64              end;
        __u64              owner;
        __u64              blocking_owner;
        struct obd_export *blocking_export;
        /* Protected by the hash lock */
        __u32              blocking_refs;
        __u32              pid;
};

typedef union {
        struct ldlm_extent    l_extent;
        struct ldlm_flock     l_flock;
        struct ldlm_inodebits l_inodebits;
} ldlm_policy_data_t;
void ldlm_convert_policy_to_wire(ldlm_type_t type,
                                 const ldlm_policy_data_t *lpolicy,
                                 ldlm_wire_policy_data_t *wpolicy);
void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
                                  const ldlm_wire_policy_data_t *wpolicy,
                                  ldlm_policy_data_t *lpolicy);
struct ldlm_lock {
        /** Must be first in the structure. */
        struct portals_handle    l_handle;
        /** Lock reference count. */
        /** Internal spinlock that protects l_resource. We should hold this
         *  lock first before grabbing res_lock. */
        /** ldlm_lock_change_resource() can change this. */
        struct ldlm_resource    *l_resource;
        /** Protected by ns_hash_lock. List item for client side LRU list. */
        /** Protected by lr_lock, linkage to resource's lock queues. */
        cfs_list_t               l_res_link;
        /** Tree node for ldlm_extent. */
        struct ldlm_interval    *l_tree_node;
        /** Protected by per-bucket exp->exp_lock_hash locks. Per export hash
         *  of locks. */
        cfs_hlist_node_t         l_exp_hash;
        /** Protected by per-bucket exp->exp_flock_hash locks. Per export hash
         *  of flock locks. */
        cfs_hlist_node_t         l_exp_flock_hash;
        /** Protected by lr_lock. Requested mode. */
        ldlm_mode_t              l_req_mode;
        /** Granted mode, also protected by lr_lock. */
        ldlm_mode_t              l_granted_mode;
        /** Lock enqueue completion handler. */
        ldlm_completion_callback l_completion_ast;
        /** Lock blocking AST handler. */
        ldlm_blocking_callback   l_blocking_ast;
        /** Lock glimpse handler. */
        ldlm_glimpse_callback    l_glimpse_ast;
        ldlm_weigh_callback      l_weigh_ast;

        struct obd_export       *l_export;
        /** Lock connection export. */
        struct obd_export       *l_conn_export;

        /** Remote lock handle. */
        struct lustre_handle     l_remote_handle;

        ldlm_policy_data_t       l_policy_data;

        /** Protected by lr_lock. Various counters: readers, writers, etc. */
        /** If the lock is granted, a process sleeps on this waitq to learn
         *  when it's no longer in use. If the lock is not granted, a process
         *  sleeps on this waitq to learn when it becomes granted. */
        /** Seconds. It will be updated if there is any activity related to
         *  the lock, e.g. enqueue the lock or send a blocking AST. */
        cfs_time_t               l_last_activity;
        /** Jiffies. Should be converted to time if needed. */
        cfs_time_t               l_last_used;

        struct ldlm_extent       l_req_extent;

        unsigned int             l_failed:1,
        /** Set for locks that were removed from the class hash table and
         *  will be destroyed when the last reference to them is released.
         *  Set by ldlm_lock_destroy_internal().
         *
         *  Protected by lock and resource locks. */
                                 l_destroyed:1,
        /** It's set in lock_res_and_lock() and unset in
         *  unlock_res_and_lock().
         *
         *  NB: compare with check_res_locked(); checking this bit is
         *  cheaper. Also, spin_is_locked() is deprecated for kernel code:
         *  one reason is that it works only for SMP, so users need to add
         *  extra macros like LASSERT_SPIN_LOCKED for uniprocessor kernels. */
                                 l_res_locked:1,
        /** It's set once we call ldlm_add_waiting_lock_res_locked() to start
         *  the lock-timeout timer and it will never be reset.
         *
         *  Protected by lock_res_and_lock(). */
                                 l_waited:1,
        /** Flag whether this is a server namespace lock. */
                                 l_ns_srv:1;

        /*
         * Client-side-only members.
         */

        /** Temporary storage for an LVB received during an enqueue
         *  operation. */

        /*
         * Server-side-only members.
         */

        /** connection cookie for the client that originated the operation */
        __u64                    l_client_cookie;

        /** Protected by elt_lock. Callbacks pending. */
        cfs_list_t               l_pending_chain;

        cfs_time_t               l_callback_timeout;

        /** Pid which created this lock. */
        __u32                    l_pid;

        /** For ldlm_add_ast_work_item(). */
        cfs_list_t               l_bl_ast;
        /** For ldlm_add_ast_work_item(). */
        cfs_list_t               l_cp_ast;
        /** For ldlm_add_ast_work_item(). */
        struct ldlm_lock        *l_blocking_lock;

        /** Protected by lr_lock, linkages to "skip lists". */
        cfs_list_t               l_sl_mode;
        cfs_list_t               l_sl_policy;
        struct lu_ref            l_reference;
#if LUSTRE_TRACKS_LOCK_EXP_REFS
        /* Debugging stuff for bug 20498, for tracking export references. */
        /** number of export references taken */
        int                      l_exp_refs_nr;
        /** link all locks referencing one export */
        cfs_list_t               l_exp_refs_link;
        /** referenced export object */
        struct obd_export       *l_exp_refs_target;
#endif
        /** Export blocking dlm lock list, protected by
         *  l_export->exp_bl_list_lock.
         *  Lock order of waiting_locks_spinlock, exp_bl_list_lock and res
         *  lock is: res lock -> exp_bl_list_lock -> waiting_locks_spinlock. */
        cfs_list_t               l_exp_list;
};
struct ldlm_resource {
        struct ldlm_ns_bucket *lr_ns_bucket;

        /* protected by ns_hash_lock */
        cfs_hlist_node_t       lr_hash;
        spinlock_t             lr_lock;

        /* protected by lr_lock */
        cfs_list_t             lr_granted;
        cfs_list_t             lr_converting;
        cfs_list_t             lr_waiting;
        ldlm_mode_t            lr_most_restr;
        ldlm_type_t            lr_type;    /* LDLM_{PLAIN,EXTENT,FLOCK} */
        struct ldlm_res_id     lr_name;
        cfs_atomic_t           lr_refcount;

        struct ldlm_interval_tree lr_itree[LCK_MODE_NUM]; /* interval trees */

        /* Server-side-only lock value block elements */
        /** to serialize lvbo_init */
        struct mutex           lr_lvb_mutex;
        /** protected by lr_lock */

        /* when the resource was considered as contended */
        cfs_time_t             lr_contention_time;
        /** List of references to this resource. For debugging. */
        struct lu_ref          lr_reference;

        struct inode          *lr_lvb_inode;
};
static inline char *
ldlm_ns_name(struct ldlm_namespace *ns)
{
        return ns->ns_rs_hash->hs_name;
}

static inline struct ldlm_namespace *
ldlm_res_to_ns(struct ldlm_resource *res)
{
        return res->lr_ns_bucket->nsb_namespace;
}

static inline struct ldlm_namespace *
ldlm_lock_to_ns(struct ldlm_lock *lock)
{
        return ldlm_res_to_ns(lock->l_resource);
}

static inline char *
ldlm_lock_to_ns_name(struct ldlm_lock *lock)
{
        return ldlm_ns_name(ldlm_lock_to_ns(lock));
}

static inline struct adaptive_timeout *
ldlm_lock_to_ns_at(struct ldlm_lock *lock)
{
        return &lock->l_resource->lr_ns_bucket->nsb_at_estimate;
}
static inline int ldlm_lvbo_init(struct ldlm_resource *res)
{
        struct ldlm_namespace *ns = ldlm_res_to_ns(res);

        if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_init != NULL)
                return ns->ns_lvbo->lvbo_init(res);

        return 0;
}

static inline int ldlm_lvbo_size(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_size != NULL)
                return ns->ns_lvbo->lvbo_size(lock);

        return 0;
}

static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        if (ns->ns_lvbo != NULL) {
                LASSERT(ns->ns_lvbo->lvbo_fill != NULL);
                return ns->ns_lvbo->lvbo_fill(lock, buf, len);
        }
        return 0;
}
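
/*
 * Typical server-side reply path (sketch; reply buffer management is the
 * caller's responsibility): reserve space with ldlm_lvbo_size(), then
 * pack the data with ldlm_lvbo_fill():
 *
 *	int lvb_len = ldlm_lvbo_size(lock);
 *	... reserve lvb_len bytes at lvb in the reply ...
 *	rc = ldlm_lvbo_fill(lock, lvb, lvb_len);
 */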
struct ldlm_ast_work {
        struct ldlm_lock      *w_lock;
        struct ldlm_lock_desc  w_desc;
};
/* Common ldlm_enqueue parameters. */
struct ldlm_enqueue_info {
        __u32 ei_type;   /* Type of the lock being enqueued. */
        __u32 ei_mode;   /* Mode of the lock being enqueued. */
        void *ei_cb_bl;  /* blocking lock callback */
        void *ei_cb_cp;  /* lock completion callback */
        void *ei_cb_gl;  /* lock glimpse callback */
        void *ei_cb_wg;  /* lock weigh callback */
        void *ei_cbdata; /* Data to be passed into callbacks. */
};
extern struct obd_ops ldlm_obd_ops;

extern char *ldlm_lockname[];
extern char *ldlm_typename[];
extern char *ldlm_it2str(int it);
#ifdef LIBCFS_DEBUG
#define ldlm_lock_debug(msgdata, mask, cdls, lock, fmt, a...) do {      \
        CFS_CHECK_STACK(msgdata, mask, cdls);                           \
                                                                        \
        if (((mask) & D_CANTMASK) != 0 ||                               \
            ((libcfs_debug & (mask)) != 0 &&                            \
             (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0))          \
                _ldlm_lock_debug(lock, msgdata, fmt, ##a);              \
} while(0)

void _ldlm_lock_debug(struct ldlm_lock *lock,
                      struct libcfs_debug_msg_data *data,
                      const char *fmt, ...)
        __attribute__ ((format (printf, 3, 4)));

#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do {                    \
        static cfs_debug_limit_state_t _ldlm_cdls;                      \
        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, &_ldlm_cdls);         \
        ldlm_lock_debug(&msgdata, mask, &_ldlm_cdls, lock, "### " fmt , ##a);\
} while (0)

#define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a)
#define LDLM_WARN(lock, fmt, a...)  LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a)

#define LDLM_DEBUG(lock, fmt, a...)   do {                              \
        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL);          \
        ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, "### " fmt , ##a);\
} while (0)
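
/*
 * Usage (illustrative): these work like CDEBUG()/CERROR() but prefix the
 * message with a full dump of the lock state:
 *
 *	LDLM_DEBUG(lock, "client-side enqueue, flags "LPX64, *flags);
 *	LDLM_ERROR(lock, "lock callback timer expired after %lds", delay);
 */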
#else /* !LIBCFS_DEBUG */
# define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) ((void)0)
# define LDLM_DEBUG(lock, fmt, a...) ((void)0)
# define LDLM_ERROR(lock, fmt, a...) ((void)0)
#endif

#define LDLM_DEBUG_NOLOCK(format, a...)                 \
        CDEBUG(D_DLMTRACE, "### " format "\n" , ##a)
typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
                                      int first_enq, ldlm_error_t *err,
                                      cfs_list_t *work_list);

#define LDLM_ITER_CONTINUE 1 /* keep iterating */
#define LDLM_ITER_STOP     2 /* stop iterating */

typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *);
typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);

int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                          void *closure);
void ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
                            void *closure);

int ldlm_replay_locks(struct obd_import *imp);
int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
                          ldlm_iterator_t iter, void *data);
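
/*
 * Sketch (hypothetical callback): count the locks on one resource with
 * ldlm_resource_foreach(); returning LDLM_ITER_STOP would end the walk
 * early:
 *
 *	static int count_cb(struct ldlm_lock *lock, void *closure)
 *	{
 *		(*(int *)closure)++;
 *		return LDLM_ITER_CONTINUE;
 *	}
 *
 *	int n = 0;
 *	ldlm_resource_foreach(res, count_cb, &n);
 */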
/* ldlm_flock.c */
int ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);

/* ldlm_extent.c */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);

struct ldlm_callback_suite {
        ldlm_completion_callback lcs_completion;
        ldlm_blocking_callback   lcs_blocking;
        ldlm_glimpse_callback    lcs_glimpse;
        ldlm_weigh_callback      lcs_weigh;
};
/* ldlm_lockd.c */
#ifdef HAVE_SERVER_SUPPORT
int ldlm_server_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
                             void *data, int flag);
int ldlm_server_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list);
int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback,
                        ldlm_blocking_callback, ldlm_glimpse_callback);
int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req,
                         const struct ldlm_callback_suite *cbs);
int ldlm_handle_convert(struct ptlrpc_request *req);
int ldlm_handle_convert0(struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req);
int ldlm_handle_cancel(struct ptlrpc_request *req);
int ldlm_request_cancel(struct ptlrpc_request *req,
                        const struct ldlm_request *dlm_req, int first);
void ldlm_revoke_export_locks(struct obd_export *exp);
#endif
int ldlm_del_waiting_lock(struct ldlm_lock *lock);
int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout);
int ldlm_get_ref(void);
void ldlm_put_ref(void);
int ldlm_init_export(struct obd_export *exp);
void ldlm_destroy_export(struct obd_export *exp);
struct ldlm_lock *ldlm_request_lock(struct ptlrpc_request *req);
/* ldlm_lock.c */
#ifdef HAVE_SERVER_SUPPORT
ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res);
#endif
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
void ldlm_lock2handle(const struct ldlm_lock *lock,
                      struct lustre_handle *lockh);
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, __u64 flags);
void ldlm_cancel_callback(struct ldlm_lock *);
int ldlm_lock_remove_from_lru(struct ldlm_lock *);
int ldlm_lock_set_data(struct lustre_handle *, void *);
static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
{
        return __ldlm_handle2lock(h, 0);
}

#define LDLM_LOCK_REF_DEL(lock) \
        lu_ref_del(&lock->l_reference, "handle", cfs_current())

static inline struct ldlm_lock *
ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags)
{
        struct ldlm_lock *lock;

        lock = __ldlm_handle2lock(h, flags);
        if (lock != NULL)
                LDLM_LOCK_REF_DEL(lock);
        return lock;
}
static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
                                       struct ptlrpc_request *r, int increase)
{
        if (ldlm_res_to_ns(res)->ns_lvbo &&
            ldlm_res_to_ns(res)->ns_lvbo->lvbo_update) {
                return ldlm_res_to_ns(res)->ns_lvbo->lvbo_update(res, r,
                                                                 increase);
        }
        return 0;
}
int ldlm_error2errno(ldlm_error_t error);
ldlm_error_t ldlm_errno2error(int err_no); /* don't call it `errno': this
                                            * confuses user-space. */
#if LUSTRE_TRACKS_LOCK_EXP_REFS
void ldlm_dump_export_locks(struct obd_export *exp);
#endif
/**
 * Release a temporary lock reference obtained by ldlm_handle2lock() or
 * __ldlm_handle2lock().
 */
#define LDLM_LOCK_PUT(lock)                     \
do {                                            \
        LDLM_LOCK_REF_DEL(lock);                \
        /*LDLM_DEBUG((lock), "put");*/          \
        ldlm_lock_put(lock);                    \
} while (0)

/**
 * Release a lock reference obtained by some other means (see
 * LDLM_LOCK_GET()).
 */
#define LDLM_LOCK_RELEASE(lock)                 \
do {                                            \
        /*LDLM_DEBUG((lock), "put");*/          \
        ldlm_lock_put(lock);                    \
} while (0)

#define LDLM_LOCK_GET(lock)                     \
({                                              \
        ldlm_lock_get(lock);                    \
        /*LDLM_DEBUG((lock), "get");*/          \
        lock;                                   \
})
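
/*
 * Reference discipline (sketch): a successful ldlm_handle2lock() takes a
 * reference that must be balanced by LDLM_LOCK_PUT():
 *
 *	struct ldlm_lock *lock = ldlm_handle2lock(lockh);
 *
 *	if (lock != NULL) {
 *		... use the lock ...
 *		LDLM_LOCK_PUT(lock);
 *	}
 */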
#define ldlm_lock_list_put(head, member, count)                         \
({                                                                      \
        struct ldlm_lock *_lock, *_next;                                \
        int c = count;                                                  \
        cfs_list_for_each_entry_safe(_lock, _next, head, member) {      \
                if (c-- == 0)                                           \
                        break;                                          \
                cfs_list_del_init(&_lock->member);                      \
                LDLM_LOCK_RELEASE(_lock);                               \
        }                                                               \
        LASSERT(c <= 0);                                                \
})
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock_destroy(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
int  ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
void ldlm_lock_fail_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
                            const struct ldlm_res_id *, ldlm_type_t type,
                            ldlm_policy_data_t *, ldlm_mode_t mode,
                            struct lustre_handle *, int unref);
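
/*
 * Sketch (client side; inputs are illustrative): look for an already
 * granted extent lock covering a region before enqueueing a new one. A
 * successful match takes a reference that ldlm_lock_decref() releases:
 *
 *	ldlm_policy_data_t policy = { .l_extent = { start, end } };
 *	struct lustre_handle lockh;
 *	ldlm_mode_t mode;
 *
 *	mode = ldlm_lock_match(ns, LDLM_FL_LVB_READY, &res_id, LDLM_EXTENT,
 *			       &policy, LCK_PR | LCK_PW, &lockh, 0);
 *	if (mode != 0) {
 *		... reuse the matched lock ...
 *		ldlm_lock_decref(&lockh, mode);
 *	}
 */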
ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
                                        __u64 *bits);
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        __u32 *flags);
void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_reprocess_all(struct ldlm_resource *res);
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
void ldlm_lock_dump_handle(int level, struct lustre_handle *);
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
/* resource.c */
struct ldlm_namespace *
ldlm_namespace_new(struct obd_device *obd, char *name,
                   ldlm_side_t client, ldlm_appetite_t apt,
                   ldlm_ns_type_t ns_type);
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
void ldlm_namespace_free(struct ldlm_namespace *ns,
                         struct obd_import *imp, int force);
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client);
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client);
void ldlm_namespace_get(struct ldlm_namespace *ns);
void ldlm_namespace_put(struct ldlm_namespace *ns);
int ldlm_proc_setup(void);
#ifdef LPROCFS
void ldlm_proc_cleanup(void);
#else
static inline void ldlm_proc_cleanup(void) {}
#endif
/* resource.c - internal */
struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
                                        struct ldlm_resource *parent,
                                        const struct ldlm_res_id *,
                                        ldlm_type_t type, int create);
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
int ldlm_resource_putref(struct ldlm_resource *res);
void ldlm_resource_add_lock(struct ldlm_resource *res,
                            cfs_list_t *head,
                            struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
void ldlm_namespace_dump(int level, struct ldlm_namespace *);
void ldlm_resource_dump(int level, struct ldlm_resource *);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
                              const struct ldlm_res_id *);
#define LDLM_RESOURCE_ADDREF(res) do {                                  \
        lu_ref_add_atomic(&(res)->lr_reference, __FUNCTION__, cfs_current()); \
} while (0)

#define LDLM_RESOURCE_DELREF(res) do {                                  \
        lu_ref_del(&(res)->lr_reference, __FUNCTION__, cfs_current());  \
} while (0)
/* ldlm_request.c */
int ldlm_expired_completion_wait(void *data);
int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock);
int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                      void *data, int flag);
int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data);
int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                     struct ldlm_enqueue_info *einfo,
                     const struct ldlm_res_id *res_id,
                     ldlm_policy_data_t const *policy, __u64 *flags,
                     void *lvb, __u32 lvb_len, struct lustre_handle *lockh,
                     int async);
int ldlm_prep_enqueue_req(struct obd_export *exp,
                          struct ptlrpc_request *req,
                          cfs_list_t *cancels,
                          int count);
int ldlm_prep_elc_req(struct obd_export *exp,
                      struct ptlrpc_request *req,
                      int version, int opc, int canceloff,
                      cfs_list_t *cancels, int count);
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                          ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
                          __u64 *flags, void *lvb, __u32 lvb_len,
                          struct lustre_handle *lockh, int rc);
int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                           const struct ldlm_res_id *res_id,
                           ldlm_type_t type, ldlm_policy_data_t *policy,
                           ldlm_mode_t mode, __u64 *flags,
                           ldlm_blocking_callback blocking,
                           ldlm_completion_callback completion,
                           ldlm_glimpse_callback glimpse,
                           void *data, __u32 lvb_len,
                           const __u64 *client_cookie,
                           struct lustre_handle *lockh);
int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
                    void *data, __u32 data_len);
int ldlm_cli_convert(struct lustre_handle *, int new_mode, __u32 *flags);
int ldlm_cli_update_pool(struct ptlrpc_request *req);
int ldlm_cli_cancel(struct lustre_handle *lockh);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
                           ldlm_cancel_flags_t flags, void *opaque);
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                    const struct ldlm_res_id *res_id,
                                    ldlm_policy_data_t *policy,
                                    ldlm_mode_t mode,
                                    ldlm_cancel_flags_t flags,
                                    void *opaque);
int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *head,
                        int count, ldlm_cancel_flags_t flags);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
                               cfs_list_t *cancels,
                               ldlm_policy_data_t *policy,
                               ldlm_mode_t mode, int lock_flags,
                               ldlm_cancel_flags_t cancel_flags, void *opaque);
int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
                               ldlm_cancel_flags_t flags);
int ldlm_cli_cancel_list(cfs_list_t *head, int count,
                         struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
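
/*
 * Early lock cancellation (ELC) flow, sketched: collect cancelable locks
 * for a resource locally, then piggy-back the cancels on the next
 * enqueue RPC:
 *
 *	CFS_LIST_HEAD(cancels);
 *	int count;
 *
 *	count = ldlm_cancel_resource_local(res, &cancels, NULL, LCK_EX,
 *					   0, LCF_ASYNC, NULL);
 *	rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
 */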
/* mds/handler.c */
/* This has to be here because recursive inclusion sucks. */
int intent_disposition(struct ldlm_reply *rep, int flag);
void intent_set_disposition(struct ldlm_reply *rep, int flag);
/* ioctls for trying requests */
#define IOC_LDLM_TYPE          'f'
#define IOC_LDLM_MIN_NR        40

#define IOC_LDLM_TEST          _IOWR('f', 40, long)
#define IOC_LDLM_DUMP          _IOWR('f', 41, long)
#define IOC_LDLM_REGRESS_START _IOWR('f', 42, long)
#define IOC_LDLM_REGRESS_STOP  _IOWR('f', 43, long)
#define IOC_LDLM_MAX_NR        43
/**
 * "Modes" of acquiring lock_res, necessary to tell lockdep that taking more
 * than one lock_res is dead-lock safe.
 */
enum lock_res_type {
        LRT_NORMAL,
        LRT_NEW
};

static inline void lock_res(struct ldlm_resource *res)
{
        spin_lock(&res->lr_lock);
}

static inline void lock_res_nested(struct ldlm_resource *res,
                                   enum lock_res_type mode)
{
        spin_lock_nested(&res->lr_lock, mode);
}

static inline void unlock_res(struct ldlm_resource *res)
{
        spin_unlock(&res->lr_lock);
}

static inline void check_res_locked(struct ldlm_resource *res)
{
        LASSERT_SPIN_LOCKED(&res->lr_lock);
}
struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
void unlock_res_and_lock(struct ldlm_lock *lock);
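
/*
 * Sketch: when two resources must be held at once, take the second with
 * lock_res_nested() so lockdep can tell the acquisitions apart:
 *
 *	lock_res(res1);
 *	lock_res_nested(res2, LRT_NEW);
 *	... both resources are locked ...
 *	unlock_res(res2);
 *	unlock_res(res1);
 */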
/* ldlm_pool.c */
void ldlm_pools_recalc(ldlm_side_t client);
int ldlm_pools_init(void);
void ldlm_pools_fini(void);

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, ldlm_side_t client);
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
                     unsigned int gfp_mask);
void ldlm_pool_fini(struct ldlm_pool *pl);
int ldlm_pool_setup(struct ldlm_pool *pl, int limit);
int ldlm_pool_recalc(struct ldlm_pool *pl);
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl);
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl);
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl);
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl);
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv);
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv);
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit);
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);
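
/*
 * Sketch: a server recalc publishes a new SLV/limit; the client copies
 * them into its namespace pool when a reply carries pool data (roughly
 * what ldlm_cli_update_pool() above does):
 *
 *	__u64 slv   = ldlm_pool_get_slv(&srv_ns->ns_pool);
 *	__u32 limit = ldlm_pool_get_limit(&srv_ns->ns_pool);
 *	...
 *	ldlm_pool_set_slv(&cli_ns->ns_pool, slv);
 *	ldlm_pool_set_limit(&cli_ns->ns_pool, limit);
 */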