 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#ifndef _LUSTRE_DLM_H__
#define _LUSTRE_DLM_H__

/** \defgroup ldlm ldlm
 *
 * @{
 */

#if defined(__linux__)
#include <linux/lustre_dlm.h>
#elif defined(__APPLE__)
#include <darwin/lustre_dlm.h>
#elif defined(__WINNT__)
#include <winnt/lustre_dlm.h>
#else
#error Unsupported operating system.
#endif

#include <lustre_lib.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_handles.h>
#include <interval_tree.h> /* for interval_node{}, ldlm_extent */
#define OBD_LDLM_DEVICENAME  "ldlm"

#ifdef HAVE_BGL_SUPPORT
/* 1.5 times the maximum 128 tasks available in VN mode */
#define LDLM_DEFAULT_LRU_SIZE 196
#else
#define LDLM_DEFAULT_LRU_SIZE (100 * cfs_num_online_cpus())
#endif
#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
#define LDLM_CTIME_AGE_LIMIT (10)
#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
typedef enum {
        ELDLM_LOCK_CHANGED = 300,
        ELDLM_LOCK_ABORTED = 301,
        ELDLM_LOCK_REPLACED = 302,
        ELDLM_NO_LOCK_DATA = 303,

        ELDLM_NAMESPACE_EXISTS = 400,
        ELDLM_BAD_NAMESPACE = 401
} ldlm_error_t;

typedef enum {
        LDLM_NAMESPACE_SERVER = 1 << 0,
        LDLM_NAMESPACE_CLIENT = 1 << 1
} ldlm_side_t;
#define LDLM_FL_LOCK_CHANGED   0x000001 /* extent, mode, or resource changed */

/* If the server returns one of these flags, then the lock was put on that list.
 * If the client sends one of these flags (during recovery ONLY!), it wants the
 * lock added to the specified list, no questions asked. -p */
#define LDLM_FL_BLOCK_GRANTED  0x000002
#define LDLM_FL_BLOCK_CONV     0x000004
#define LDLM_FL_BLOCK_WAIT     0x000008

#define LDLM_FL_CBPENDING      0x000010 /* this lock is being destroyed */
#define LDLM_FL_AST_SENT       0x000020 /* blocking or cancel packet was
                                         * queued for sending. */
#define LDLM_FL_WAIT_NOREPROC  0x000040 /* not a real flag, not saved in lock */
#define LDLM_FL_CANCEL         0x000080 /* cancellation callback already run */

/* Lock is being replayed. This could probably be implied by the fact that one
 * of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
#define LDLM_FL_REPLAY         0x000100
#define LDLM_FL_INTENT_ONLY    0x000200 /* don't grant lock, just do intent */
#define LDLM_FL_LOCAL_ONLY     0x000400

/* don't run the cancel callback under ldlm_cli_cancel_unused */
#define LDLM_FL_FAILED         0x000800

#define LDLM_FL_HAS_INTENT     0x001000 /* lock request has intent */
#define LDLM_FL_CANCELING      0x002000 /* lock cancel has already been sent */
#define LDLM_FL_LOCAL          0x004000 /* local lock (ie, no srv/cli split) */
#define LDLM_FL_DISCARD_DATA   0x010000 /* discard (no writeback) on cancel */

#define LDLM_FL_NO_TIMEOUT     0x020000 /* Blocked by group lock - wait
                                         * indefinitely */

/* file & record locking */
#define LDLM_FL_BLOCK_NOWAIT   0x040000 /* server told not to wait if blocked.
                                         * For AGL, the OST will not send a
                                         * glimpse callback. */
#define LDLM_FL_TEST_LOCK      0x080000 /* return blocking lock */
/* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
 * the LVB filling happens _after_ the lock has been granted, so another thread
 * can match it before the LVB has been updated. As a dirty hack, we set
 * LDLM_FL_LVB_READY only after we've done the LVB poop.
 * This is only needed on lov/osc now, where the LVB is actually used and
 * callers must set it in the input flags.
 *
 * The proper fix is to do the granting inside of the completion AST, which can
 * be replaced with an LVB-aware wrapping function for OSC locks. That change is
 * pretty high-risk, though, and would need a lot more testing. */

#define LDLM_FL_LVB_READY      0x100000
/* A lock contributes to the kms calculation until it has finished the part
 * of its cancellation that performs writeback on its dirty pages. It
 * can remain on the granted list during this whole time. Threads racing
 * to update the kms after performing their writeback need to know to
 * exclude each other's locks from the calculation as they walk the granted
 * list. */
#define LDLM_FL_KMS_IGNORE     0x200000
/* Don't put the lock into the LRU list, so that it is not canceled due to
 * aging. Used by MGC locks, which are cancelled only at unmount time or by
 * callback. */
#define LDLM_FL_NO_LRU         0x400000

/* Immediately cancel such locks when they block some other locks. Send a
 * cancel notification to the original lock holder, but expect no reply. This
 * is for clients (like liblustre) that cannot be expected to reliably respond
 * to blocking ASTs. */
#define LDLM_FL_CANCEL_ON_BLOCK 0x800000

/* Flags inherited from the parent lock when doing intents. */
#define LDLM_INHERIT_FLAGS     (LDLM_FL_CANCEL_ON_BLOCK)
/* completion AST to be executed */
#define LDLM_FL_CP_REQD        0x1000000

/* cleanup_resource has already handled the lock */
#define LDLM_FL_CLEANED        0x2000000

/* Optimization hint: LDLM can run the blocking callback from the current
 * context without involving a separate thread, in order to decrease the
 * context-switch rate. */
#define LDLM_FL_ATOMIC_CB      0x4000000
/* It may happen that a client initiates two operations, e.g. unlink and mkdir,
 * such that the server sends a blocking AST for conflicting locks to this
 * client for the first operation, whereas the second operation has canceled
 * this lock and is waiting for the rpc_lock, which is taken by the first
 * operation.
 * LDLM_FL_BL_AST is set by ldlm_callback_handler() on the lock so that the
 * ELC code will not cancel it.
 * LDLM_FL_BL_DONE is set by ldlm_cancel_callback() when the lock cache is
 * dropped, to let ldlm_callback_handler() return EINVAL to the server. It is
 * used when an ELC RPC is already prepared and is waiting for the rpc_lock;
 * at that point it is too late to send a separate CANCEL RPC. */
#define LDLM_FL_BL_AST         0x10000000
#define LDLM_FL_BL_DONE        0x20000000

/* measure lock contention and return -EUSERS if locking contention is high */
#define LDLM_FL_DENY_ON_CONTENTION 0x40000000

/* These are flags that are mapped into the flags and ASTs of blocking locks */
#define LDLM_AST_DISCARD_DATA  0x80000000 /* Add FL_DISCARD to blocking ASTs */

/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
#define LDLM_AST_FLAGS         (LDLM_FL_DISCARD_DATA)
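
/*
 * Illustrative sketch (not itself part of this header): the flags above live
 * in a lock's flag word and are combined and tested bitwise, e.g.:
 *
 *	lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
 *	if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
 *		// cancel immediately instead of blocking
 *
 * l_flags is assumed here from the ldlm code; note that only flags below
 * 0x80000000 are sent over the wire (see the note just below).
 */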
/*
 * --------------------------------------------------------------------------
 * NOTE! Starting from this point, that is, for LDLM_FL_* flags with values
 * above 0x80000000, flags will not be sent over the wire.
 * --------------------------------------------------------------------------
 */

/* Used for marking a lock as a target for -EINTR while emulating a cp_ast
 * sleep + racing with an upcoming bl_ast. */
#define LDLM_FL_FAIL_LOC       0x100000000ULL

/* Used while processing the unused list to know that we have already
 * handled this lock and decided to skip it. */
#define LDLM_FL_SKIPPED        0x200000000ULL

/* The blocking callback is overloaded to perform two functions. These flags
 * indicate which operation should be performed. */
#define LDLM_CB_BLOCKING       1
#define LDLM_CB_CANCELING      2
/* compatibility matrix */
#define LCK_COMPAT_EX     LCK_NL
#define LCK_COMPAT_PW     (LCK_COMPAT_EX | LCK_CR)
#define LCK_COMPAT_PR     (LCK_COMPAT_PW | LCK_PR)
#define LCK_COMPAT_CW     (LCK_COMPAT_PW | LCK_CW)
#define LCK_COMPAT_CR     (LCK_COMPAT_CW | LCK_PR | LCK_PW)
#define LCK_COMPAT_NL     (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
#define LCK_COMPAT_GROUP  (LCK_GROUP | LCK_NL)
#define LCK_COMPAT_COS    (LCK_COS)

extern ldlm_mode_t lck_compat_array[];

static inline void lockmode_verify(ldlm_mode_t mode)
{
        LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
}

static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode)
{
        return (lck_compat_array[exist_mode] & new_mode);
}
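
/*
 * Usage sketch (illustrative only): before granting a new lock, callers walk
 * a resource's granted queue and test each existing mode against the
 * requested one using the matrix above:
 *
 *	lockmode_verify(req_mode);
 *	if (!lockmode_compat(old_lock->l_granted_mode, req_mode))
 *		// conflict: the request must wait or trigger a blocking AST
 *
 * old_lock/req_mode are hypothetical names; the real conflict checks live in
 * the per-type ldlm policy functions, not in this header.
 */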
/*
 * cluster name spaces
 */

#define DLM_OST_NAMESPACE 1
#define DLM_MDS_NAMESPACE 2

/* XXX
 * - do we just separate this by security domains and use a prefix for
 *   multiple namespaces in the same domain?
 */

/* ... waiting_locks_spinlock ... */
struct ldlm_resource;
struct ldlm_namespace;

struct ldlm_pool_ops {
        int (*po_recalc)(struct ldlm_pool *pl);
        int (*po_shrink)(struct ldlm_pool *pl, int nr,
                         unsigned int gfp_mask);
        int (*po_setup)(struct ldlm_pool *pl, int limit);
};
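
/*
 * Wiring sketch (illustrative): a pool implementation fills this ops table
 * and hands it to the pool at init time, roughly as
 *
 *	static struct ldlm_pool_ops my_pool_ops = {
 *		.po_recalc = my_pool_recalc,
 *		.po_shrink = my_pool_shrink,
 *		.po_setup  = my_pool_setup,
 *	};
 *
 * my_* are hypothetical names; the real server/client implementations live
 * in ldlm_pool.c.
 */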
/*
 * Pools thread check interval is one second. Each pool has its own recalc
 * period.
 */
#define LDLM_POOLS_THREAD_PERIOD (1)

/*
 * ~6% margin for modest pools. See ldlm_pool.c for details.
 */
#define LDLM_POOLS_MODEST_MARGIN_SHIFT (4)

/*
 * Default recalc period for server side pools, in seconds.
 */
#define LDLM_POOL_SRV_DEF_RECALC_PERIOD (1)

/*
 * Default recalc period for client side pools, in seconds.
 */
#define LDLM_POOL_CLI_DEF_RECALC_PERIOD (10)
struct ldlm_pool {
        /**
         * Pool proc directory.
         */
        cfs_proc_dir_entry_t    *pl_proc_dir;
        /**
         * Pool name, should be long enough to contain a compound proc entry
         * name.
         */
        /**
         * Lock for protecting slv/clv updates.
         */
        cfs_spinlock_t           pl_lock;
        /**
         * Number of allowed locks in the pool, on both the client and the
         * server side.
         */
        cfs_atomic_t             pl_limit;
        /**
         * Number of granted locks in the pool.
         */
        cfs_atomic_t             pl_granted;
        cfs_atomic_t             pl_grant_rate;
        cfs_atomic_t             pl_cancel_rate;
        /**
         * Server lock volume. Protected by pl_lock.
         */
        __u64                    pl_server_lock_volume;
        /**
         * Current biggest client lock volume. Protected by pl_lock.
         */
        __u64                    pl_client_lock_volume;
        /**
         * Lock volume factor. SLV on the client is calculated as follows:
         * server_slv * lock_volume_factor.
         */
        cfs_atomic_t             pl_lock_volume_factor;
        /**
         * Time when the last SLV from the server was obtained.
         */
        time_t                   pl_recalc_time;
        /**
         * Recalc period for the pool.
         */
        time_t                   pl_recalc_period;
        /**
         * Recalc and shrink ops.
         */
        struct ldlm_pool_ops    *pl_ops;
        /**
         * Number of planned locks for the next period.
         */

        struct lprocfs_stats    *pl_stats;
};
typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
                               void *req_cookie, ldlm_mode_t mode, int flags,
                               void *data);

typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);

struct ldlm_valblock_ops {
        int (*lvbo_init)(struct ldlm_resource *res);
        int (*lvbo_update)(struct ldlm_resource *res,
                           struct ptlrpc_request *r,
                           int increase);
        int (*lvbo_free)(struct ldlm_resource *res);
        /* Return the size of the LVB data, so that an appropriately sized RPC
         * buffer can be reserved. */
        int (*lvbo_size)(struct ldlm_lock *lock);
        /* Called to fill in LVB data in the RPC buffer @buf */
        int (*lvbo_fill)(struct ldlm_lock *lock, void *buf, int buflen);
};
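
/*
 * Wiring sketch (illustrative): a server that keeps per-resource attributes
 * in an LVB provides something like
 *
 *	static struct ldlm_valblock_ops my_lvbo_ops = {
 *		.lvbo_init   = my_lvbo_init,
 *		.lvbo_update = my_lvbo_update,
 *		.lvbo_free   = my_lvbo_free,
 *		.lvbo_size   = my_lvbo_size,
 *		.lvbo_fill   = my_lvbo_fill,
 *	};
 *
 * and stores it in the namespace's ns_lvbo pointer. my_* are hypothetical
 * names; see the OST's lvbo implementation for the real thing.
 */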
typedef enum {
        LDLM_NAMESPACE_GREEDY = 1 << 0,
        LDLM_NAMESPACE_MODEST = 1 << 1
} ldlm_appetite_t;

/*
 * Default values for the "max_nolock_size", "contention_time" and
 * "contended_locks" namespace tunables.
 */
#define NS_DEFAULT_MAX_NOLOCK_BYTES 0
#define NS_DEFAULT_CONTENTION_SECONDS 2
#define NS_DEFAULT_CONTENDED_LOCKS 32
struct ldlm_ns_bucket {
        struct ldlm_namespace      *nsb_namespace;
        /** estimated lock callback time */
        struct adaptive_timeout     nsb_at_estimate;
};

/** ldlm namespace lock stats */

typedef enum {
        LDLM_NS_TYPE_UNKNOWN = 0,
} ldlm_ns_type_t;
struct ldlm_namespace {
        /**
         * Backward link to obd, required for ldlm pool to store new SLV.
         */
        struct obd_device       *ns_obd;

        /**
         * Is this a client-side lock tree?
         */
        ldlm_side_t              ns_client;

        cfs_hash_t              *ns_rs_hash;
        cfs_spinlock_t           ns_lock;

        /**
         * Big refcount (by bucket).
         */
        cfs_atomic_t             ns_bref;

        /**
         * Namespace connect flags supported by server (may be changed via
         * proc, LRU resize may be disabled/enabled).
         */
        __u64                    ns_connect_flags;

        /**
         * Client side original connect flags supported by server.
         */
        __u64                    ns_orig_connect_flags;

        /**
         * Position in global namespace list.
         */
        cfs_list_t               ns_list_chain;

        /**
         * All root resources in namespace.
         */
        cfs_list_t               ns_unused_list;

        unsigned int             ns_max_unused;
        unsigned int             ns_max_age;
        unsigned int             ns_timeouts;

        unsigned int             ns_ctime_age_limit;

        /**
         * Next debug dump, jiffies.
         */
        cfs_time_t               ns_next_dump;

        ldlm_res_policy          ns_policy;
        struct ldlm_valblock_ops *ns_lvbo;

        cfs_waitq_t              ns_waitq;
        struct ldlm_pool         ns_pool;
        ldlm_appetite_t          ns_appetite;

        /**
         * If more than \a ns_contended_locks are found, the resource is
         * considered to be contended.
         */
        unsigned                 ns_contended_locks;

        /**
         * The resource remembers contended state during \a ns_contention_time,
         * in seconds.
         */
        unsigned                 ns_contention_time;

        /**
         * Limit size of nolock requests, in bytes.
         */
        unsigned                 ns_max_nolock_size;

        /**
         * Limit of parallel AST RPC count.
         */
        unsigned                 ns_max_parallel_ast;

        /* callback to cancel locks before replaying them during recovery */
        ldlm_cancel_for_recovery ns_cancel_for_recovery;

        struct lprocfs_stats    *ns_stats;

        unsigned                 ns_stopping:1; /* namespace cleanup */
};
static inline int ns_is_client(struct ldlm_namespace *ns)
{
        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
                                    LDLM_NAMESPACE_SERVER)));
        LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
                ns->ns_client == LDLM_NAMESPACE_SERVER);
        return ns->ns_client == LDLM_NAMESPACE_CLIENT;
}

static inline int ns_is_server(struct ldlm_namespace *ns)
{
        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
                                    LDLM_NAMESPACE_SERVER)));
        LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
                ns->ns_client == LDLM_NAMESPACE_SERVER);
        return ns->ns_client == LDLM_NAMESPACE_SERVER;
}

static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
{
        return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE);
}

static inline void ns_register_cancel(struct ldlm_namespace *ns,
                                      ldlm_cancel_for_recovery arg)
{
        ns->ns_cancel_for_recovery = arg;
}
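
/*
 * Usage sketch (illustrative): a client registers its recovery-time cancel
 * policy once, right after the namespace is created:
 *
 *	ns_register_cancel(ns, my_cancel_for_recovery);
 *
 * my_cancel_for_recovery() is a hypothetical callback returning nonzero for
 * locks that should be canceled rather than replayed during recovery.
 */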
typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
                                      struct ldlm_lock_desc *new, void *data,
                                      int flag);
typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags,
                                        void *data);
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);

struct ldlm_glimpse_work {
        struct ldlm_lock      *gl_lock;  /* lock to glimpse */
        cfs_list_t             gl_list;  /* linkage to other gl work structs */
        __u32                  gl_flags; /* see LDLM_GL_WORK_* below */
        union ldlm_gl_desc    *gl_desc;  /* glimpse descriptor to be packed in
                                          * glimpse callback request */
};

/* the ldlm_glimpse_work is allocated on the stack and should not be freed */
#define LDLM_GL_WORK_NOFREE 0x1
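
/*
 * Usage sketch (illustrative): glimpse work items are collected on a local
 * list and handed to ldlm_glimpse_locks() (declared below, server side only):
 *
 *	struct ldlm_glimpse_work gl_work = {
 *		.gl_lock  = LDLM_LOCK_GET(lock),
 *		.gl_flags = LDLM_GL_WORK_NOFREE,
 *	};
 *	CFS_INIT_LIST_HEAD(&gl_list);
 *	cfs_list_add_tail(&gl_work.gl_list, &gl_list);
 *	rc = ldlm_glimpse_locks(res, &gl_list);
 *
 * gl_list is a caller-local cfs_list_t head; LDLM_GL_WORK_NOFREE tells the
 * machinery not to free the on-stack item. LDLM_LOCK_GET() is defined later
 * in this header.
 */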
/* Interval node data for each LDLM_EXTENT lock */
struct ldlm_interval {
        struct interval_node   li_node;  /* node for tree management */
        cfs_list_t             li_group; /* the locks which have the same
                                          * policy - group of the policy */
};
#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)

/* The interval tree must be accessed while holding the resource lock. */
struct ldlm_interval_tree {
        /* Tree size; this variable is used to count
         * granted PW locks in ldlm_extent_policy(). */
        int                    lit_size;
        ldlm_mode_t            lit_mode; /* lock mode */
        struct interval_node  *lit_root; /* actually ldlm_interval */
};

#define LUSTRE_TRACKS_LOCK_EXP_REFS (0)
typedef enum {
        LCF_ASYNC  = 0x1, /* Cancel locks asynchronously. */
        LCF_LOCAL  = 0x2, /* Cancel locks locally, without notifying the
                           * server. */
        LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST in the
                           * same RPC. */
} ldlm_cancel_flags_t;

struct ldlm_flock {
        __u64                  blocking_owner;
        struct obd_export     *blocking_export;
        /* Protected by the hash lock */
        __u32                  blocking_refs;
};

typedef union {
        struct ldlm_extent     l_extent;
        struct ldlm_flock      l_flock;
        struct ldlm_inodebits  l_inodebits;
} ldlm_policy_data_t;

void ldlm_convert_policy_to_wire(ldlm_type_t type,
                                 const ldlm_policy_data_t *lpolicy,
                                 ldlm_wire_policy_data_t *wpolicy);
void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
                                  const ldlm_wire_policy_data_t *wpolicy,
                                  ldlm_policy_data_t *lpolicy);
struct ldlm_lock {
        /**
         * Must be first in the structure.
         */
        struct portals_handle    l_handle;
        /**
         * Lock reference count.
         */
        /**
         * Internal spinlock protects l_resource. We should hold this lock
         * first before grabbing res_lock.
         */
        cfs_spinlock_t           l_lock;
        /**
         * ldlm_lock_change_resource() can change this.
         */
        struct ldlm_resource    *l_resource;
        /**
         * Protected by ns_hash_lock. List item for client side LRU list.
         */
        /**
         * Protected by lr_lock, linkage to resource's lock queues.
         */
        cfs_list_t               l_res_link;
        /**
         * Tree node for ldlm_extent.
         */
        struct ldlm_interval    *l_tree_node;
        /**
         * Protected by per-bucket exp->exp_lock_hash locks. Per export hash
         * of locks.
         */
        cfs_hlist_node_t         l_exp_hash;
        /**
         * Protected by lr_lock. Requested mode.
         */
        /**
         * Protected by per-bucket exp->exp_flock_hash locks. Per export hash
         * of flock locks.
         */
        cfs_hlist_node_t         l_exp_flock_hash;
        ldlm_mode_t              l_req_mode;
        /**
         * Granted mode, also protected by lr_lock.
         */
        ldlm_mode_t              l_granted_mode;
        /**
         * Lock enqueue completion handler.
         */
        ldlm_completion_callback l_completion_ast;
        /**
         * Lock blocking AST handler.
         */
        ldlm_blocking_callback   l_blocking_ast;
        /**
         * Lock glimpse handler.
         */
        ldlm_glimpse_callback    l_glimpse_ast;
        ldlm_weigh_callback      l_weigh_ast;

        struct obd_export       *l_export;
        /**
         * Lock connection export.
         */
        struct obd_export       *l_conn_export;

        /**
         * Remote lock handle.
         */
        struct lustre_handle     l_remote_handle;

        ldlm_policy_data_t       l_policy_data;

        /**
         * Protected by lr_lock. Various counters: readers, writers, etc.
         */
        /**
         * If the lock is granted, a process sleeps on this waitq to learn when
         * it's no longer in use. If the lock is not granted, a process sleeps
         * on this waitq to learn when it becomes granted.
         */
        /**
         * Seconds. It will be updated if there is any activity related to
         * the lock, e.g. enqueuing the lock or sending a blocking AST.
         */
        cfs_time_t               l_last_activity;
        /**
         * Jiffies. Should be converted to time if needed.
         */
        cfs_time_t               l_last_used;

        struct ldlm_extent       l_req_extent;

        unsigned int             l_failed:1,
        /**
         * Set for locks that were removed from the class hash table and will
         * be destroyed when the last reference to them is released. Set by
         * ldlm_lock_destroy_internal().
         *
         * Protected by lock and resource locks.
         */
                                 l_destroyed:1,
        /*
         * It's set in lock_res_and_lock() and unset in unlock_res_and_lock().
         *
         * NB: compared with check_res_locked(), checking this bit is cheaper.
         * Also, spin_is_locked() is deprecated for kernel code; one reason is
         * that it only works on SMP, so users need to add extra macros like
         * LASSERT_SPIN_LOCKED for uniprocessor kernels.
         */
                                 l_res_locked:1,
        /*
         * It's set once we call ldlm_add_waiting_lock_res_locked()
         * to start the lock-timeout timer and it will never be reset.
         *
         * Protected by lock_res_and_lock().
         */
                                 l_waited:1,
        /**
         * Flag whether this is a server namespace lock.
         */
                                 l_ns_srv:1;

        /*
         * Client-side-only members.
         */

        /**
         * Temporary storage for an LVB received during an enqueue operation.
         */

        /*
         * Server-side-only members.
         */

        /** connection cookie for the client that originated the operation. */
        __u64                    l_client_cookie;

        /**
         * Protected by elt_lock. Callbacks pending.
         */
        cfs_list_t               l_pending_chain;

        cfs_time_t               l_callback_timeout;

        /**
         * Pid which created this lock.
         */

        /**
         * For ldlm_add_ast_work_item().
         */

        struct ldlm_lock        *l_blocking_lock;

        /**
         * Protected by lr_lock, linkages to "skip lists".
         */
        cfs_list_t               l_sl_mode;
        cfs_list_t               l_sl_policy;
        struct lu_ref            l_reference;
#if LUSTRE_TRACKS_LOCK_EXP_REFS
        /* Debugging stuff for bug 20498, for tracking export references. */
        /** number of export references taken */
        int                      l_exp_refs_nr;
        /** link all locks referencing one export */
        cfs_list_t               l_exp_refs_link;
        /** referenced export object */
        struct obd_export       *l_exp_refs_target;
#endif
        /** export blocking dlm lock list, protected by
         * l_export->exp_bl_list_lock.
         * Lock order of waiting_locks_spinlock, exp_bl_list_lock and res lock
         * is: res lock -> exp_bl_list_lock -> waiting_locks_spinlock. */
        cfs_list_t               l_exp_list;
};
struct ldlm_resource {
        struct ldlm_ns_bucket *lr_ns_bucket;

        /* protected by ns_hash_lock */
        cfs_hlist_node_t       lr_hash;
        cfs_spinlock_t         lr_lock;

        /* protected by lr_lock */
        cfs_list_t             lr_granted;
        cfs_list_t             lr_converting;
        cfs_list_t             lr_waiting;
        ldlm_mode_t            lr_most_restr;
        ldlm_type_t            lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK} */
        struct ldlm_res_id     lr_name;
        cfs_atomic_t           lr_refcount;

        struct ldlm_interval_tree lr_itree[LCK_MODE_NUM]; /* interval trees */

        /* Server-side-only lock value block elements */
        /** to serialize lvbo_init */
        cfs_mutex_t            lr_lvb_mutex;
        /** protected by lr_lock */

        /* when the resource was last considered contended */
        cfs_time_t             lr_contention_time;
        /**
         * List of references to this resource. For debugging.
         */
        struct lu_ref          lr_reference;

        struct inode          *lr_lvb_inode;
};
static inline char *
ldlm_ns_name(struct ldlm_namespace *ns)
{
        return ns->ns_rs_hash->hs_name;
}

static inline struct ldlm_namespace *
ldlm_res_to_ns(struct ldlm_resource *res)
{
        return res->lr_ns_bucket->nsb_namespace;
}

static inline struct ldlm_namespace *
ldlm_lock_to_ns(struct ldlm_lock *lock)
{
        return ldlm_res_to_ns(lock->l_resource);
}

static inline char *
ldlm_lock_to_ns_name(struct ldlm_lock *lock)
{
        return ldlm_ns_name(ldlm_lock_to_ns(lock));
}

static inline struct adaptive_timeout *
ldlm_lock_to_ns_at(struct ldlm_lock *lock)
{
        return &lock->l_resource->lr_ns_bucket->nsb_at_estimate;
}
static inline int ldlm_lvbo_init(struct ldlm_resource *res)
{
        struct ldlm_namespace *ns = ldlm_res_to_ns(res);

        if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_init != NULL)
                return ns->ns_lvbo->lvbo_init(res);

        return 0;
}

static inline int ldlm_lvbo_size(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_size != NULL)
                return ns->ns_lvbo->lvbo_size(lock);

        return 0;
}

static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        if (ns->ns_lvbo != NULL) {
                LASSERT(ns->ns_lvbo->lvbo_fill != NULL);
                return ns->ns_lvbo->lvbo_fill(lock, buf, len);
        }

        return 0;
}
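
/*
 * Usage sketch (illustrative): on the server side an enqueue reply buffer is
 * sized and filled roughly as
 *
 *	int lvb_len = ldlm_lvbo_size(lock);
 *	// reserve lvb_len bytes in the reply, obtain buf, then:
 *	rc = ldlm_lvbo_fill(lock, buf, lvb_len);
 *
 * buf here stands in for the reply buffer obtained from the request packing
 * machinery; both helpers fall back to 0 when no ns_lvbo is registered.
 */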
struct ldlm_ast_work {
        struct ldlm_lock      *w_lock;
        struct ldlm_lock_desc  w_desc;
};

/* common ldlm_enqueue parameters */
struct ldlm_enqueue_info {
        __u32  ei_type;   /* Type of the lock being enqueued. */
        __u32  ei_mode;   /* Mode of the lock being enqueued. */
        void  *ei_cb_bl;  /* blocking lock callback */
        void  *ei_cb_cp;  /* lock completion callback */
        void  *ei_cb_gl;  /* lock glimpse callback */
        void  *ei_cb_wg;  /* lock weigh callback */
        void  *ei_cbdata; /* Data to be passed into callbacks. */
};
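
/*
 * Usage sketch (illustrative): callers typically fill this on the stack
 * before calling ldlm_cli_enqueue() (declared later in this header), e.g.
 *
 *	struct ldlm_enqueue_info einfo = {
 *		.ei_type   = LDLM_EXTENT,
 *		.ei_mode   = LCK_PR,
 *		.ei_cb_bl  = my_blocking_ast,
 *		.ei_cb_cp  = ldlm_completion_ast,
 *		.ei_cbdata = my_data,
 *	};
 *
 * my_blocking_ast/my_data are hypothetical; ldlm_completion_ast() is the
 * stock completion handler declared below.
 */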
extern struct obd_ops ldlm_obd_ops;

extern char *ldlm_lockname[];
extern char *ldlm_typename[];
extern char *ldlm_it2str(int it);

#ifdef LIBCFS_DEBUG
#define ldlm_lock_debug(msgdata, mask, cdls, lock, fmt, a...) do {      \
        CFS_CHECK_STACK(msgdata, mask, cdls);                           \
                                                                        \
        if (((mask) & D_CANTMASK) != 0 ||                               \
            ((libcfs_debug & (mask)) != 0 &&                            \
             (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0))          \
                _ldlm_lock_debug(lock, msgdata, fmt, ##a);              \
} while (0)

void _ldlm_lock_debug(struct ldlm_lock *lock,
                      struct libcfs_debug_msg_data *data,
                      const char *fmt, ...)
        __attribute__ ((format (printf, 3, 4)));

#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do {                    \
        static cfs_debug_limit_state_t _ldlm_cdls;                      \
        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, &_ldlm_cdls);         \
        ldlm_lock_debug(&msgdata, mask, &_ldlm_cdls, lock, "### " fmt , ##a);\
} while (0)

#define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a)
#define LDLM_WARN(lock, fmt, a...)  LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a)

#define LDLM_DEBUG(lock, fmt, a...) do {                                \
        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL);          \
        ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, "### " fmt , ##a);\
} while (0)
#else /* !LIBCFS_DEBUG */
# define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) ((void)0)
# define LDLM_DEBUG(lock, fmt, a...) ((void)0)
# define LDLM_ERROR(lock, fmt, a...) ((void)0)
#endif

#define LDLM_DEBUG_NOLOCK(format, a...)                 \
        CDEBUG(D_DLMTRACE, "### " format "\n" , ##a)
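
/*
 * Usage sketch (illustrative): both macros take printf-style arguments:
 *
 *	LDLM_DEBUG(lock, "grant failed, rc = %d", rc);
 *	LDLM_DEBUG_NOLOCK("no lock to print, ns %s", name);
 *
 * Messages are prefixed with "### " and, for LDLM_DEBUG(), include a dump of
 * the lock's state via _ldlm_lock_debug().
 */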
typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, int *flags,
                                      int first_enq, ldlm_error_t *err,
                                      cfs_list_t *work_list);

#define LDLM_ITER_CONTINUE 1 /* keep iterating */
#define LDLM_ITER_STOP     2 /* stop iterating */

typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *);
typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);

int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                          void *closure);
void ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
                            void *closure);

int ldlm_replay_locks(struct obd_import *imp);
int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
                          ldlm_iterator_t iter, void *data);

int ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data);

__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);

struct ldlm_callback_suite {
        ldlm_completion_callback lcs_completion;
        ldlm_blocking_callback   lcs_blocking;
        ldlm_glimpse_callback    lcs_glimpse;
        ldlm_weigh_callback      lcs_weigh;
};
#ifdef HAVE_SERVER_SUPPORT
int ldlm_server_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
                             void *data, int flag);
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list);
int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback,
                        ldlm_blocking_callback, ldlm_glimpse_callback);
int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req,
                         const struct ldlm_callback_suite *cbs);
int ldlm_handle_convert(struct ptlrpc_request *req);
int ldlm_handle_convert0(struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req);
int ldlm_handle_cancel(struct ptlrpc_request *req);
int ldlm_request_cancel(struct ptlrpc_request *req,
                        const struct ldlm_request *dlm_req, int first);
void ldlm_revoke_export_locks(struct obd_export *exp);
#endif
int ldlm_del_waiting_lock(struct ldlm_lock *lock);
int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout);
int ldlm_get_ref(void);
void ldlm_put_ref(void);
int ldlm_init_export(struct obd_export *exp);
void ldlm_destroy_export(struct obd_export *exp);
#ifdef HAVE_SERVER_SUPPORT
ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res);
#endif
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
void ldlm_lock2handle(const struct ldlm_lock *lock,
                      struct lustre_handle *lockh);
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, int flags);
void ldlm_cancel_callback(struct ldlm_lock *);
int ldlm_lock_remove_from_lru(struct ldlm_lock *);
int ldlm_lock_set_data(struct lustre_handle *, void *);

static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
{
        return __ldlm_handle2lock(h, 0);
}

#define LDLM_LOCK_REF_DEL(lock) \
        lu_ref_del(&lock->l_reference, "handle", cfs_current())

static inline struct ldlm_lock *
ldlm_handle2lock_long(const struct lustre_handle *h, int flags)
{
        struct ldlm_lock *lock;

        lock = __ldlm_handle2lock(h, flags);
        if (lock != NULL)
                LDLM_LOCK_REF_DEL(lock);
        return lock;
}

static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
                                       struct ptlrpc_request *r, int increase)
{
        if (ldlm_res_to_ns(res)->ns_lvbo &&
            ldlm_res_to_ns(res)->ns_lvbo->lvbo_update) {
                return ldlm_res_to_ns(res)->ns_lvbo->lvbo_update(res, r,
                                                                 increase);
        }
        return 0;
}
int ldlm_error2errno(ldlm_error_t error);
ldlm_error_t ldlm_errno2error(int err_no); /* don't call it 'errno': this
                                            * confuses user-space. */
#if LUSTRE_TRACKS_LOCK_EXP_REFS
void ldlm_dump_export_locks(struct obd_export *exp);
#endif
/**
 * Release a temporary lock reference obtained by ldlm_handle2lock() or
 * __ldlm_handle2lock().
 */
#define LDLM_LOCK_PUT(lock)                     \
do {                                            \
        LDLM_LOCK_REF_DEL(lock);                \
        /*LDLM_DEBUG((lock), "put");*/          \
        ldlm_lock_put(lock);                    \
} while (0)

/**
 * Release a lock reference obtained by some other means (see
 * LDLM_LOCK_GET()).
 */
#define LDLM_LOCK_RELEASE(lock)                 \
do {                                            \
        /*LDLM_DEBUG((lock), "put");*/          \
        ldlm_lock_put(lock);                    \
} while (0)

#define LDLM_LOCK_GET(lock)                     \
({                                              \
        ldlm_lock_get(lock);                    \
        /*LDLM_DEBUG((lock), "get");*/          \
        lock;                                   \
})

#define ldlm_lock_list_put(head, member, count)                         \
({                                                                      \
        struct ldlm_lock *_lock, *_next;                                \
        int c = count;                                                  \
        cfs_list_for_each_entry_safe(_lock, _next, head, member) {      \
                if (c-- == 0)                                           \
                        break;                                          \
                cfs_list_del_init(&_lock->member);                      \
                LDLM_LOCK_RELEASE(_lock);                               \
        }                                                               \
        LASSERT(c <= 0);                                                \
})
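
/*
 * Usage sketch (illustrative): the common pattern for temporary references:
 *
 *	struct ldlm_lock *lock = ldlm_handle2lock(lockh);
 *	if (lock != NULL) {
 *		// inspect or modify the lock
 *		LDLM_LOCK_PUT(lock);
 *	}
 */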
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock_destroy(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
void ldlm_lock_fail_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                            const struct ldlm_res_id *, ldlm_type_t type,
                            ldlm_policy_data_t *, ldlm_mode_t mode,
                            struct lustre_handle *, int unref);
ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
                                        int *bits);
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        __u32 *flags);
void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_reprocess_all(struct ldlm_resource *res);
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
void ldlm_lock_dump_handle(int level, struct lustre_handle *);
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
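
/*
 * Usage sketch (illustrative): user references are taken per mode and must
 * be balanced, e.g.
 *
 *	ldlm_lock_addref(&lockh, LCK_PR);
 *	// ... use the data protected by the lock ...
 *	ldlm_lock_decref(&lockh, LCK_PR);
 *
 * ldlm_lock_decref_and_cancel() additionally marks the lock for cancellation
 * once the last reader/writer reference is gone.
 */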
struct ldlm_namespace *
ldlm_namespace_new(struct obd_device *obd, char *name,
                   ldlm_side_t client, ldlm_appetite_t apt,
                   ldlm_ns_type_t ns_type);
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags);
void ldlm_namespace_free(struct ldlm_namespace *ns,
                         struct obd_import *imp, int force);
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client);
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client);
void ldlm_namespace_get(struct ldlm_namespace *ns);
void ldlm_namespace_put(struct ldlm_namespace *ns);
int ldlm_proc_setup(void);
#ifdef LPROCFS
void ldlm_proc_cleanup(void);
#else
static inline void ldlm_proc_cleanup(void) {}
#endif
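
/*
 * Usage sketch (illustrative): a client obd sets up its lock namespace
 * roughly as
 *
 *	ns = ldlm_namespace_new(obd, "my-namespace", LDLM_NAMESPACE_CLIENT,
 *				LDLM_NAMESPACE_GREEDY, LDLM_NS_TYPE_UNKNOWN);
 *	...
 *	ldlm_namespace_free(ns, imp, force);
 *
 * "my-namespace" is a placeholder; real callers derive the name from the obd
 * device name and pass the appropriate LDLM_NS_TYPE_* value.
 */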
/* resource.c - internal */
struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
                                        struct ldlm_resource *parent,
                                        const struct ldlm_res_id *,
                                        ldlm_type_t type, int create);
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
int ldlm_resource_putref(struct ldlm_resource *res);
void ldlm_resource_add_lock(struct ldlm_resource *res,
                            cfs_list_t *head,
                            struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
void ldlm_namespace_dump(int level, struct ldlm_namespace *);
void ldlm_resource_dump(int level, struct ldlm_resource *);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
                              const struct ldlm_res_id *);

#define LDLM_RESOURCE_ADDREF(res) do {                                  \
        lu_ref_add_atomic(&(res)->lr_reference, __FUNCTION__, cfs_current()); \
} while (0)

#define LDLM_RESOURCE_DELREF(res) do {                                  \
        lu_ref_del(&(res)->lr_reference, __FUNCTION__, cfs_current());  \
} while (0)
/* ldlm_request.c */
int ldlm_expired_completion_wait(void *data);
int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock);
int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                      void *data, int flag);
int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
int ldlm_completion_ast_async(struct ldlm_lock *lock, int flags, void *data);
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                     struct ldlm_enqueue_info *einfo,
                     const struct ldlm_res_id *res_id,
                     ldlm_policy_data_t const *policy, int *flags,
                     void *lvb, __u32 lvb_len, struct lustre_handle *lockh,
                     int async);
int ldlm_prep_enqueue_req(struct obd_export *exp,
                          struct ptlrpc_request *req,
                          cfs_list_t *cancels,
                          int count);
int ldlm_prep_elc_req(struct obd_export *exp,
                      struct ptlrpc_request *req,
                      int version, int opc, int canceloff,
                      cfs_list_t *cancels, int count);
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                          ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
                          int *flags, void *lvb, __u32 lvb_len,
                          struct lustre_handle *lockh, int rc);
int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                           const struct ldlm_res_id *res_id,
                           ldlm_type_t type, ldlm_policy_data_t *policy,
                           ldlm_mode_t mode, int *flags,
                           ldlm_blocking_callback blocking,
                           ldlm_completion_callback completion,
                           ldlm_glimpse_callback glimpse,
                           void *data, __u32 lvb_len,
                           const __u64 *client_cookie,
                           struct lustre_handle *lockh);
int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
                    void *data, __u32 data_len);
int ldlm_cli_convert(struct lustre_handle *, int new_mode, __u32 *flags);
int ldlm_cli_update_pool(struct ptlrpc_request *req);
int ldlm_cli_cancel(struct lustre_handle *lockh);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
                           ldlm_cancel_flags_t flags, void *opaque);
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                    const struct ldlm_res_id *res_id,
                                    ldlm_policy_data_t *policy,
                                    ldlm_mode_t mode,
                                    ldlm_cancel_flags_t flags,
                                    void *opaque);
int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *head,
                        int count, ldlm_cancel_flags_t flags);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
                               cfs_list_t *cancels,
                               ldlm_policy_data_t *policy,
                               ldlm_mode_t mode, int lock_flags,
                               ldlm_cancel_flags_t cancel_flags, void *opaque);
int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
                               ldlm_cancel_flags_t flags);
int ldlm_cli_cancel_list(cfs_list_t *head, int count,
                         struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
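
/*
 * Usage sketch (illustrative): dropping all unused cached locks on one
 * resource, e.g. before destroying the underlying object:
 *
 *	rc = ldlm_cli_cancel_unused_resource(ns, &res_id, NULL, LCK_MINMODE,
 *					     LCF_ASYNC, NULL);
 *
 * A NULL policy/opaque means "match any", and LCK_MINMODE likewise matches
 * any lock mode.
 */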
/* This has to be here because recursive inclusion sucks. */
int intent_disposition(struct ldlm_reply *rep, int flag);
void intent_set_disposition(struct ldlm_reply *rep, int flag);

/* ioctls for trying requests */
#define IOC_LDLM_TYPE           'f'
#define IOC_LDLM_MIN_NR         40

#define IOC_LDLM_TEST           _IOWR('f', 40, long)
#define IOC_LDLM_DUMP           _IOWR('f', 41, long)
#define IOC_LDLM_REGRESS_START  _IOWR('f', 42, long)
#define IOC_LDLM_REGRESS_STOP   _IOWR('f', 43, long)
#define IOC_LDLM_MAX_NR         43
1306 * "Modes" of acquiring lock_res, necessary to tell lockdep that taking more
1307 * than one lock_res is dead-lock safe.
1309 enum lock_res_type {
1314 static inline void lock_res(struct ldlm_resource *res)
1316 cfs_spin_lock(&res->lr_lock);
1319 static inline void lock_res_nested(struct ldlm_resource *res,
1320 enum lock_res_type mode)
1322 cfs_spin_lock_nested(&res->lr_lock, mode);
1325 static inline void unlock_res(struct ldlm_resource *res)
1327 cfs_spin_unlock(&res->lr_lock);
1330 static inline void check_res_locked(struct ldlm_resource *res)
1332 LASSERT_SPIN_LOCKED(&res->lr_lock);
1335 struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock);
1336 void unlock_res_and_lock(struct ldlm_lock *lock);
/* ldlm_pool.c */
void ldlm_pools_recalc(ldlm_side_t client);
int ldlm_pools_init(void);
void ldlm_pools_fini(void);

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, ldlm_side_t client);
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
                     unsigned int gfp_mask);
void ldlm_pool_fini(struct ldlm_pool *pl);
int ldlm_pool_setup(struct ldlm_pool *pl, int limit);
int ldlm_pool_recalc(struct ldlm_pool *pl);
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl);
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl);
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl);
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl);
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv);
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv);
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit);
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);

/** @} ldlm */

#endif /* _LUSTRE_DLM_H__ */