/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * (visit-tags-table FILE)
 * vim:expandtab:shiftwidth=8:tabstop=8:
 */

#ifndef _LUSTRE_DLM_H__
#define _LUSTRE_DLM_H__

#if defined(__linux__)
#include <linux/lustre_dlm.h>
#elif defined(__APPLE__)
#include <darwin/lustre_dlm.h>
#elif defined(__WINNT__)
#include <winnt/lustre_dlm.h>
#else
#error Unsupported operating system.
#endif

#include <lustre_lib.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_handles.h>
#include <lustre_export.h> /* for obd_export, for LDLM_DEBUG */
#include <interval_tree.h> /* for interval_node{}, ldlm_extent */

#define OBD_LDLM_DEVICENAME  "ldlm"

#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
#define LDLM_CTIME_AGE_LIMIT (10)
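/*
 * Illustrative note (not from the original source): LDLM_DEFAULT_LRU_SIZE
 * scales with the CPU count, so on a hypothetical 8-core client the default
 * per-namespace LRU would hold 100 * 8 = 800 unused locks, and
 * LDLM_DEFAULT_MAX_ALIVE lets an unused lock stay cached for up to
 * 36000 seconds (10 hours) before it is aged out.
 */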
ELDLM_LOCK_CHANGED = 300,
ELDLM_LOCK_ABORTED = 301,
ELDLM_LOCK_REPLACED = 302,
ELDLM_NO_LOCK_DATA = 303,

ELDLM_NAMESPACE_EXISTS = 400,
ELDLM_BAD_NAMESPACE = 401

LDLM_NAMESPACE_SERVER = 1 << 0,
LDLM_NAMESPACE_CLIENT = 1 << 1

#define LDLM_FL_LOCK_CHANGED   0x000001 /* extent, mode, or resource changed */

/* If the server returns one of these flags, then the lock was put on that
 * list. If the client sends one of these flags (during recovery ONLY!), it
 * wants the lock added to the specified list, no questions asked. -p */
#define LDLM_FL_BLOCK_GRANTED  0x000002
#define LDLM_FL_BLOCK_CONV     0x000004
#define LDLM_FL_BLOCK_WAIT     0x000008

#define LDLM_FL_CBPENDING      0x000010 /* this lock is being destroyed */
#define LDLM_FL_AST_SENT       0x000020 /* blocking or cancel packet was sent */
#define LDLM_FL_WAIT_NOREPROC  0x000040 /* not a real flag, not saved in lock */
#define LDLM_FL_CANCEL         0x000080 /* cancellation callback already run */

/* Lock is being replayed. This could probably be implied by the fact that one
 * of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
#define LDLM_FL_REPLAY         0x000100

#define LDLM_FL_INTENT_ONLY    0x000200 /* don't grant lock, just do intent */
#define LDLM_FL_LOCAL_ONLY     0x000400 /* see ldlm_cli_cancel_unused */

/* don't run the cancel callback under ldlm_cli_cancel_unused */
#define LDLM_FL_FAILED         0x000800

#define LDLM_FL_HAS_INTENT     0x001000 /* lock request has intent */
#define LDLM_FL_CANCELING      0x002000 /* lock cancel has already been sent */
#define LDLM_FL_LOCAL          0x004000 /* local lock (i.e. no srv/cli split) */
#define LDLM_FL_WARN           0x008000 /* see ldlm_cli_cancel_unused */
#define LDLM_FL_DISCARD_DATA   0x010000 /* discard (no writeback) on cancel */

#define LDLM_FL_NO_TIMEOUT     0x020000 /* blocked by group lock - wait
                                         * indefinitely */

/* file & record locking */
#define LDLM_FL_BLOCK_NOWAIT   0x040000 // server told not to wait if blocked
#define LDLM_FL_TEST_LOCK      0x080000 // return blocking lock

/* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
 * the LVB filling happens _after_ the lock has been granted, so another thread
 * can match it before the LVB has been updated. As a dirty hack, we set
 * LDLM_FL_LVB_READY only after the LVB has been filled in.
 * This is only needed on lov/osc now, where the LVB is actually used and
 * callers must set it in the input flags.
 *
 * The proper fix is to do the granting inside of the completion AST, which can
 * be replaced with an LVB-aware wrapping function for OSC locks. That change
 * is pretty high-risk, though, and would need a lot more testing. */
#define LDLM_FL_LVB_READY      0x100000

/* A lock contributes to the kms calculation until it has finished the part
 * of its cancellation that performs writeback on its dirty pages. It can
 * remain on the granted list during this whole time. Threads racing to
 * update the kms after performing their writeback need to know to exclude
 * each other's locks from the calculation as they walk the granted list. */
#define LDLM_FL_KMS_IGNORE     0x200000
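/*
 * Illustrative sketch (not part of the original header): a thread updating
 * the known minimum size (kms) after writeback walks the granted list and
 * skips locks flagged with LDLM_FL_KMS_IGNORE, roughly along these lines.
 * l_flags is the lock's flags word (not shown in this excerpt), and this loop
 * is a simplification, not the actual ldlm_extent_shift_kms() implementation:
 *
 *      struct ldlm_lock *lck;
 *      __u64 kms = 0;
 *
 *      list_for_each_entry(lck, &res->lr_granted, l_res_link) {
 *              if (lck->l_flags & LDLM_FL_KMS_IGNORE)
 *                      continue;
 *              if (lck->l_policy_data.l_extent.end + 1 > kms)
 *                      kms = lck->l_policy_data.l_extent.end + 1;
 *      }
 */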
/* Don't drop a lock covering an mmapped file from the LRU */
#define LDLM_FL_NO_LRU         0x400000

/* Immediately cancel such locks when they block some other locks. Send a
 * cancel notification to the original lock holder, but expect no reply. */
#define LDLM_FL_CANCEL_ON_BLOCK 0x800000

/* Flags inherited from the parent lock when doing intents. */
#define LDLM_INHERIT_FLAGS     (LDLM_FL_CANCEL_ON_BLOCK)

/* These are flags that are mapped into the flags and ASTs of blocking locks */
#define LDLM_AST_DISCARD_DATA  0x80000000 /* Add FL_DISCARD to blocking ASTs */
/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
#define LDLM_AST_FLAGS         (LDLM_FL_DISCARD_DATA)

/* completion AST to be executed */
#define LDLM_FL_CP_REQD        0x1000000

/* cleanup_resource has already handled the lock */
#define LDLM_FL_CLEANED        0x2000000

/* Optimization hint: LDLM can run the blocking callback from the current
 * context without involving a separate thread, in order to decrease the
 * context-switch rate. */
#define LDLM_FL_ATOMIC_CB      0x4000000

/* Cancel lock asynchronously. See ldlm_cli_cancel_unused_resource. */
#define LDLM_FL_ASYNC          0x8000000

/* It may happen that a client initiates two operations, e.g. unlink and
 * mkdir, such that the server sends a blocking AST for conflicting locks to
 * this client for the first operation, whereas the second operation has
 * canceled this lock and is waiting for rpc_lock, which is taken by the
 * first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() on the
 * lock to not allow the ELC code to cancel it. LDLM_FL_BL_DONE is set by
 * ldlm_cancel_callback() when the lock cache is dropped, to let
 * ldlm_callback_handler() return EINVAL to the server. It is used when the
 * ELC RPC is already prepared and is waiting for rpc_lock; it is too late
 * to send a separate CANCEL RPC. */
#define LDLM_FL_BL_AST         0x10000000
#define LDLM_FL_BL_DONE        0x20000000

/* Measure lock contention and return -EUSERS if lock contention is high */
#define LDLM_FL_DENY_ON_CONTENTION 0x40000000

/* The blocking callback is overloaded to perform two functions. These flags
 * indicate which operation should be performed. */
#define LDLM_CB_BLOCKING 1
#define LDLM_CB_CANCELING 2

/* compatibility matrix */
#define LCK_COMPAT_EX    LCK_NL
#define LCK_COMPAT_PW    (LCK_COMPAT_EX | LCK_CR)
#define LCK_COMPAT_PR    (LCK_COMPAT_PW | LCK_PR)
#define LCK_COMPAT_CW    (LCK_COMPAT_PW | LCK_CW)
#define LCK_COMPAT_CR    (LCK_COMPAT_CW | LCK_PR | LCK_PW)
#define LCK_COMPAT_NL    (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
#define LCK_COMPAT_GROUP (LCK_GROUP | LCK_NL)

extern ldlm_mode_t lck_compat_array[];

static inline void lockmode_verify(ldlm_mode_t mode)
{
        LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
}

static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode)
{
        return (lck_compat_array[exist_mode] & new_mode);
}
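/*
 * Illustrative example (not part of the original header): lockmode_compat()
 * answers "may a lock of new_mode be granted while a lock of exist_mode is
 * held on the same resource?" by indexing the compatibility table above.
 * For instance, with an existing PR lock, a new PR request is compatible
 * while a new PW request is not:
 *
 *      lockmode_verify(LCK_PR);
 *      if (lockmode_compat(LCK_PR, LCK_PR))   // non-zero: PR vs PR is OK
 *              ...
 *      if (!lockmode_compat(LCK_PR, LCK_PW))  // zero: PW conflicts with PR
 *              ...
 */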
/*
 * cluster name spaces
 */

#define DLM_OST_NAMESPACE 1
#define DLM_MDS_NAMESPACE 2

/* - do we just separate this by security domains and use a prefix for
 *   multiple namespaces in the same domain?
 */
struct ldlm_resource;
struct ldlm_namespace;

struct ldlm_pool_ops {
        int (*po_recalc)(struct ldlm_pool *pl);
        int (*po_shrink)(struct ldlm_pool *pl, int nr,
                         unsigned int gfp_mask);
        int (*po_setup)(struct ldlm_pool *pl, int limit);
};

/* One second for pools thread check interval. */
#define LDLM_POOLS_THREAD_PERIOD (1)

/* 5% margin for modest pools. See ldlm_pool.c for details. */
#define LDLM_POOLS_MODEST_MARGIN (5)

/* A change of SLV, in percent, after which we want to wake up the pools
 * thread ASAP. */
#define LDLM_POOLS_FAST_SLV_CHANGE (50)

        /* Pool proc directory. */
        cfs_proc_dir_entry_t *pl_proc_dir;
        /* Pool name; should be long enough to contain a compound proc entry
         * name. */
        /* Lock for protecting slv/clv updates. */
        /* Number of allowed locks in the pool, both client and server side. */
        /* Number of granted locks in the pool. */
        atomic_t pl_grant_rate;
        atomic_t pl_cancel_rate;
        /* Grant speed (GR - CR) per T. */
        atomic_t pl_grant_speed;
        /* Server lock volume. Protected by pl_lock. */
        __u64 pl_server_lock_volume;
        /* Current biggest client lock volume. Protected by pl_lock. */
        __u64 pl_client_lock_volume;
        /* Lock volume factor. SLV on the client is calculated as follows:
         * server_slv * lock_volume_factor. */
        atomic_t pl_lock_volume_factor;
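/*
 * Illustrative sketch (not from the original source): per the comment above,
 * the client-side lock volume is derived from the server-provided SLV scaled
 * by the lock volume factor, roughly:
 *
 *      __u64 client_slv;
 *
 *      client_slv = ldlm_pool_get_slv(pl) *
 *                   atomic_read(&pl->pl_lock_volume_factor);
 *
 * (ldlm_pool_get_slv() is declared at the bottom of this header; the exact
 * scaling done in ldlm_pool.c may differ from this simplified form.)
 */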
        /* Time when the last SLV from the server was obtained. */
        time_t pl_recalc_time;
        /* Recalc and shrink ops. */
        struct ldlm_pool_ops *pl_ops;
        /* Planned number of granted locks for the next T. */
        /* Grant plan step for the next T. */
        struct lprocfs_stats *pl_stats;
typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
                               void *req_cookie, ldlm_mode_t mode, int flags,
                               void *data);

struct ldlm_valblock_ops {
        int (*lvbo_init)(struct ldlm_resource *res);
        int (*lvbo_update)(struct ldlm_resource *res, struct lustre_msg *m,
                           int buf_idx, int increase);
};
LDLM_NAMESPACE_GREEDY = 1 << 0,
LDLM_NAMESPACE_MODEST = 1 << 1

/*
 * Default value for ->ns_shrink_thumb. If a lock is not an extent lock, its
 * cost is one page. Here we have 256 pages, which is 1M on i386. Thus, by
 * default, all extent locks whose extents are longer than 1M will be kept in
 * the LRU; others (including ibits locks) will be canceled on a memory
 * pressure event.
 */
#define LDLM_LOCK_SHRINK_THUMB 256

/*
 * Default values for the "max_nolock_size", "contention_time" and
 * "contended_locks" namespace tunables.
 */
#define NS_DEFAULT_MAX_NOLOCK_BYTES 0
#define NS_DEFAULT_CONTENTION_SECONDS 2
#define NS_DEFAULT_CONTENDED_LOCKS 32
struct ldlm_namespace {
        /* Namespace name. Used for logging, etc. */
        /* Is this a client-side lock tree? */
        ldlm_side_t ns_client;
        /* Namespace connect flags supported by the server (may be changed
         * via proc; LRU resize may be disabled/enabled). */
        __u64 ns_connect_flags;
        /* Client-side original connect flags supported by the server. */
        __u64 ns_orig_connect_flags;
        /* Hash table for the namespace. */
        struct list_head *ns_hash;
        spinlock_t ns_hash_lock;
        /* Count of resources in the hash. */
        /* All root resources in the namespace. */
        struct list_head ns_root_list;
        /* Position in the global namespace list. */
        struct list_head ns_list_chain;
        /* Unused (LRU) locks in the namespace. */
        struct list_head ns_unused_list;
        spinlock_t ns_unused_lock;
        unsigned int ns_max_unused;
        unsigned int ns_max_age;
        unsigned int ns_ctime_age_limit;
        /* Lower limit on the number of pages in a lock to keep it in cache. */
        unsigned int ns_shrink_thumb;
        /* Next debug dump, jiffies. */
        cfs_time_t ns_next_dump;
        ldlm_res_policy ns_policy;
        struct ldlm_valblock_ops *ns_lvbo;
        cfs_waitq_t ns_waitq;
        struct ldlm_pool ns_pool;
        ldlm_appetite_t ns_appetite;
        /* If more than @ns_contended_locks are found, the resource is
         * considered contended. */
        unsigned ns_contended_locks;
        /* The resource remembers its contended state during
         * @ns_contention_time seconds. */
        unsigned ns_contention_time;
        /* Limit on the size of nolock requests, in bytes. */
        unsigned ns_max_nolock_size;
        /* Backward link to the obd, required for the ldlm pool to store a
         * new SLV. */
        struct obd_device *ns_obd;
        struct adaptive_timeout ns_at_estimate; /* estimated lock callback
                                                 * time */
static inline int ns_is_client(struct ldlm_namespace *ns)
{
        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
                                    LDLM_NAMESPACE_SERVER)));
        LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
                ns->ns_client == LDLM_NAMESPACE_SERVER);
        return ns->ns_client == LDLM_NAMESPACE_CLIENT;
}

static inline int ns_is_server(struct ldlm_namespace *ns)
{
        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
                                    LDLM_NAMESPACE_SERVER)));
        LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
                ns->ns_client == LDLM_NAMESPACE_SERVER);
        return ns->ns_client == LDLM_NAMESPACE_SERVER;
}

static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
{
        return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE);
}
/*
 * Resource hash table
 */

#define RES_HASH_BITS 10
#define RES_HASH_SIZE (1UL << RES_HASH_BITS)
#define RES_HASH_MASK (RES_HASH_SIZE - 1)
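/*
 * Illustrative note (not from the original source): with RES_HASH_BITS = 10
 * the per-namespace table (ns_hash above) has RES_HASH_SIZE = 1024 buckets,
 * and a resource is placed by masking the hash of its name:
 *
 *      unsigned int bucket = hashval & RES_HASH_MASK;   // 0 .. 1023
 *
 * (hashval is a placeholder; the actual hash function over struct
 * ldlm_res_id lives in resource.c and is not shown in this header.)
 */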
typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
                                      struct ldlm_lock_desc *new, void *data,
                                      int flag);
typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags,
                                        void *data);
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
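/*
 * Illustrative sketch (not part of the original header): a minimal blocking
 * callback matching ldlm_blocking_callback. The LDLM invokes it with
 * LDLM_CB_BLOCKING when another lock conflicts with ours, and with
 * LDLM_CB_CANCELING when the lock is being canceled; a typical client
 * implementation cancels the lock in the blocking case (compare
 * ldlm_blocking_ast() declared further down):
 *
 *      static int example_blocking_ast(struct ldlm_lock *lock,
 *                                      struct ldlm_lock_desc *desc,
 *                                      void *data, int flag)
 *      {
 *              struct lustre_handle lockh;
 *
 *              switch (flag) {
 *              case LDLM_CB_BLOCKING:
 *                      ldlm_lock2handle(lock, &lockh);
 *                      return ldlm_cli_cancel(&lockh);
 *              case LDLM_CB_CANCELING:
 *                      // flush or invalidate cached state covered by lock
 *                      return 0;
 *              default:
 *                      LBUG();
 *              }
 *              return 0;
 *      }
 */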
/* Interval node data for each LDLM_EXTENT lock */
struct ldlm_interval {
        struct interval_node li_node;   /* node for tree management */
        struct list_head li_group;      /* the locks which have the same
                                         * policy - the group of the policy */
};
#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
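/*
 * Illustrative example (not from the original source): interval tree
 * callbacks see a struct interval_node *, and to_ldlm_interval() recovers
 * the enclosing ldlm_interval so the lock list can be reached:
 *
 *      static void example_visit(struct interval_node *node)
 *      {
 *              struct ldlm_interval *li = to_ldlm_interval(node);
 *              // li->li_group now lists the locks sharing this extent policy
 *      }
 */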
/* The interval tree must be accessed while holding the resource lock. */
struct ldlm_interval_tree {
        /* Tree size; this variable is used to count granted PW locks in
         * ldlm_extent_policy(). */
        ldlm_mode_t lit_mode;           /* lock mode */
        struct interval_node *lit_root; /* actually ldlm_interval */
        struct portals_handle l_handle; // must be first in the structure

        /* Internal spinlock protects l_resource. We should hold this lock
         * before grabbing res_lock. */

        /* ldlm_lock_change_resource() can change this */
        struct ldlm_resource *l_resource;

        /* protected by ns_hash_lock. FIXME */
        struct list_head l_lru;

        /* protected by lr_lock, linkage to the resource's lock queues */
        struct list_head l_res_link;

        struct ldlm_interval *l_tree_node;      /* tree node for ldlm_extent */

        /* protected by led_lock */
        struct list_head l_export_chain; // per-export chain of locks

        /* protected by lr_lock */
        ldlm_mode_t l_req_mode;
        ldlm_mode_t l_granted_mode;

        ldlm_completion_callback l_completion_ast;
        ldlm_blocking_callback l_blocking_ast;
        ldlm_glimpse_callback l_glimpse_ast;

        struct obd_export *l_export;
        struct obd_export *l_conn_export;

        struct lustre_handle l_remote_handle;
        ldlm_policy_data_t l_policy_data;

        /* protected by lr_lock */

        /* If the lock is granted, a process sleeps on this waitq to learn
         * when it's no longer in use. If the lock is not granted, a process
         * sleeps on this waitq to learn when it becomes granted. */

        struct timeval l_enqueued_time;

        cfs_time_t l_last_used;         /* jiffies */
        struct ldlm_extent l_req_extent;

        /* Client-side-only members */
        __u32 l_lvb_len;                /* temporary storage for */
        void *l_lvb_data;               /* an LVB received during */
        void *l_lvb_swabber;            /* an enqueue */

        spinlock_t l_extents_list_lock;
        struct list_head l_extents_list;

        struct list_head l_cache_locks_list;

        /* Server-side-only members */

        /* protected by elt_lock */
        struct list_head l_pending_chain;       /* callbacks pending */
        cfs_time_t l_callback_timeout;          /* jiffies */

        __u32 l_pid;                    /* pid which created this lock */

        /* for ldlm_add_ast_work_item() */
        struct list_head l_bl_ast;
        struct list_head l_cp_ast;
        struct ldlm_lock *l_blocking_lock;

        /* protected by lr_lock, linkages to "skip lists" */
        struct list_head l_sl_mode;
        struct list_head l_sl_policy;
struct ldlm_resource {
        struct ldlm_namespace *lr_namespace;

        /* protected by ns_hash_lock */
        struct list_head lr_hash;
        struct ldlm_resource *lr_parent; /* NULL for a root resource */
        struct list_head lr_children;   /* list head for child resources */
        struct list_head lr_childof;    /* part of ns_root_list if root res,
                                         * part of lr_children if child */

        /* protected by lr_lock */
        struct list_head lr_granted;
        struct list_head lr_converting;
        struct list_head lr_waiting;
        ldlm_mode_t lr_most_restr;
        ldlm_type_t lr_type;            /* LDLM_{PLAIN,EXTENT,FLOCK} */
        struct ldlm_res_id lr_name;
        atomic_t lr_refcount;

        struct ldlm_interval_tree lr_itree[LCK_MODE_NUM]; /* interval trees */

        /* Server-side-only lock value block elements */
        struct semaphore lr_lvb_sem;

        /* when the resource was last considered contended */
        cfs_time_t lr_contention_time;
struct ldlm_ast_work {
        struct ldlm_lock *w_lock;
        struct ldlm_lock_desc w_desc;
        struct list_head w_list;

/* Common ldlm_enqueue parameters */
struct ldlm_enqueue_info {
        __u32 ei_type;   /* Type of the lock being enqueued. */
        __u32 ei_mode;   /* Mode of the lock being enqueued. */
        void *ei_cb_bl;  /* blocking lock callback */
        void *ei_cb_cp;  /* lock completion callback */
        void *ei_cb_gl;  /* lock glimpse callback */
        void *ei_cbdata; /* Data to be passed into the callbacks. */
extern struct obd_ops ldlm_obd_ops;

extern char *ldlm_lockname[];
extern char *ldlm_typename[];
extern char *ldlm_it2str(int it);

#define ldlm_lock_debug(cdls, level, lock, file, func, line, fmt, a...) do { \
        if (((level) & D_CANTMASK) != 0 ||                              \
            ((libcfs_debug & (level)) != 0 &&                           \
             (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) {        \
                static struct libcfs_debug_msg_data _ldlm_dbg_data =    \
                        DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM,      \
                _ldlm_lock_debug(lock, level, &_ldlm_dbg_data, fmt,     \

void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 mask,
                      struct libcfs_debug_msg_data *data, const char *fmt,
                      ...)
        __attribute__ ((format (printf, 4, 5)));

#define LDLM_ERROR(lock, fmt, a...) do {                                \
        static cfs_debug_limit_state_t _ldlm_cdls;                      \
        ldlm_lock_debug(&_ldlm_cdls, D_ERROR, lock,                     \
                        __FILE__, __FUNCTION__, __LINE__,               \

#define LDLM_DEBUG(lock, fmt, a...) do {                                \
        ldlm_lock_debug(NULL, D_DLMTRACE, lock,                         \
                        __FILE__, __FUNCTION__, __LINE__,               \

#define LDLM_DEBUG(lock, fmt, a...) ((void)0)
#define LDLM_ERROR(lock, fmt, a...) ((void)0)

#define LDLM_DEBUG_NOLOCK(format, a...)                 \
        CDEBUG(D_DLMTRACE, "### " format "\n" , ##a)
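/*
 * Illustrative usage (not from the original source): the debug macros take a
 * lock (or no lock for the NOLOCK variant) plus a printf-style format, e.g.:
 *
 *      LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
 *                 "sleeping");
 *      LDLM_ERROR(lock, "lock callback timer expired after %lds",
 *                 cfs_time_current_sec() - lock->l_enqueued_time.tv_sec);
 *      LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
 */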
typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, int *flags,
                                      int first_enq, ldlm_error_t *err,
                                      struct list_head *work_list);

#define LDLM_ITER_CONTINUE 1 /* keep iterating */
#define LDLM_ITER_STOP     2 /* stop iterating */

typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *);
typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);
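/*
 * Illustrative sketch (not part of the original header): an ldlm_iterator_t
 * returns LDLM_ITER_CONTINUE to keep walking or LDLM_ITER_STOP to abort the
 * walk. For example, counting the granted locks of a namespace with
 * ldlm_namespace_foreach() (declared just below):
 *
 *      static int example_count_cb(struct ldlm_lock *lock, void *closure)
 *      {
 *              int *count = closure;
 *
 *              if (lock->l_granted_mode == lock->l_req_mode)
 *                      (*count)++;
 *              return LDLM_ITER_CONTINUE;
 *      }
 *
 *      int count = 0;
 *      ldlm_namespace_foreach(ns, example_count_cb, &count);
 */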
int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                          void *closure);
int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
                           void *closure);
int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
                               ldlm_res_iterator_t iter, void *closure);

int ldlm_replay_locks(struct obd_import *imp);
void ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
                           ldlm_iterator_t iter, void *data);

int ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data);

__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);

int ldlm_server_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
                             void *data, int flag);
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback,
                        ldlm_blocking_callback, ldlm_glimpse_callback);
int ldlm_handle_convert(struct ptlrpc_request *req);
int ldlm_handle_cancel(struct ptlrpc_request *req);
int ldlm_request_cancel(struct ptlrpc_request *req,
                        const struct ldlm_request *dlm_req, int first);
int ldlm_del_waiting_lock(struct ldlm_lock *lock);
int ldlm_refresh_waiting_lock(struct ldlm_lock *lock);
void ldlm_revoke_export_locks(struct obd_export *exp);
int ldlm_get_ref(void);
void ldlm_put_ref(void);

ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res);
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
void ldlm_lock2handle(const struct ldlm_lock *lock,
                      struct lustre_handle *lockh);
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, int flags);
void ldlm_cancel_callback(struct ldlm_lock *);
int ldlm_lock_set_data(struct lustre_handle *, void *data);
int ldlm_lock_remove_from_lru(struct ldlm_lock *);
struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *,
                                      const struct lustre_handle *);

static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
{
        return __ldlm_handle2lock(h, 0);
}
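/*
 * Illustrative sketch (not from the original source): handles are the wire
 * representation of locks. A lock is converted to a handle for transport and
 * looked up again on receipt; ldlm_handle2lock() returns a referenced lock
 * (or NULL), released with LDLM_LOCK_PUT() (defined just below):
 *
 *      struct lustre_handle lockh;
 *      struct ldlm_lock *lock;
 *
 *      ldlm_lock2handle(my_lock, &lockh);
 *      ...
 *      lock = ldlm_handle2lock(&lockh);
 *      if (lock != NULL) {
 *              // use the lock
 *              LDLM_LOCK_PUT(lock);
 *      }
 *
 * (my_lock is a placeholder for a lock obtained elsewhere.)
 */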
static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
                                       struct lustre_msg *m, int buf_idx,
                                       int increase)
{
        if (res->lr_namespace->ns_lvbo &&
            res->lr_namespace->ns_lvbo->lvbo_update) {
                return res->lr_namespace->ns_lvbo->lvbo_update(res, m, buf_idx,
                                                               increase);
        }
        return 0;
}

#define LDLM_LOCK_PUT(lock)                     \
do {                                            \
        /*LDLM_DEBUG((lock), "put");*/          \
        ldlm_lock_put(lock);                    \
} while (0)

#define LDLM_LOCK_GET(lock)                     \
({                                              \
        ldlm_lock_get(lock);                    \
        /*LDLM_DEBUG((lock), "get");*/          \
        lock;                                   \
})

#define ldlm_lock_list_put(head, member, count)                 \
        struct ldlm_lock *_lock, *_next;                        \
        list_for_each_entry_safe(_lock, _next, head, member) {  \
                list_del_init(&_lock->member);                  \
                LDLM_LOCK_PUT(_lock);                           \
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock_destroy(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
int ldlm_lock_fast_match(struct ldlm_lock *, int, obd_off, obd_off, void **);
void ldlm_lock_fast_release(void *, int);
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                            const struct ldlm_res_id *, ldlm_type_t type,
                            ldlm_policy_data_t *, ldlm_mode_t mode,
                            struct lustre_handle *);
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        __u32 *flags);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_cancel_locks_for_export(struct obd_export *export);
void ldlm_reprocess_all(struct ldlm_resource *res);
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos);
void ldlm_lock_dump_handle(int level, struct lustre_handle *);
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
struct ldlm_namespace *
ldlm_namespace_new(struct obd_device *obd, char *name,
                   ldlm_side_t client, ldlm_appetite_t apt);
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags);
void ldlm_namespace_free(struct ldlm_namespace *ns,
                         struct obd_import *imp, int force);
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client);
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client);
void ldlm_namespace_get_locked(struct ldlm_namespace *ns);
void ldlm_namespace_put_locked(struct ldlm_namespace *ns, int wakeup);
void ldlm_namespace_get(struct ldlm_namespace *ns);
void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup);
int ldlm_proc_setup(void);
void ldlm_proc_cleanup(void);
static inline void ldlm_proc_cleanup(void) {}
/* resource.c - internal */
struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
                                        struct ldlm_resource *parent,
                                        const struct ldlm_res_id *,
                                        ldlm_type_t type, int create);
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
int ldlm_resource_putref(struct ldlm_resource *res);
void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
                            struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
void ldlm_namespace_dump(int level, struct ldlm_namespace *);
void ldlm_resource_dump(int level, struct ldlm_resource *);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
                              const struct ldlm_res_id *);

struct ldlm_callback_suite {
        ldlm_completion_callback lcs_completion;
        ldlm_blocking_callback lcs_blocking;
        ldlm_glimpse_callback lcs_glimpse;
};
int ldlm_expired_completion_wait(void *data);
int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                      void *data, int flag);
int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                     struct ldlm_enqueue_info *einfo,
                     const struct ldlm_res_id *res_id,
                     ldlm_policy_data_t *policy, int *flags,
                     void *lvb, __u32 lvb_len, void *lvb_swabber,
                     struct lustre_handle *lockh, int async);
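/*
 * Illustrative sketch (not part of the original header): a client-side
 * caller typically fills a struct ldlm_enqueue_info with the lock type,
 * mode and callbacks, then passes it to ldlm_cli_enqueue() together with
 * the resource name and a handle to receive the lock. The values below
 * (example_blocking_ast, exp, res_id) are placeholders:
 *
 *      struct ldlm_enqueue_info einfo = {
 *              .ei_type   = LDLM_PLAIN,
 *              .ei_mode   = LCK_PR,
 *              .ei_cb_bl  = example_blocking_ast,
 *              .ei_cb_cp  = ldlm_completion_ast,
 *              .ei_cbdata = NULL,
 *      };
 *      struct lustre_handle lockh;
 *      int flags = 0;
 *      int rc;
 *
 *      rc = ldlm_cli_enqueue(exp, NULL, &einfo, &res_id, NULL, &flags,
 *                            NULL, 0, NULL, &lockh, 0);
 */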
int ldlm_prep_enqueue_req(struct obd_export *exp,
                          struct ptlrpc_request *req,
                          struct list_head *cancels,
                          int count);
int ldlm_prep_elc_req(struct obd_export *exp,
                      struct ptlrpc_request *req,
                      int version, int opc, int canceloff,
                      struct list_head *cancels, int count);
int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req,
                         const struct ldlm_callback_suite *cbs);
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                          ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
                          int *flags, void *lvb, __u32 lvb_len,
                          void *lvb_swabber, struct lustre_handle *lockh,
                          int rc);
int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                           const struct ldlm_res_id *res_id,
                           ldlm_type_t type, ldlm_policy_data_t *policy,
                           ldlm_mode_t mode, int *flags,
                           ldlm_blocking_callback blocking,
                           ldlm_completion_callback completion,
                           ldlm_glimpse_callback glimpse,
                           void *data, __u32 lvb_len, void *lvb_swabber,
                           struct lustre_handle *lockh);
int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
                    void *data, __u32 data_len);
int ldlm_cli_convert(struct lustre_handle *, int new_mode, __u32 *flags);
int ldlm_cli_update_pool(struct ptlrpc_request *req);
int ldlm_handle_convert0(struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req);
int ldlm_cli_cancel(struct lustre_handle *lockh);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
                           int flags, void *opaque);
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                    const struct ldlm_res_id *res_id,
                                    ldlm_policy_data_t *policy,
                                    ldlm_mode_t mode, int flags, void *opaque);
int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
                        int count, int flags);
int ldlm_cli_join_lru(struct ldlm_namespace *,
                      const struct ldlm_res_id *, int join);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
                               struct list_head *cancels,
                               ldlm_policy_data_t *policy,
                               ldlm_mode_t mode, int lock_flags,
                               int cancel_flags, void *opaque);
int ldlm_cli_cancel_list(struct list_head *head, int count,
                         struct ptlrpc_request *req, int flags);
/* This has to be here because recursive inclusion sucks. */
int intent_disposition(struct ldlm_reply *rep, int flag);
void intent_set_disposition(struct ldlm_reply *rep, int flag);

/* ioctls for trying requests */
#define IOC_LDLM_TYPE 'f'
#define IOC_LDLM_MIN_NR 40

#define IOC_LDLM_TEST _IOWR('f', 40, long)
#define IOC_LDLM_DUMP _IOWR('f', 41, long)
#define IOC_LDLM_REGRESS_START _IOWR('f', 42, long)
#define IOC_LDLM_REGRESS_STOP _IOWR('f', 43, long)
#define IOC_LDLM_MAX_NR 43
static inline void lock_res(struct ldlm_resource *res)
{
        spin_lock(&res->lr_lock);
}

static inline void unlock_res(struct ldlm_resource *res)
{
        spin_unlock(&res->lr_lock);
}

static inline void check_res_locked(struct ldlm_resource *res)
{
        LASSERT_SPIN_LOCKED(&res->lr_lock);
}
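/*
 * Illustrative sketch (not from the original source): per the comments in
 * struct ldlm_lock and struct ldlm_resource, the granted/converting/waiting
 * queues are protected by lr_lock, so a walk over them is bracketed by
 * lock_res()/unlock_res() (or lock_res_and_lock() when starting from a
 * lock, declared just below):
 *
 *      struct ldlm_lock *lck;
 *
 *      lock_res(res);
 *      list_for_each_entry(lck, &res->lr_granted, l_res_link) {
 *              // examine lck; lr_lock is held
 *      }
 *      unlock_res(res);
 */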
struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
void unlock_res_and_lock(struct ldlm_lock *lock);

void ldlm_pools_recalc(ldlm_side_t client);
int ldlm_pools_init(void);
void ldlm_pools_fini(void);
void ldlm_pools_wakeup(void);

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, ldlm_side_t client);
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
                     unsigned int gfp_mask);
void ldlm_pool_fini(struct ldlm_pool *pl);
int ldlm_pool_setup(struct ldlm_pool *pl, int limit);
int ldlm_pool_recalc(struct ldlm_pool *pl);
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl);
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl);
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl);
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl);
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv);
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv);
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit);
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);