/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * (visit-tags-table FILE)
 * vim:expandtab:shiftwidth=8:tabstop=8:
 */

#ifndef _LUSTRE_DLM_H__
#define _LUSTRE_DLM_H__

#if defined(__linux__)
#include <linux/lustre_dlm.h>
#elif defined(__APPLE__)
#include <darwin/lustre_dlm.h>
#elif defined(__WINNT__)
#include <winnt/lustre_dlm.h>
#else
#error Unsupported operating system.
#endif
#include <lustre_lib.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_handles.h>
#include <lustre_export.h> /* for obd_export, for LDLM_DEBUG */
#include <interval_tree.h> /* for interval_node{}, ldlm_extent */

#define OBD_LDLM_DEVICENAME  "ldlm"

#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
typedef enum {
        ELDLM_LOCK_CHANGED = 300,
        ELDLM_LOCK_ABORTED = 301,
        ELDLM_LOCK_REPLACED = 302,
        ELDLM_NO_LOCK_DATA = 303,

        ELDLM_NAMESPACE_EXISTS = 400,
        ELDLM_BAD_NAMESPACE = 401
} ldlm_error_t;

typedef enum {
        LDLM_NAMESPACE_SERVER = 1 << 0,
        LDLM_NAMESPACE_CLIENT = 1 << 1
} ldlm_side_t;
#define LDLM_FL_LOCK_CHANGED   0x000001 /* extent, mode, or resource changed */

/* If the server returns one of these flags, then the lock was put on that list.
 * If the client sends one of these flags (during recovery ONLY!), it wants the
 * lock added to the specified list, no questions asked. -p */
#define LDLM_FL_BLOCK_GRANTED  0x000002
#define LDLM_FL_BLOCK_CONV     0x000004
#define LDLM_FL_BLOCK_WAIT     0x000008

#define LDLM_FL_CBPENDING      0x000010 /* this lock is being destroyed */
#define LDLM_FL_AST_SENT       0x000020 /* blocking or cancel packet was sent */
#define LDLM_FL_WAIT_NOREPROC  0x000040 /* not a real flag, not saved in lock */
#define LDLM_FL_CANCEL         0x000080 /* cancellation callback already run */

/* Lock is being replayed.  This could probably be implied by the fact that one
 * of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
#define LDLM_FL_REPLAY         0x000100

#define LDLM_FL_INTENT_ONLY    0x000200 /* don't grant lock, just do intent */
#define LDLM_FL_LOCAL_ONLY     0x000400 /* see ldlm_cli_cancel_unused */

/* don't run the cancel callback under ldlm_cli_cancel_unused */
#define LDLM_FL_FAILED         0x000800

#define LDLM_FL_HAS_INTENT     0x001000 /* lock request has intent */
#define LDLM_FL_CANCELING      0x002000 /* lock cancel has already been sent */
#define LDLM_FL_LOCAL          0x004000 /* local lock (ie, no srv/cli split) */
#define LDLM_FL_WARN           0x008000 /* see ldlm_cli_cancel_unused */
#define LDLM_FL_DISCARD_DATA   0x010000 /* discard (no writeback) on cancel */
#define LDLM_FL_NO_TIMEOUT     0x020000 /* blocked by group lock - wait
                                         * indefinitely */

/* file & record locking */
#define LDLM_FL_BLOCK_NOWAIT   0x040000 /* server told not to wait if blocked */
#define LDLM_FL_TEST_LOCK      0x080000 /* return blocking lock */
/* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
 * the LVB filling happens _after_ the lock has been granted, so another thread
 * can match it before the LVB has been updated.  As a dirty hack, we set
 * LDLM_FL_LVB_READY only after we have finished filling the LVB.
 *
 * This is only needed on lov/osc now, where the LVB is actually used and
 * callers must set it in the input flags.
 *
 * The proper fix is to do the granting inside of the completion AST, which can
 * be replaced with an LVB-aware wrapping function for OSC locks.  That change
 * is pretty high-risk, though, and would need a lot more testing. */
#define LDLM_FL_LVB_READY      0x100000
/* A lock contributes to the kms calculation until it has finished the part
 * of its cancellation that performs write back on its dirty pages.  It
 * can remain on the granted list during this whole time.  Threads racing
 * to update the kms after performing their writeback need to know to
 * exclude each other's locks from the calculation as they walk the granted
 * list. */
#define LDLM_FL_KMS_IGNORE     0x200000

/* Don't drop lock covering mmapped file in LRU */
#define LDLM_FL_NO_LRU         0x400000

/* Immediately cancel such locks when they block some other locks.  Send
 * cancel notification to original lock holder, but expect no reply. */
#define LDLM_FL_CANCEL_ON_BLOCK 0x800000

/* Flags inherited from parent lock when doing intents. */
#define LDLM_INHERIT_FLAGS     (LDLM_FL_CANCEL_ON_BLOCK)
/* These are flags that are mapped into the flags and ASTs of blocking locks */
#define LDLM_AST_DISCARD_DATA  0x80000000 /* Add FL_DISCARD to blocking ASTs */
/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
#define LDLM_AST_FLAGS         (LDLM_FL_DISCARD_DATA)

/* completion AST to be executed */
#define LDLM_FL_CP_REQD        0x1000000

/* cleanup_resource has already handled the lock */
#define LDLM_FL_CLEANED        0x2000000

/* optimization hint: LDLM can run the blocking callback from the current
 * context without involving a separate thread, in order to decrease the
 * context-switch rate */
#define LDLM_FL_ATOMIC_CB      0x4000000
/* It may happen that a client initiates two operations, e.g. unlink and
 * mkdir, such that the server sends a blocking AST for conflicting locks
 * to this client for the first operation, whereas the second operation
 * has canceled this lock and is waiting for the rpc_lock, which is taken
 * by the first operation.
 * LDLM_FL_BL_AST is set by ldlm_callback_handler() on the lock to not allow
 * the ELC code to cancel it.
 * LDLM_FL_BL_DONE is set by ldlm_cancel_callback() when the lock cache is
 * dropped, to let ldlm_callback_handler() return EINVAL to the server.  It is
 * used when the ELC RPC is already prepared and is waiting for the rpc_lock,
 * too late to send a separate CANCEL RPC. */
#define LDLM_FL_BL_AST         0x10000000
#define LDLM_FL_BL_DONE        0x20000000

/* measure lock contention and return -EUSERS if locking contention is high */
#define LDLM_FL_DENY_ON_CONTENTION 0x40000000

/* The blocking callback is overloaded to perform two functions.  These flags
 * indicate which operation should be performed. */
#define LDLM_CB_BLOCKING       1
#define LDLM_CB_CANCELING      2
/* compatibility matrix */
#define LCK_COMPAT_EX    LCK_NL
#define LCK_COMPAT_PW    (LCK_COMPAT_EX | LCK_CR)
#define LCK_COMPAT_PR    (LCK_COMPAT_PW | LCK_PR)
#define LCK_COMPAT_CW    (LCK_COMPAT_PW | LCK_CW)
#define LCK_COMPAT_CR    (LCK_COMPAT_CW | LCK_PR | LCK_PW)
#define LCK_COMPAT_NL    (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
#define LCK_COMPAT_GROUP (LCK_GROUP | LCK_NL)

extern ldlm_mode_t lck_compat_array[];

static inline void lockmode_verify(ldlm_mode_t mode)
{
        LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
}

static inline int lockmode_compat(ldlm_mode_t exist, ldlm_mode_t new)
{
        return (lck_compat_array[exist] & new);
}
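
/*
 * Usage sketch (illustrative only, not part of the LDLM API): a caller that
 * wants to know whether a requested mode conflicts with an already granted
 * one can combine lockmode_verify() and lockmode_compat().  The helper name
 * below is hypothetical.
 *
 *      static int modes_conflict(ldlm_mode_t granted, ldlm_mode_t requested)
 *      {
 *              lockmode_verify(granted);
 *              lockmode_verify(requested);
 *              return !lockmode_compat(granted, requested);
 *      }
 */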
/*
 * cluster name spaces
 */

#define DLM_OST_NAMESPACE 1
#define DLM_MDS_NAMESPACE 2

/* XXX
 * - do we just separate this by security domains and use a prefix for
 *   multiple namespaces in the same domain?
 */
struct ldlm_resource;
struct ldlm_namespace;
struct ldlm_pool;

struct ldlm_pool_ops {
        int (*po_recalc)(struct ldlm_pool *pl);
        int (*po_shrink)(struct ldlm_pool *pl, int nr,
                         unsigned int gfp_mask);
        int (*po_setup)(struct ldlm_pool *pl, int limit);
};

/* One second for pools thread check interval. */
#define LDLM_POOLS_THREAD_PERIOD (1)

/* 5% margin for modest pools.  See ldlm_pool.c for details. */
#define LDLM_POOLS_MODEST_MARGIN (5)

/* A change to SLV in % after which we want to wake up the pools thread asap. */
#define LDLM_POOLS_FAST_SLV_CHANGE (50)
struct ldlm_pool {
        /* Common pool fields */
        cfs_proc_dir_entry_t  *pl_proc_dir;       /* Pool proc directory. */
        char                   pl_name[100];      /* Pool name, should be long
                                                   * enough to contain complex
                                                   * proc entry name. */
        spinlock_t             pl_lock;           /* Lock for protecting slv/clv
                                                   * updates. */
        atomic_t               pl_limit;          /* Number of allowed locks in
                                                   * the pool, both client and
                                                   * server side. */
        atomic_t               pl_granted;        /* Number of granted locks. */
        atomic_t               pl_grant_rate;     /* Grant rate per T. */
        atomic_t               pl_cancel_rate;    /* Cancel rate per T. */
        atomic_t               pl_grant_speed;    /* Grant speed (GR-CR) per T. */
        __u64                  pl_server_lock_volume; /* Server lock volume.
                                                       * Protected by pl_lock. */
        __u64                  pl_client_lock_volume; /* Client lock volume. */
        atomic_t               pl_lock_volume_factor; /* Lock volume factor. */

        time_t                 pl_recalc_time;    /* Time when last slv from
                                                   * server was obtained. */
        struct ldlm_pool_ops  *pl_ops;            /* Recalc and shrink ops. */

        int                    pl_grant_plan;     /* Planned number of granted
                                                   * locks for next T. */
        int                    pl_grant_step;     /* Grant plan step for next
                                                   * T. */

        struct lprocfs_stats  *pl_stats;          /* Pool statistics. */
};
typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
                               void *req_cookie, ldlm_mode_t mode, int flags,
                               void *data);

struct ldlm_valblock_ops {
        int (*lvbo_init)(struct ldlm_resource *res);
        int (*lvbo_update)(struct ldlm_resource *res, struct ptlrpc_request *r,
                           int buf_idx, int increase);
};

typedef enum {
        LDLM_NAMESPACE_GREEDY = 1 << 0,
        LDLM_NAMESPACE_MODEST = 1 << 1
} ldlm_appetite_t;

/* default values for the "max_nolock_size", "contention_time"
 * and "contended_locks" namespace tunables */
#define NS_DEFAULT_MAX_NOLOCK_BYTES 0
#define NS_DEFAULT_CONTENTION_SECONDS 2
#define NS_DEFAULT_CONTENDED_LOCKS 32

/* Default value for ->ns_shrink_thumb.  If a lock is not an extent lock, its
 * cost is one page.  Here we have 256 pages, which is 1MB on i386.  Thus by
 * default all extent locks whose extent is longer than 1MB will be kept in the
 * LRU; others (including ibits locks) will be canceled on a memory pressure
 * event. */
#define LDLM_LOCK_SHRINK_THUMB 256
struct ldlm_namespace {
        ldlm_side_t            ns_client;         /* is this a client-side lock tree? */
        __u64                  ns_connect_flags;  /* ns connect flags supported
                                                   * by server (may be changed
                                                   * via proc, lru resize may be
                                                   * disabled/enabled) */
        __u64                  ns_orig_connect_flags; /* client side orig connect
                                                       * flags supported by server */
        struct list_head      *ns_hash;           /* hash table for ns */
        spinlock_t             ns_hash_lock;
        __u32                  ns_refcount;       /* count of resources in the hash */
        struct list_head       ns_root_list;      /* all root resources in ns */
        struct list_head       ns_list_chain;     /* position in global NS list */

        struct list_head       ns_unused_list;    /* unused (LRU) locks in ns */
        spinlock_t             ns_unused_lock;

        unsigned int           ns_max_unused;
        unsigned int           ns_max_age;

        /* Lower limit to number of pages in lock to keep it in cache */
        unsigned int           ns_shrink_thumb;
        cfs_time_t             ns_next_dump;      /* next debug dump, jiffies */

        ldlm_res_policy        ns_policy;
        struct ldlm_valblock_ops *ns_lvbo;
        cfs_waitq_t            ns_waitq;
        struct ldlm_pool       ns_pool;
        ldlm_appetite_t        ns_appetite;

        /* if more than @ns_contended_locks are found, the resource is
         * considered to be contended */
        unsigned               ns_contended_locks;
        /* the resource remembers contended state during @ns_contention_time,
         * in seconds */
        unsigned               ns_contention_time;
        /* limit size of nolock requests, in bytes */
        unsigned               ns_max_nolock_size;

        struct adaptive_timeout ns_at_estimate;   /* estimated lock callback time */
        /* backward link to obd, required for ldlm pool to store new SLV. */
        struct obd_device     *ns_obd;
};
static inline int ns_is_client(struct ldlm_namespace *ns)
{
        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
                                    LDLM_NAMESPACE_SERVER)));
        LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
                ns->ns_client == LDLM_NAMESPACE_SERVER);
        return ns->ns_client == LDLM_NAMESPACE_CLIENT;
}

static inline int ns_is_server(struct ldlm_namespace *ns)
{
        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
                                    LDLM_NAMESPACE_SERVER)));
        LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
                ns->ns_client == LDLM_NAMESPACE_SERVER);
        return ns->ns_client == LDLM_NAMESPACE_SERVER;
}

static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
{
        return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE);
}
/*
 * Resource hash table
 */

#define RES_HASH_BITS 10
#define RES_HASH_SIZE (1UL << RES_HASH_BITS)
#define RES_HASH_MASK (RES_HASH_SIZE - 1)

typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
                                      struct ldlm_lock_desc *new, void *data,
                                      int flag);
typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags,
                                        void *data);
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
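
/*
 * Illustrative sketch (not part of this header) of the shape of a blocking
 * callback: "flag" is either LDLM_CB_BLOCKING or LDLM_CB_CANCELING, and a
 * typical client implementation dispatches on it, canceling its own handle
 * when the lock really has to go.  The function name is hypothetical.
 *
 *      static int my_blocking_ast(struct ldlm_lock *lock,
 *                                 struct ldlm_lock_desc *desc,
 *                                 void *data, int flag)
 *      {
 *              struct lustre_handle lockh;
 *
 *              switch (flag) {
 *              case LDLM_CB_BLOCKING:
 *                      ldlm_lock2handle(lock, &lockh);
 *                      return ldlm_cli_cancel(&lockh);
 *              case LDLM_CB_CANCELING:
 *                      ... flush cached state covered by the lock ...
 *                      return 0;
 *              }
 *              return 0;
 *      }
 */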
/* Interval node data for each LDLM_EXTENT lock */
struct ldlm_interval {
        struct interval_node li_node;  /* node for tree mgmt */
        struct list_head     li_group; /* the locks which have the same
                                        * policy - group of the policy */
};
#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)

/* the interval tree must be accessed inside the resource lock. */
struct ldlm_interval_tree {
        /* tree size, this variable is used to count
         * granted PW locks in ldlm_extent_policy() */
        int                   lit_size;
        ldlm_mode_t           lit_mode;  /* lock mode */
        struct interval_node *lit_root;  /* actually ldlm_interval */
};
struct ldlm_lock {
        struct portals_handle l_handle; /* must be first in the structure */

        /* internal spinlock; protects l_resource.  we should hold this lock
         * first before grabbing res_lock. */
        spinlock_t            l_lock;

        /* ldlm_lock_change_resource() can change this */
        struct ldlm_resource *l_resource;

        /* protected by ns_hash_lock. FIXME */
        struct list_head      l_lru;

        /* protected by lr_lock, linkage to resource's lock queues */
        struct list_head      l_res_link;

        struct ldlm_interval *l_tree_node; /* tree node for ldlm_extent */

        /* protected by led_lock */
        struct list_head      l_export_chain; /* per-export chain of locks */

        /* protected by lr_lock */
        ldlm_mode_t           l_req_mode;
        ldlm_mode_t           l_granted_mode;

        ldlm_completion_callback l_completion_ast;
        ldlm_blocking_callback   l_blocking_ast;
        ldlm_glimpse_callback    l_glimpse_ast;

        struct obd_export    *l_export;
        struct obd_export    *l_conn_export;

        struct lustre_handle  l_remote_handle;
        ldlm_policy_data_t    l_policy_data;

        /* protected by lr_lock */

        /* If the lock is granted, a process sleeps on this waitq to learn when
         * it's no longer in use.  If the lock is not granted, a process sleeps
         * on this waitq to learn when it becomes granted. */
        cfs_waitq_t           l_waitq;
        struct timeval        l_enqueued_time;

        cfs_time_t            l_last_used;  /* jiffies */
        struct ldlm_extent    l_req_extent;

        /* Client-side-only members */
        __u32                 l_lvb_len;     /* temporary storage for */
        void                 *l_lvb_data;    /* an LVB received during */
        void                 *l_lvb_swabber; /* an enqueue */

        spinlock_t            l_extents_list_lock;
        struct list_head      l_extents_list;

        struct list_head      l_cache_locks_list;

        /* Server-side-only members */

        /* protected by elt_lock */
        struct list_head      l_pending_chain;    /* callbacks pending */
        cfs_time_t            l_callback_timeout; /* jiffies */

        __u32                 l_pid; /* pid which created this lock */

        /* for ldlm_add_ast_work_item() */
        struct list_head      l_bl_ast;
        struct list_head      l_cp_ast;
        struct ldlm_lock     *l_blocking_lock;

        /* protected by lr_lock, linkages to "skip lists" */
        struct list_head      l_sl_mode;
        struct list_head      l_sl_policy;
};
struct ldlm_resource {
        struct ldlm_namespace *lr_namespace;

        /* protected by ns_hash_lock */
        struct list_head       lr_hash;
        struct ldlm_resource  *lr_parent;   /* 0 for a root resource */
        struct list_head       lr_children; /* list head for child resources */
        struct list_head       lr_childof;  /* part of ns_root_list if root res,
                                             * part of lr_children if child */
        spinlock_t             lr_lock;

        /* protected by lr_lock */
        struct list_head       lr_granted;
        struct list_head       lr_converting;
        struct list_head       lr_waiting;
        ldlm_mode_t            lr_most_restr;
        ldlm_type_t            lr_type;     /* LDLM_{PLAIN,EXTENT,FLOCK} */
        struct ldlm_res_id     lr_name;
        atomic_t               lr_refcount;

        struct ldlm_interval_tree lr_itree[LCK_MODE_NUM]; /* interval trees */

        /* Server-side-only lock value block elements */
        struct semaphore       lr_lvb_sem;

        /* when the resource was considered as contended */
        cfs_time_t             lr_contention_time;
};
struct ldlm_ast_work {
        struct ldlm_lock      *w_lock;
        struct ldlm_lock_desc  w_desc;
        struct list_head       w_list;
};

/* ldlm_enqueue parameters common */
struct ldlm_enqueue_info {
        __u32 ei_type;   /* Type of the lock being enqueued. */
        __u32 ei_mode;   /* Mode of the lock being enqueued. */
        void *ei_cb_bl;  /* Different callbacks for lock handling (blocking, */
        void *ei_cb_cp;  /* completion, glimpse) */
        void *ei_cb_gl;
        void *ei_cbdata; /* Data to be passed into callbacks. */
};
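
/*
 * Illustrative sketch (not part of this header): a client enqueue path
 * typically fills struct ldlm_enqueue_info before calling ldlm_cli_enqueue().
 * The callback and data names below are hypothetical placeholders.
 *
 *      struct ldlm_enqueue_info einfo = {
 *              .ei_type   = LDLM_EXTENT,
 *              .ei_mode   = LCK_PR,
 *              .ei_cb_bl  = my_blocking_ast,
 *              .ei_cb_cp  = ldlm_completion_ast,
 *              .ei_cbdata = my_private_data,
 *      };
 */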
extern struct obd_ops ldlm_obd_ops;

extern char *ldlm_lockname[];
extern char *ldlm_typename[];
extern char *ldlm_it2str(int it);

#define ldlm_lock_debug(cdls, level, lock, file, func, line, fmt, a...) do { \
        if (((level) & D_CANTMASK) != 0 ||                              \
            ((libcfs_debug & (level)) != 0 &&                           \
             (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) {        \
                static struct libcfs_debug_msg_data _ldlm_dbg_data =    \
                DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM,              \
                                    file, func, line);                  \
                _ldlm_lock_debug(lock, level, &_ldlm_dbg_data, fmt,     \
                                 ##a);                                  \
        }                                                               \
} while (0)

void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 mask,
                      struct libcfs_debug_msg_data *data, const char *fmt,
                      ...)
        __attribute__ ((format (printf, 4, 5)));

#define LDLM_ERROR(lock, fmt, a...) do {                                \
        static cfs_debug_limit_state_t _ldlm_cdls;                      \
        ldlm_lock_debug(&_ldlm_cdls, D_ERROR, lock,                     \
                        __FILE__, __FUNCTION__, __LINE__,               \
                        "### " fmt , ##a);                              \
} while (0)

#define LDLM_DEBUG(lock, fmt, a...) do {                                \
        ldlm_lock_debug(NULL, D_DLMTRACE, lock,                         \
                        __FILE__, __FUNCTION__, __LINE__,               \
                        "### " fmt , ##a);                              \
} while (0)

#define LDLM_DEBUG_NOLOCK(format, a...)                                 \
        CDEBUG(D_DLMTRACE, "### " format "\n" , ##a)
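
/*
 * Illustrative usage (not part of this header): the macros take a lock, a
 * printf-style format string, and arguments from the caller's context, and
 * route through _ldlm_lock_debug() so the lock state is dumped along with
 * the message.  The argument values below are hypothetical.
 *
 *      LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock");
 *      LDLM_ERROR(lock, "lock callback timer expired after %lds", delay);
 *      LDLM_DEBUG_NOLOCK("namespace cleanup (rc = %d)", rc);
 */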
typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, int *flags,
                                      int first_enq, ldlm_error_t *err,
                                      struct list_head *work_list);

#define LDLM_ITER_CONTINUE 1 /* keep iterating */
#define LDLM_ITER_STOP     2 /* stop iterating */

typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *);
typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);

int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                          void *closure);
int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
                           void *closure);
int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
                               ldlm_res_iterator_t iter, void *closure);
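
/*
 * Illustrative sketch (not part of this header): an ldlm_iterator_t returns
 * LDLM_ITER_CONTINUE to keep walking or LDLM_ITER_STOP to end the walk early.
 * The counter closure below is a hypothetical example; it would be passed as
 * ldlm_namespace_foreach(ns, count_granted_cb, &count).
 *
 *      static int count_granted_cb(struct ldlm_lock *lock, void *closure)
 *      {
 *              int *count = closure;
 *
 *              if (lock->l_granted_mode == lock->l_req_mode)
 *                      (*count)++;
 *              return LDLM_ITER_CONTINUE;
 *      }
 */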
int ldlm_replay_locks(struct obd_import *imp);
void ldlm_resource_iterate(struct ldlm_namespace *, struct ldlm_res_id *,
                           ldlm_iterator_t iter, void *data);

/* ldlm_flock.c */
int ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data);

/* ldlm_extent.c */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);

/* ldlm_lockd.c */
int ldlm_server_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
                             void *data, int flag);
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback,
                        ldlm_blocking_callback, ldlm_glimpse_callback);
int ldlm_handle_convert(struct ptlrpc_request *req);
int ldlm_handle_cancel(struct ptlrpc_request *req);
int ldlm_request_cancel(struct ptlrpc_request *req,
                        struct ldlm_request *dlm_req, int first);
int ldlm_del_waiting_lock(struct ldlm_lock *lock);
int ldlm_refresh_waiting_lock(struct ldlm_lock *lock);
int ldlm_get_ref(void);
void ldlm_put_ref(void);

/* ldlm_lock.c */
ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res);
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh);
struct ldlm_lock *__ldlm_handle2lock(struct lustre_handle *, int flags);
void ldlm_cancel_callback(struct ldlm_lock *);
int ldlm_lock_set_data(struct lustre_handle *, void *data);
int ldlm_lock_remove_from_lru(struct ldlm_lock *);
struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *,
                                      struct lustre_handle *);

static inline struct ldlm_lock *ldlm_handle2lock(struct lustre_handle *h)
{
        return __ldlm_handle2lock(h, 0);
}
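
/*
 * Illustrative sketch (not part of this header): ldlm_handle2lock() returns
 * a referenced lock (or NULL if the handle no longer resolves), so every
 * successful lookup must be balanced with LDLM_LOCK_PUT().
 *
 *      struct ldlm_lock *lock = ldlm_handle2lock(lockh);
 *
 *      if (lock != NULL) {
 *              ... inspect or update the lock ...
 *              LDLM_LOCK_PUT(lock);
 *      }
 */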
static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
                                       struct ptlrpc_request *r, int buf_idx,
                                       int increase)
{
        if (res->lr_namespace->ns_lvbo &&
            res->lr_namespace->ns_lvbo->lvbo_update) {
                return res->lr_namespace->ns_lvbo->lvbo_update(res, r, buf_idx,
                                                               increase);
        }
        return 0;
}

#define LDLM_LOCK_PUT(lock)                     \
do {                                            \
        /*LDLM_DEBUG((lock), "put");*/          \
        ldlm_lock_put(lock);                    \
} while (0)

#define LDLM_LOCK_GET(lock)                     \
({                                              \
        ldlm_lock_get(lock);                    \
        /*LDLM_DEBUG((lock), "get");*/          \
        lock;                                   \
})
#define ldlm_lock_list_put(head, member, count)                 \
({                                                              \
        struct ldlm_lock *_lock, *_next;                        \
        int _c = count;                                         \
        list_for_each_entry_safe(_lock, _next, head, member) {  \
                if (_c-- == 0)                                  \
                        break;                                  \
                list_del_init(&_lock->member);                  \
                LDLM_LOCK_PUT(_lock);                           \
        }                                                       \
})
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock_destroy(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
int ldlm_lock_fast_match(struct ldlm_lock *, int, obd_off, obd_off, void **);
void ldlm_lock_fast_release(void *, int);
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                            struct ldlm_res_id *, ldlm_type_t type,
                            ldlm_policy_data_t *, ldlm_mode_t mode,
                            struct lustre_handle *);
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        __u32 *flags);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_cancel_locks_for_export(struct obd_export *export);
void ldlm_reprocess_all(struct ldlm_resource *res);
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos);
void ldlm_lock_dump_handle(int level, struct lustre_handle *);
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
/* resource.c */
struct ldlm_namespace *
ldlm_namespace_new(struct obd_device *obd, char *name,
                   ldlm_side_t client, ldlm_appetite_t apt);
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags);
void ldlm_namespace_free(struct ldlm_namespace *ns,
                         struct obd_import *imp, int force);
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client);
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client);
void ldlm_namespace_get_locked(struct ldlm_namespace *ns);
void ldlm_namespace_put_locked(struct ldlm_namespace *ns, int wakeup);
void ldlm_namespace_get(struct ldlm_namespace *ns);
void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup);
int ldlm_proc_setup(void);
#ifdef LPROCFS
void ldlm_proc_cleanup(void);
#else
static inline void ldlm_proc_cleanup(void) {}
#endif
/* resource.c - internal */
struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
                                        struct ldlm_resource *parent,
                                        struct ldlm_res_id, ldlm_type_t type,
                                        int create);
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
int ldlm_resource_putref(struct ldlm_resource *res);
void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
                            struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
void ldlm_namespace_dump(int level, struct ldlm_namespace *);
void ldlm_resource_dump(int level, struct ldlm_resource *);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
                              struct ldlm_res_id);
/* ldlm_request.c */
int ldlm_expired_completion_wait(void *data);
int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                      void *data, int flag);
int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **req,
                     struct ldlm_enqueue_info *einfo, struct ldlm_res_id res_id,
                     ldlm_policy_data_t *policy, int *flags,
                     void *lvb, __u32 lvb_len, void *lvb_swabber,
                     struct lustre_handle *lockh, int async);
struct ptlrpc_request *ldlm_prep_enqueue_req(struct obd_export *exp,
                                             int bufcount, int *size,
                                             struct list_head *head, int count);
struct ptlrpc_request *ldlm_prep_elc_req(struct obd_export *exp, int version,
                                         int opc, int bufcount, int *size,
                                         int bufoff, int canceloff,
                                         struct list_head *cancels, int count);
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                          ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
                          int *flags, void *lvb, __u32 lvb_len,
                          void *lvb_swabber, struct lustre_handle *lockh,
                          int rc);
int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                           struct ldlm_res_id *res_id,
                           ldlm_type_t type, ldlm_policy_data_t *policy,
                           ldlm_mode_t mode, int *flags,
                           ldlm_blocking_callback blocking,
                           ldlm_completion_callback completion,
                           ldlm_glimpse_callback glimpse,
                           void *data, __u32 lvb_len, void *lvb_swabber,
                           struct lustre_handle *lockh);
int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
                    void *data, __u32 data_len);
int ldlm_cli_convert(struct lustre_handle *, int new_mode, __u32 *flags);
int ldlm_cli_update_pool(struct ptlrpc_request *req);
int ldlm_cli_cancel(struct lustre_handle *lockh);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, struct ldlm_res_id *,
                           int flags, void *opaque);
int ldlm_cli_cancel_req(struct obd_export *exp,
                        struct list_head *head, int count);
int ldlm_cli_join_lru(struct ldlm_namespace *, struct ldlm_res_id *, int join);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
                               struct list_head *cancels,
                               ldlm_policy_data_t *policy, ldlm_mode_t mode,
                               int lock_flags, int cancel_flags, void *opaque);
int ldlm_cli_cancel_list(struct list_head *head, int count,
                         struct ptlrpc_request *req, int off);

/* This has to be here because recursive inclusion sucks. */
int intent_disposition(struct ldlm_reply *rep, int flag);
void intent_set_disposition(struct ldlm_reply *rep, int flag);
/* ioctls for trying requests */
#define IOC_LDLM_TYPE          'f'
#define IOC_LDLM_MIN_NR        40

#define IOC_LDLM_TEST          _IOWR('f', 40, long)
#define IOC_LDLM_DUMP          _IOWR('f', 41, long)
#define IOC_LDLM_REGRESS_START _IOWR('f', 42, long)
#define IOC_LDLM_REGRESS_STOP  _IOWR('f', 43, long)
#define IOC_LDLM_MAX_NR        43
static inline void lock_res(struct ldlm_resource *res)
{
        spin_lock(&res->lr_lock);
}

static inline void unlock_res(struct ldlm_resource *res)
{
        spin_unlock(&res->lr_lock);
}

static inline void check_res_locked(struct ldlm_resource *res)
{
        LASSERT_SPIN_LOCKED(&res->lr_lock);
}

struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
void unlock_res_and_lock(struct ldlm_lock *lock);
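
/*
 * Illustrative sketch (not part of this header): code that needs both a
 * lock's l_lock and its resource's lr_lock should use the paired helpers, so
 * the ordering documented on l_lock (l_lock before the resource lock) is
 * respected even if the lock changes resources underneath the caller.
 *
 *      struct ldlm_resource *res = lock_res_and_lock(lock);
 *
 *      check_res_locked(res);
 *      ... examine or modify fields protected by lr_lock ...
 *      unlock_res_and_lock(lock);
 */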
/* ldlm_pool.c */
void ldlm_pools_recalc(ldlm_side_t client);
int ldlm_pools_init(void);
void ldlm_pools_fini(void);
void ldlm_pools_wakeup(void);

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, ldlm_side_t client);
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
                     unsigned int gfp_mask);
void ldlm_pool_fini(struct ldlm_pool *pl);
int ldlm_pool_setup(struct ldlm_pool *pl, int limit);
int ldlm_pool_recalc(struct ldlm_pool *pl);
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl);
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl);
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl);
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl);
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv);
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv);
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit);
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);