1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * (visit-tags-table FILE)
3 * vim:expandtab:shiftwidth=8:tabstop=8:
6 #ifndef _LUSTRE_DLM_H__
7 #define _LUSTRE_DLM_H__
10 #include <linux/lustre_dlm.h>
11 #elif defined(__APPLE__)
12 #include <darwin/lustre_dlm.h>
13 #elif defined(__WINNT__)
14 #include <winnt/lustre_dlm.h>
16 #error Unsupported operating system.
19 #include <lustre_lib.h>
20 #include <lustre_net.h>
21 #include <lustre_import.h>
22 #include <lustre_handles.h>
23 #include <lustre_export.h> /* for obd_export, for LDLM_DEBUG */
28 #define OBD_LDLM_DEVICENAME "ldlm"
30 #define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
31 #define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
36 ELDLM_LOCK_CHANGED = 300,
37 ELDLM_LOCK_ABORTED = 301,
38 ELDLM_LOCK_REPLACED = 302,
39 ELDLM_NO_LOCK_DATA = 303,
41 ELDLM_NAMESPACE_EXISTS = 400,
42 ELDLM_BAD_NAMESPACE = 401
46 LDLM_NAMESPACE_SERVER = 0,
47 LDLM_NAMESPACE_CLIENT = 1
50 #define LDLM_FL_LOCK_CHANGED 0x000001 /* extent, mode, or resource changed */
52 /* If the server returns one of these flags, then the lock was put on that list.
53 * If the client sends one of these flags (during recovery ONLY!), it wants the
54 * lock added to the specified list, no questions asked. -p */
55 #define LDLM_FL_BLOCK_GRANTED 0x000002
56 #define LDLM_FL_BLOCK_CONV 0x000004
57 #define LDLM_FL_BLOCK_WAIT 0x000008
59 #define LDLM_FL_CBPENDING 0x000010 /* this lock is being destroyed */
60 #define LDLM_FL_AST_SENT 0x000020 /* blocking or cancel packet was sent */
61 #define LDLM_FL_WAIT_NOREPROC 0x000040 /* not a real flag, not saved in lock */
62 #define LDLM_FL_CANCEL 0x000080 /* cancellation callback already run */
64 /* Lock is being replayed. This could probably be implied by the fact that one
65 * of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
66 #define LDLM_FL_REPLAY 0x000100
68 #define LDLM_FL_INTENT_ONLY 0x000200 /* don't grant lock, just do intent */
69 #define LDLM_FL_LOCAL_ONLY 0x000400 /* see ldlm_cli_cancel_unused */
71 /* don't run the cancel callback under ldlm_cli_cancel_unused */
72 #define LDLM_FL_FAILED 0x000800
74 #define LDLM_FL_HAS_INTENT 0x001000 /* lock request has intent */
75 #define LDLM_FL_CANCELING 0x002000 /* lock cancel has already been sent */
76 #define LDLM_FL_LOCAL 0x004000 /* local lock (ie, no srv/cli split) */
77 #define LDLM_FL_WARN 0x008000 /* see ldlm_cli_cancel_unused */
78 #define LDLM_FL_DISCARD_DATA 0x010000 /* discard (no writeback) on cancel */
80 #define LDLM_FL_NO_TIMEOUT 0x020000 /* Blocked by group lock - wait
83 /* file & record locking */
84 #define LDLM_FL_BLOCK_NOWAIT 0x040000 // server told not to wait if blocked
85 #define LDLM_FL_TEST_LOCK 0x080000 // return blocking lock
87 /* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
88 * the LVB filling happens _after_ the lock has been granted, so another thread
89 * can match it before the LVB has been updated. As a dirty hack, we set
90 * LDLM_FL_LVB_READY only after the LVB has actually been filled in.
91 * This is only needed on lov/osc now, where the LVB is actually used and
92 * callers must set it in the input flags.
94 * The proper fix is to do the granting inside of the completion AST, which can
95 * be replaced with a LVB-aware wrapping function for OSC locks. That change is
96 * pretty high-risk, though, and would need a lot more testing. */
98 #define LDLM_FL_LVB_READY 0x100000
100 /* A lock contributes to the kms calculation until it has finished the part
101 * of its cancellation that performs writeback on its dirty pages. It
102 * can remain on the granted list during this whole time. Threads racing
103 * to update the kms after performing their writeback need to know to
104 * exclude each other's locks from the calculation as they walk the granted
106 #define LDLM_FL_KMS_IGNORE 0x200000
108 /* Don't drop lock covering mmapped file in LRU */
109 #define LDLM_FL_NO_LRU 0x400000
111 /* Immediately cancel such locks when they block some other locks. Send
112 a cancel notification to the original lock holder, but expect no reply. */
113 #define LDLM_FL_CANCEL_ON_BLOCK 0x800000
115 /* Flags inherited from the parent lock when doing intents. */
116 #define LDLM_INHERIT_FLAGS (LDLM_FL_CANCEL_ON_BLOCK)
118 /* These are flags that are mapped into the flags and ASTs of blocking locks */
119 #define LDLM_AST_DISCARD_DATA 0x80000000 /* Add FL_DISCARD to blocking ASTs */
120 /* Flags sent in AST lock_flags to be mapped into the receiving lock. */
121 #define LDLM_AST_FLAGS (LDLM_FL_DISCARD_DATA)
123 /* completion ast to be executed */
124 #define LDLM_FL_CP_REQD 0x1000000
126 /* cleanup_resource has already handled the lock */
127 #define LDLM_FL_CLEANED 0x2000000
129 /* Optimization hint: LDLM can run the blocking callback from the current context
130 * without involving a separate thread, in order to decrease the context-switch rate. */
131 #define LDLM_FL_ATOMIC_CB 0x4000000
133 /* It may happen that a client initiates two operations, e.g. unlink and mkdir,
134 * such that the server sends a blocking AST to this client for the conflicting
135 * locks of the 1st operation, whereas the 2nd operation has canceled this lock
136 * and is waiting for the rpc_lock which is taken by the 1st operation.
137 * LDLM_FL_BL_AST is set by ldlm_callback_handler() on the lock so that the
138 * ELC code does not cancel it.
139 * LDLM_FL_BL_DONE is set by ldlm_cancel_callback() when the lock cache is
140 * dropped, to let ldlm_callback_handler() return EINVAL to the server. It is
141 * used when the ELC RPC is already prepared and is waiting for rpc_lock; it is
142 * too late to send a separate CANCEL RPC. */
143 #define LDLM_FL_BL_AST 0x10000000
144 #define LDLM_FL_BL_DONE 0x20000000
146 /* Cancel lock asynchronously. See ldlm_cli_cancel_unused_resource. */
147 #define LDLM_FL_ASYNC 0x40000000
149 /* The blocking callback is overloaded to perform two functions. These flags
150 * indicate which operation should be performed. */
151 #define LDLM_CB_BLOCKING 1
152 #define LDLM_CB_CANCELING 2
154 /* position flag of skip list pointers */
155 #define LDLM_SL_HEAD(skip_list) ((skip_list)->next != NULL)
156 #define LDLM_SL_TAIL(skip_list) ((skip_list)->prev != NULL)
157 #define LDLM_SL_EMPTY(skip_list) ((skip_list)->next == NULL && \
158 (skip_list)->prev == NULL)
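/*
 * Illustrative note (not part of the original header): these macros are meant
 * to be applied to a lock's skip-list members, e.g. a hypothetical helper that
 * checks whether a lock heads a group of same-mode locks:
 *
 *      static inline int example_sl_mode_head(struct ldlm_lock *lock)
 *      {
 *              return LDLM_SL_HEAD(&lock->l_sl_mode);
 *      }
 */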
160 /* compatibility matrix */
161 #define LCK_COMPAT_EX LCK_NL
162 #define LCK_COMPAT_PW (LCK_COMPAT_EX | LCK_CR)
163 #define LCK_COMPAT_PR (LCK_COMPAT_PW | LCK_PR)
164 #define LCK_COMPAT_CW (LCK_COMPAT_PW | LCK_CW)
165 #define LCK_COMPAT_CR (LCK_COMPAT_CW | LCK_PR | LCK_PW)
166 #define LCK_COMPAT_NL (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
167 #define LCK_COMPAT_GROUP (LCK_GROUP | LCK_NL)
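/*
 * Illustrative sketch only: assuming lck_compat_array[] (declared below) is
 * populated from the LCK_COMPAT_* masks above, a compatibility test reduces
 * to a single mask-and-test via lockmode_compat(), e.g.:
 *
 *      lockmode_compat(LCK_PR, LCK_PR)  -> non-zero (readers share)
 *      lockmode_compat(LCK_PR, LCK_PW)  -> 0        (writer conflicts)
 *      lockmode_compat(LCK_NL, LCK_EX)  -> non-zero (NL is compatible with all)
 */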
169 extern ldlm_mode_t lck_compat_array[];
171 static inline void lockmode_verify(ldlm_mode_t mode)
173 LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
176 static inline int lockmode_compat(ldlm_mode_t exist, ldlm_mode_t new)
178 return (lck_compat_array[exist] & new);
183 * cluster name spaces
187 #define DLM_OST_NAMESPACE 1
188 #define DLM_MDS_NAMESPACE 2
191 - do we just separate this by security domains and use a prefix for
192 multiple namespaces in the same domain?
202 * waiting_locks_spinlock
217 struct ldlm_resource;
218 struct ldlm_namespace;
220 typedef int (*ldlm_pool_recalc_t)(struct ldlm_pool *pl);
222 typedef int (*ldlm_pool_shrink_t)(struct ldlm_pool *pl,
223 int nr, unsigned int gfp_mask);
226 LDLM_POOL_CTL_RECALC = 1 << 0, /* Pool recalc is enabled */
227 LDLM_POOL_CTL_SHRINK = 1 << 1, /* Pool shrink is enabled */
228 LDLM_POOL_CTL_FULL = (LDLM_POOL_CTL_RECALC | LDLM_POOL_CTL_SHRINK)
231 /* One second for pools thread check interval. */
232 #define LDLM_POOLS_THREAD_PERIOD (1)
234 /* 5% margin for modest pools. See ldlm_pool.c for details. */
235 #define LDLM_POOLS_MODEST_MARGIN (5)
237 /* A change to SLV in % after which we want to wake up pools thread asap. */
238 #define LDLM_POOLS_FAST_SLV_CHANGE (5)
241 /* Common pool fields */
242 cfs_proc_dir_entry_t *pl_proc_dir; /* Pool proc directory. */
243 char pl_name[100]; /* Pool name, should be long
244 * enough to contain complex
245 * proc entry name. */
246 spinlock_t pl_lock; /* Lock for protecting slv/clv
248 atomic_t pl_limit; /* Number of allowed locks in
249 * the pool, both client and
251 atomic_t pl_granted; /* Number of granted locks. */
252 atomic_t pl_grant_rate; /* Grant rate per T. */
253 atomic_t pl_cancel_rate; /* Cancel rate per T. */
254 atomic_t pl_grant_speed; /* Grant speed (GR - CR) per T. */
255 __u64 pl_server_lock_volume; /* Server lock volume. Protected
257 cfs_time_t pl_update_time; /* Time when last slv from server
259 ldlm_pool_recalc_t pl_recalc; /* Recalc callback func pointer. */
260 ldlm_pool_shrink_t pl_shrink; /* Shrink callback func pointer. */
261 int pl_control; /* Pool features mask */
263 /* Server side pool fields */
264 atomic_t pl_grant_plan; /* Planned number of granted
265 * locks for next T. */
266 atomic_t pl_grant_step; /* Grant plan step for next T. */
268 /* Client side pool related fields */
269 atomic_t pl_lock_volume_factor; /* Lock volume factor. */
270 struct lprocfs_stats *pl_stats; /* Pool statistics. */
273 static inline int pool_recalc_enabled(struct ldlm_pool *pl)
275 return pl->pl_control & LDLM_POOL_CTL_RECALC;
278 static inline int pool_shrink_enabled(struct ldlm_pool *pl)
280 return pl->pl_control & LDLM_POOL_CTL_SHRINK;
283 typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
284 void *req_cookie, ldlm_mode_t mode, int flags,
287 struct ldlm_valblock_ops {
288 int (*lvbo_init)(struct ldlm_resource *res);
289 int (*lvbo_update)(struct ldlm_resource *res, struct lustre_msg *m,
290 int buf_idx, int increase);
294 LDLM_NAMESPACE_GREEDY = 1 << 0,
295 LDLM_NAMESPACE_MODEST = 1 << 1
298 struct ldlm_namespace {
300 ldlm_side_t ns_client; /* is this a client-side lock tree? */
301 __u64 ns_connect_flags; /* client side connect flags
302 * supported by server */
303 struct list_head *ns_hash; /* hash table for ns */
304 spinlock_t ns_hash_lock;
305 __u32 ns_refcount; /* count of resources in the hash */
306 struct list_head ns_root_list; /* all root resources in ns */
307 struct list_head ns_list_chain; /* position in global NS list */
309 struct list_head ns_unused_list; /* unused (LRU) locks in ns */
311 spinlock_t ns_unused_lock;
313 unsigned int ns_max_unused;
314 unsigned int ns_max_age;
315 cfs_time_t ns_next_dump; /* next debug dump, jiffies */
319 ldlm_res_policy ns_policy;
320 struct ldlm_valblock_ops *ns_lvbo;
322 cfs_waitq_t ns_waitq;
323 struct ldlm_pool ns_pool;
324 ldlm_appetite_t ns_appetite;
327 static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
330 return ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE;
335 * Resource hash table
339 #define RES_HASH_BITS 10
340 #define RES_HASH_SIZE (1UL << RES_HASH_BITS)
341 #define RES_HASH_MASK (RES_HASH_SIZE - 1)
345 typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
346 struct ldlm_lock_desc *new, void *data,
348 typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags,
350 typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
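/*
 * Minimal client-side blocking callback sketch, illustrative only (real
 * implementations live in the individual clients). The same callback is
 * invoked for two purposes, distinguished by LDLM_CB_BLOCKING (a conflicting
 * lock wants ours cancelled) and LDLM_CB_CANCELING (the lock is going away,
 * drop any cached state). Names below are hypothetical:
 *
 *      static int example_blocking_ast(struct ldlm_lock *lock,
 *                                      struct ldlm_lock_desc *desc,
 *                                      void *data, int flag)
 *      {
 *              struct lustre_handle lockh;
 *
 *              if (flag == LDLM_CB_BLOCKING) {
 *                      ldlm_lock2handle(lock, &lockh);
 *                      return ldlm_cli_cancel(&lockh);
 *              }
 *              return 0;
 *      }
 */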
353 struct portals_handle l_handle; // must be first in the structure
356 /* Internal spinlock that protects l_resource. This lock must be held
357 * before grabbing res_lock. */
360 /* ldlm_lock_change_resource() can change this */
361 struct ldlm_resource *l_resource;
363 /* protected by ns_hash_lock. FIXME */
364 struct list_head l_lru;
366 /* protected by lr_lock */
367 struct list_head l_res_link; // position in one of three res lists
369 struct list_head l_sl_mode; // skip pointer for request mode
370 struct list_head l_sl_policy; // skip pointer for inodebits
372 /* protected by led_lock */
373 struct list_head l_export_chain; // per-export chain of locks
375 /* protected by lr_lock */
376 ldlm_mode_t l_req_mode;
377 ldlm_mode_t l_granted_mode;
379 ldlm_completion_callback l_completion_ast;
380 ldlm_blocking_callback l_blocking_ast;
381 ldlm_glimpse_callback l_glimpse_ast;
383 struct obd_export *l_export;
384 struct obd_export *l_conn_export;
386 struct lustre_handle l_remote_handle;
387 ldlm_policy_data_t l_policy_data;
389 /* protected by lr_lock */
395 /* If the lock is granted, a process sleeps on this waitq to learn when
396 * it's no longer in use. If the lock is not granted, a process sleeps
397 * on this waitq to learn when it becomes granted. */
399 struct timeval l_enqueued_time;
401 cfs_time_t l_last_used; /* jiffies */
402 struct ldlm_extent l_req_extent;
404 /* Client-side-only members */
405 __u32 l_lvb_len; /* temporary storage for */
406 void *l_lvb_data; /* an LVB received during */
407 void *l_lvb_swabber; /* an enqueue */
410 /* Server-side-only members */
412 /* protected by elt_lock */
413 struct list_head l_pending_chain; /* callbacks pending */
414 cfs_time_t l_callback_timeout; /* jiffies */
416 __u32 l_pid; /* pid which created this lock */
418 /* for ldlm_add_ast_work_item() */
419 struct list_head l_bl_ast;
420 struct list_head l_cp_ast;
421 struct ldlm_lock *l_blocking_lock;
425 struct ldlm_resource {
426 struct ldlm_namespace *lr_namespace;
428 /* protected by ns_hash_lock */
429 struct list_head lr_hash;
430 struct ldlm_resource *lr_parent; /* NULL for a root resource */
431 struct list_head lr_children; /* list head for child resources */
432 struct list_head lr_childof; /* part of ns_root_list if root res,
433 * part of lr_children if child */
436 /* protected by lr_lock */
437 struct list_head lr_granted;
438 struct list_head lr_converting;
439 struct list_head lr_waiting;
440 ldlm_mode_t lr_most_restr;
441 ldlm_type_t lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK} */
442 struct ldlm_res_id lr_name;
443 atomic_t lr_refcount;
445 /* Server-side-only lock value block elements */
446 struct semaphore lr_lvb_sem;
451 struct ldlm_ast_work {
452 struct ldlm_lock *w_lock;
454 struct ldlm_lock_desc w_desc;
455 struct list_head w_list;
461 /* Common parameters for ldlm_enqueue. */
462 struct ldlm_enqueue_info {
463 __u32 ei_type; /* Type of the lock being enqueued. */
464 __u32 ei_mode; /* Mode of the lock being enqueued. */
465 void *ei_cb_bl; /* Different callbacks for lock handling (blocking, */
466 void *ei_cb_cp; /* completion, glimpse) */
468 void *ei_cbdata; /* Data to be passed into callbacks. */
471 extern struct obd_ops ldlm_obd_ops;
473 extern char *ldlm_lockname[];
474 extern char *ldlm_typename[];
475 extern char *ldlm_it2str(int it);
477 #define ldlm_lock_debug(cdls, level, lock, file, func, line, fmt, a...) do { \
480 if (((level) & D_CANTMASK) != 0 || \
481 ((libcfs_debug & (level)) != 0 && \
482 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) { \
483 static struct libcfs_debug_msg_data _ldlm_dbg_data = \
484 DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM, \
486 _ldlm_lock_debug(lock, level, &_ldlm_dbg_data, fmt, \
491 void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 mask,
492 struct libcfs_debug_msg_data *data, const char *fmt,
494 __attribute__ ((format (printf, 4, 5)));
496 #define LDLM_ERROR(lock, fmt, a...) do { \
497 static cfs_debug_limit_state_t _ldlm_cdls; \
498 ldlm_lock_debug(&_ldlm_cdls, D_ERROR, lock, \
499 __FILE__, __FUNCTION__, __LINE__, \
503 #define LDLM_DEBUG(lock, fmt, a...) do { \
504 ldlm_lock_debug(NULL, D_DLMTRACE, lock, \
505 __FILE__, __FUNCTION__, __LINE__, \
509 #define LDLM_DEBUG(lock, fmt, a...) ((void)0)
510 #define LDLM_ERROR(lock, fmt, a...) ((void)0)
513 #define LDLM_DEBUG_NOLOCK(format, a...) \
514 CDEBUG(D_DLMTRACE, "### " format "\n" , ##a)
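/*
 * Usage sketch (illustrative): the lock-aware macros print the full lock
 * state along with the message; the NOLOCK variant is a plain trace:
 *
 *      LDLM_DEBUG(lock, "reprocessing lock");
 *      LDLM_ERROR(lock, "unexpected granted mode %d", lock->l_granted_mode);
 *      LDLM_DEBUG_NOLOCK("no locks to cancel");
 */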
516 typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, int *flags,
517 int first_enq, ldlm_error_t *err,
518 struct list_head *work_list);
524 #define LDLM_ITER_CONTINUE 1 /* keep iterating */
525 #define LDLM_ITER_STOP 2 /* stop iterating */
527 typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *);
528 typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);
530 int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
532 int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
534 int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
535 ldlm_res_iterator_t iter, void *closure);
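/*
 * Illustrative iterator sketch: a callback passed to the foreach helpers above
 * returns LDLM_ITER_CONTINUE to keep walking or LDLM_ITER_STOP to end the walk
 * early. Hypothetical example counting granted locks on a resource:
 *
 *      static int example_count_granted(struct ldlm_lock *lock, void *closure)
 *      {
 *              int *count = closure;
 *
 *              if (lock->l_granted_mode == lock->l_req_mode)
 *                      (*count)++;
 *              return LDLM_ITER_CONTINUE;
 *      }
 *
 *      int count = 0;
 *      ldlm_resource_foreach(res, example_count_granted, &count);
 */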
537 int ldlm_replay_locks(struct obd_import *imp);
538 void ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
539 ldlm_iterator_t iter, void *data);
542 int ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data);
545 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);
549 int ldlm_server_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
550 void *data, int flag);
551 int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data);
552 int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
553 int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback,
554 ldlm_blocking_callback, ldlm_glimpse_callback);
555 int ldlm_handle_convert(struct ptlrpc_request *req);
556 int ldlm_handle_cancel(struct ptlrpc_request *req);
557 int ldlm_request_cancel(struct ptlrpc_request *req,
558 const struct ldlm_request *dlm_req, int first);
559 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
560 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock);
561 void ldlm_revoke_export_locks(struct obd_export *exp);
562 int ldlm_get_ref(ldlm_side_t client);
563 void ldlm_put_ref(ldlm_side_t client, int force);
566 ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res);
567 void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
568 void ldlm_lock2handle(const struct ldlm_lock *lock,
569 struct lustre_handle *lockh);
570 struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, int flags);
571 void ldlm_cancel_callback(struct ldlm_lock *);
572 int ldlm_lock_set_data(struct lustre_handle *, void *data);
573 int ldlm_lock_remove_from_lru(struct ldlm_lock *);
574 struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *,
575 const struct lustre_handle *);
577 static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
579 return __ldlm_handle2lock(h, 0);
582 static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
583 struct lustre_msg *m, int buf_idx,
586 if (res->lr_namespace->ns_lvbo &&
587 res->lr_namespace->ns_lvbo->lvbo_update) {
588 return res->lr_namespace->ns_lvbo->lvbo_update(res, m, buf_idx,
594 #define LDLM_LOCK_PUT(lock) \
596 /*LDLM_DEBUG((lock), "put");*/ \
597 ldlm_lock_put(lock); \
600 #define LDLM_LOCK_GET(lock) \
602 ldlm_lock_get(lock); \
603 /*LDLM_DEBUG((lock), "get");*/ \
607 #define ldlm_lock_list_put(head, member, count) \
609 struct ldlm_lock *_lock, *_next; \
611 list_for_each_entry_safe(_lock, _next, head, member) { \
614 list_del_init(&_lock->member); \
615 LDLM_LOCK_PUT(_lock); \
620 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
621 void ldlm_lock_put(struct ldlm_lock *lock);
622 void ldlm_lock_destroy(struct ldlm_lock *lock);
623 void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
624 void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
625 void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
626 void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
627 void ldlm_lock_allow_match(struct ldlm_lock *lock);
628 ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
629 const struct ldlm_res_id *, ldlm_type_t type,
630 ldlm_policy_data_t *, ldlm_mode_t mode,
631 struct lustre_handle *);
632 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
634 void ldlm_lock_cancel(struct ldlm_lock *lock);
635 void ldlm_cancel_locks_for_export(struct obd_export *export);
636 void ldlm_reprocess_all(struct ldlm_resource *res);
637 void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
638 void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos);
639 void ldlm_lock_dump_handle(int level, struct lustre_handle *);
640 void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
643 struct ldlm_namespace *ldlm_namespace_new(char *name, ldlm_side_t client,
644 ldlm_appetite_t apt);
645 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags);
646 int ldlm_namespace_free(struct ldlm_namespace *ns, int force);
647 void ldlm_namespace_move(struct ldlm_namespace *ns, ldlm_side_t client);
648 struct ldlm_namespace *ldlm_namespace_first(ldlm_side_t client);
649 void ldlm_namespace_get(struct ldlm_namespace *ns);
650 void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup);
651 void ldlm_namespace_get_nolock(struct ldlm_namespace *ns);
652 void ldlm_namespace_put_nolock(struct ldlm_namespace *ns, int wakeup);
653 int ldlm_proc_setup(void);
655 void ldlm_proc_cleanup(void);
657 static inline void ldlm_proc_cleanup(void) {}
660 /* resource.c - internal */
661 struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
662 struct ldlm_resource *parent,
663 const struct ldlm_res_id *,
664 ldlm_type_t type, int create);
665 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
666 int ldlm_resource_putref(struct ldlm_resource *res);
667 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
668 struct ldlm_lock *lock);
669 void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
670 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
671 void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
672 void ldlm_namespace_dump(int level, struct ldlm_namespace *);
673 void ldlm_resource_dump(int level, struct ldlm_resource *);
674 int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
675 const struct ldlm_res_id *);
677 struct ldlm_callback_suite {
678 ldlm_completion_callback lcs_completion;
679 ldlm_blocking_callback lcs_blocking;
680 ldlm_glimpse_callback lcs_glimpse;
684 int ldlm_expired_completion_wait(void *data);
685 int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
686 void *data, int flag);
687 int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
688 int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data);
689 int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
690 struct ldlm_enqueue_info *einfo,
691 const struct ldlm_res_id *res_id,
692 ldlm_policy_data_t *policy, int *flags,
693 void *lvb, __u32 lvb_len, void *lvb_swabber,
694 struct lustre_handle *lockh, int async);
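/*
 * Illustrative call sketch only (argument choices are hypothetical): a caller
 * fills a struct ldlm_enqueue_info and passes it to ldlm_cli_enqueue() along
 * with the resource name and a handle that receives the new lock:
 *
 *      struct ldlm_enqueue_info einfo = { 0 };
 *      struct lustre_handle lockh;
 *      int flags = 0, rc;
 *
 *      einfo.ei_type   = LDLM_PLAIN;
 *      einfo.ei_mode   = LCK_PR;
 *      einfo.ei_cb_bl  = example_blocking_ast;   (hypothetical, see sketch above)
 *      einfo.ei_cb_cp  = ldlm_completion_ast;
 *      einfo.ei_cbdata = NULL;
 *
 *      rc = ldlm_cli_enqueue(exp, NULL, &einfo, &res_id, NULL, &flags,
 *                            NULL, 0, NULL, &lockh, 0);
 */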
695 struct ptlrpc_request *ldlm_prep_enqueue_req(struct obd_export *exp,
696 int bufcount, int *size,
697 struct list_head *head, int count);
698 int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
699 const struct ldlm_request *dlm_req,
700 const struct ldlm_callback_suite *cbs);
701 int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
702 ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
703 int *flags, void *lvb, __u32 lvb_len,
704 void *lvb_swabber, struct lustre_handle *lockh,
706 int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
707 const struct ldlm_res_id *res_id,
708 ldlm_type_t type, ldlm_policy_data_t *policy,
709 ldlm_mode_t mode, int *flags,
710 ldlm_blocking_callback blocking,
711 ldlm_completion_callback completion,
712 ldlm_glimpse_callback glimpse,
713 void *data, __u32 lvb_len, void *lvb_swabber,
714 struct lustre_handle *lockh);
715 int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
716 void *data, __u32 data_len);
717 int ldlm_cli_convert(struct lustre_handle *, int new_mode, int *flags);
718 int ldlm_cli_update_pool(struct ptlrpc_request *req);
719 int ldlm_handle_convert0(struct ptlrpc_request *req,
720 const struct ldlm_request *dlm_req);
721 int ldlm_cli_cancel(struct lustre_handle *lockh);
722 int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
723 int flags, void *opaque);
724 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
725 const struct ldlm_res_id *res_id,
726 ldlm_policy_data_t *policy,
727 ldlm_mode_t mode, int flags, void *opaque);
728 int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
729 int count, int flags);
730 int ldlm_cli_join_lru(struct ldlm_namespace *,
731 const struct ldlm_res_id *, int join);
732 int ldlm_cancel_resource_local(struct ldlm_resource *res,
733 struct list_head *cancels,
734 ldlm_policy_data_t *policy,
735 ldlm_mode_t mode, int lock_flags,
736 int flags, void *opaque);
737 int ldlm_cli_cancel_list(struct list_head *head, int count,
738 struct ptlrpc_request *req, int off, int flags);
741 /* This has to be here because recursive inclusion sucks. */
742 int intent_disposition(struct ldlm_reply *rep, int flag);
743 void intent_set_disposition(struct ldlm_reply *rep, int flag);
746 /* ioctls for trying requests */
747 #define IOC_LDLM_TYPE 'f'
748 #define IOC_LDLM_MIN_NR 40
750 #define IOC_LDLM_TEST _IOWR('f', 40, long)
751 #define IOC_LDLM_DUMP _IOWR('f', 41, long)
752 #define IOC_LDLM_REGRESS_START _IOWR('f', 42, long)
753 #define IOC_LDLM_REGRESS_STOP _IOWR('f', 43, long)
754 #define IOC_LDLM_MAX_NR 43
756 static inline void lock_res(struct ldlm_resource *res)
758 spin_lock(&res->lr_lock);
761 static inline void unlock_res(struct ldlm_resource *res)
763 spin_unlock(&res->lr_lock);
766 static inline void check_res_locked(struct ldlm_resource *res)
768 LASSERT_SPIN_LOCKED(&res->lr_lock);
771 struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock);
772 void unlock_res_and_lock(struct ldlm_lock *lock);
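/*
 * Illustrative locking pattern: per-resource state (lr_granted, lr_converting,
 * lr_waiting, ...) is protected by lr_lock, so walks look like:
 *
 *      lock_res(res);
 *      list_for_each_entry(lock, &res->lr_granted, l_res_link) {
 *              ...
 *      }
 *      unlock_res(res);
 *
 * lock_res_and_lock()/unlock_res_and_lock() additionally take the lock's own
 * spinlock, in the documented order (the lock's spinlock before lr_lock).
 */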
775 int ldlm_pools_init(ldlm_side_t client);
776 void ldlm_pools_recalc(ldlm_side_t client);
777 void ldlm_pools_fini(void);
778 void ldlm_pools_wakeup(void);
780 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
781 int idx, ldlm_side_t client);
782 int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
783 unsigned int gfp_mask);
784 void ldlm_pool_fini(struct ldlm_pool *pl);
785 int ldlm_pool_setup(struct ldlm_pool *pl, __u32 limit);
786 int ldlm_pool_recalc(struct ldlm_pool *pl);
787 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl);
788 __u32 ldlm_pool_get_limit(struct ldlm_pool *pl);
789 void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv);
790 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit);
791 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
792 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);