/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * (visit-tags-table FILE)
 * vim:expandtab:shiftwidth=8:tabstop=8:
 */

#ifndef _LUSTRE_DLM_H__
#define _LUSTRE_DLM_H__

#ifdef __KERNEL__
# include <linux/proc_fs.h>
#endif

#include <linux/lustre_lib.h>
#include <linux/lustre_net.h>
#include <linux/lustre_import.h>
#include <linux/lustre_handles.h>
#include <linux/lustre_export.h> /* for obd_export, for LDLM_DEBUG */

struct obd_ops;
struct obd_device;

#define OBD_LDLM_DEVICENAME  "ldlm"

#define LDLM_DEFAULT_LRU_SIZE 100

typedef enum {
        ELDLM_OK = 0,

        ELDLM_LOCK_CHANGED = 300,
        ELDLM_LOCK_ABORTED = 301,
        ELDLM_LOCK_REPLACED = 302,
        ELDLM_NO_LOCK_DATA = 303,

        ELDLM_NAMESPACE_EXISTS = 400,
        ELDLM_BAD_NAMESPACE    = 401
} ldlm_error_t;

#define LDLM_NAMESPACE_SERVER 0
#define LDLM_NAMESPACE_CLIENT 1

#define LDLM_FL_LOCK_CHANGED   0x000001 /* extent, mode, or resource changed */

/* If the server returns one of these flags, then the lock was put on that list.
 * If the client sends one of these flags (during recovery ONLY!), it wants the
 * lock added to the specified list, no questions asked. -p */
#define LDLM_FL_BLOCK_GRANTED  0x000002
#define LDLM_FL_BLOCK_CONV     0x000004
#define LDLM_FL_BLOCK_WAIT     0x000008

#define LDLM_FL_CBPENDING      0x000010 /* this lock is being destroyed */
#define LDLM_FL_AST_SENT       0x000020 /* blocking or cancel packet was sent */
#define LDLM_FL_WAIT_NOREPROC  0x000040 /* not a real flag, not saved in lock */
#define LDLM_FL_CANCEL         0x000080 /* cancellation callback already run */

/* Lock is being replayed.  This could probably be implied by the fact that one
 * of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
#define LDLM_FL_REPLAY         0x000100

#define LDLM_FL_INTENT_ONLY    0x000200 /* don't grant lock, just do intent */
#define LDLM_FL_LOCAL_ONLY     0x000400 /* see ldlm_cli_cancel_unused */

/* don't run the cancel callback under ldlm_cli_cancel_unused */
#define LDLM_FL_FAILED         0x000800

#define LDLM_FL_HAS_INTENT     0x001000 /* lock request has intent */
#define LDLM_FL_CANCELING      0x002000 /* lock cancel has already been sent */
#define LDLM_FL_LOCAL          0x004000 /* local lock (ie, no srv/cli split) */
#define LDLM_FL_WARN           0x008000 /* see ldlm_cli_cancel_unused */
#define LDLM_FL_DISCARD_DATA   0x010000 /* discard (no writeback) on cancel */
#define LDLM_FL_CONFIG_CHANGE  0x020000 /* see ldlm_cli_cancel_unused */

#define LDLM_FL_NO_TIMEOUT     0x020000 /* Blocked by group lock - wait
                                         * indefinitely.  NB: shares the
                                         * 0x020000 bit with
                                         * LDLM_FL_CONFIG_CHANGE. */

/* file & record locking */
#define LDLM_FL_BLOCK_NOWAIT   0x040000 // server told not to wait if blocked
#define LDLM_FL_TEST_LOCK      0x080000 // return blocking lock

/* These are flags that are mapped into the flags and ASTs of blocking locks */
#define LDLM_AST_DISCARD_DATA  0x80000000 /* Add FL_DISCARD to blocking ASTs */
/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
#define LDLM_AST_FLAGS         (LDLM_FL_DISCARD_DATA)

/* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
 * the LVB filling happens _after_ the lock has been granted, so another thread
 * can match before the LVB has been updated.  As a dirty hack, we set
 * LDLM_FL_CAN_MATCH only after we've filled in the LVB.
 *
 * The proper fix is to do the granting inside of the completion AST, which can
 * be replaced with a LVB-aware wrapping function for OSC locks.  That change is
 * pretty high-risk, though, and would need a lot more testing. */
#define LDLM_FL_CAN_MATCH      0x100000

/* A lock contributes to the kms calculation until it has finished the part
 * of its cancellation that performs write back on its dirty pages.  It
 * can remain on the granted list during this whole time.  Threads racing
 * to update the kms after performing their writeback need to know to
 * exclude each other's locks from the calculation as they walk the granted
 * list. */
#define LDLM_FL_KMS_IGNORE     0x200000

/* The blocking callback is overloaded to perform two functions.  These flags
 * indicate which operation should be performed. */
#define LDLM_CB_BLOCKING    1
#define LDLM_CB_CANCELING   2

/* compatibility matrix */
#define LCK_COMPAT_EX  LCK_NL
#define LCK_COMPAT_PW  (LCK_COMPAT_EX | LCK_CR)
#define LCK_COMPAT_PR  (LCK_COMPAT_PW | LCK_PR)
#define LCK_COMPAT_CW  (LCK_COMPAT_PW | LCK_CW)
#define LCK_COMPAT_CR  (LCK_COMPAT_CW | LCK_PR | LCK_PW)
#define LCK_COMPAT_NL  (LCK_COMPAT_CR | LCK_EX)
#define LCK_COMPAT_GROUP  (LCK_GROUP | LCK_NL)

static ldlm_mode_t lck_compat_array[] = {
        [LCK_EX]    = LCK_COMPAT_EX,
        [LCK_PW]    = LCK_COMPAT_PW,
        [LCK_PR]    = LCK_COMPAT_PR,
        [LCK_CW]    = LCK_COMPAT_CW,
        [LCK_CR]    = LCK_COMPAT_CR,
        [LCK_NL]    = LCK_COMPAT_NL,
        [LCK_GROUP] = LCK_COMPAT_GROUP
};

static inline void lockmode_verify(ldlm_mode_t mode)
{
        LASSERT(mode >= LCK_EX && mode <= LCK_GROUP);
}

static inline int lockmode_compat(ldlm_mode_t exist, ldlm_mode_t new)
{
        return (lck_compat_array[exist] & new);
}
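
/* Illustrative sketch (not part of the API): how the compatibility matrix is
 * meant to be consulted when deciding whether a new request conflicts with an
 * already-granted lock.  The helper name below is hypothetical; only
 * lockmode_verify() and lockmode_compat() above are real. */
#if 0
static int example_modes_conflict(ldlm_mode_t granted, ldlm_mode_t requested)
{
        lockmode_verify(granted);
        lockmode_verify(requested);

        /* lck_compat_array[granted] is a bitmask of modes that may coexist
         * with 'granted'; e.g. PR is compatible with PR/CR/NL but not with
         * PW or EX. */
        return !lockmode_compat(granted, requested);
}
#endif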

/*
 *
 * cluster namespaces
 *
 */

#define DLM_OST_NAMESPACE 1
#define DLM_MDS_NAMESPACE 2

/* XXX
   - do we just separate this by security domains and use a prefix for
     multiple namespaces in the same domain?
   -
*/

struct ldlm_lock;
struct ldlm_resource;
struct ldlm_namespace;

typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
                               void *req_cookie, ldlm_mode_t mode, int flags,
                               void *data);

struct ldlm_valblock_ops {
        int (*lvbo_init)(struct ldlm_resource *res);
        int (*lvbo_update)(struct ldlm_resource *res, struct lustre_msg *m,
                           int buf_idx, int increase);
};

struct ldlm_namespace {
        char                  *ns_name;
        __u32                  ns_client; /* is this a client-side lock tree? */
        struct list_head      *ns_hash; /* hash table for ns */
        __u32                  ns_refcount; /* count of resources in the hash */
        struct list_head       ns_root_list; /* all root resources in ns */
        struct lustre_lock     ns_lock; /* protects hash, refcount, list */
        struct list_head       ns_list_chain; /* position in global NS list */
        /*
        struct proc_dir_entry *ns_proc_dir;
        */

        struct list_head       ns_unused_list; /* unused (LRU) locks in ns */
        int                    ns_nr_unused;
        unsigned int           ns_max_unused;

        spinlock_t             ns_counter_lock;
        __u64                  ns_locks;
        __u64                  ns_resources;
        ldlm_res_policy        ns_policy;
        struct ldlm_valblock_ops *ns_lvbo;
        void                  *ns_lvbp;
        wait_queue_head_t      ns_waitq;
};

/*
 *
 * Resource hash table
 *
 */

#define RES_HASH_BITS 10
#define RES_HASH_SIZE (1UL << RES_HASH_BITS)
#define RES_HASH_MASK (RES_HASH_SIZE - 1)
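
/* Illustrative sketch (hypothetical, not the in-tree hash function): resources
 * live in ns_hash, an array of RES_HASH_SIZE list heads, so a resource name
 * has to be folded into a bucket index with RES_HASH_MASK roughly like this. */
#if 0
static unsigned int example_res_hash(struct ldlm_res_id *name)
{
        __u64 sum = name->name[0] + name->name[1] +
                    name->name[2] + name->name[3];

        /* Any mixing will do for illustration; only the masking down to
         * RES_HASH_SIZE buckets is the point. */
        return (unsigned int)(sum & RES_HASH_MASK);
}
#endif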

struct ldlm_lock;

typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
                                      struct ldlm_lock_desc *new, void *data,
                                      int flag);
typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags,
                                        void *data);
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
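
/* Illustrative sketch of a client-side blocking callback.  The function and
 * the way it reacts to each flag are assumptions based on the LDLM_CB_*
 * definitions above and on ldlm_lock2handle()/ldlm_cli_cancel() declared later
 * in this header; it is not the canonical implementation. */
#if 0
static int example_blocking_ast(struct ldlm_lock *lock,
                                struct ldlm_lock_desc *desc, void *data,
                                int flag)
{
        struct lustre_handle lockh;
        int rc;

        switch (flag) {
        case LDLM_CB_BLOCKING:
                /* Another node wants a conflicting lock: give ours back. */
                ldlm_lock2handle(lock, &lockh);
                rc = ldlm_cli_cancel(&lockh);
                if (rc < 0)
                        return rc;
                break;
        case LDLM_CB_CANCELING:
                /* The lock is going away: drop any cached state attached to
                 * it (e.g. via l_ast_data) here. */
                break;
        }
        return 0;
}
#endif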

struct ldlm_lock {
        struct portals_handle l_handle; // must be first in the structure
        atomic_t              l_refc;
        struct ldlm_resource *l_resource;
        struct ldlm_lock     *l_parent;
        struct list_head      l_children;
        struct list_head      l_childof;
        struct list_head      l_lru;
        struct list_head      l_res_link; // position in one of three res lists
        struct list_head      l_export_chain; // per-export chain of locks

        ldlm_mode_t           l_req_mode;
        ldlm_mode_t           l_granted_mode;

        ldlm_completion_callback l_completion_ast;
        ldlm_blocking_callback   l_blocking_ast;
        ldlm_glimpse_callback    l_glimpse_ast;

        struct obd_export    *l_export;
        struct obd_export    *l_conn_export;
        __u32                 l_flags;
        struct lustre_handle  l_remote_handle;
        ldlm_policy_data_t    l_policy_data;

        __u32                 l_readers;
        __u32                 l_writers;
        __u8                  l_destroyed;

        /* If the lock is granted, a process sleeps on this waitq to learn when
         * it's no longer in use.  If the lock is not granted, a process sleeps
         * on this waitq to learn when it becomes granted. */
        wait_queue_head_t     l_waitq;
        struct timeval        l_enqueued_time;

        unsigned long         l_last_used;      /* jiffies */
        struct ldlm_extent    l_req_extent;

        /* Client-side-only members */
        __u32                 l_lvb_len;        /* temporary storage for */
        void                 *l_lvb_data;       /* an LVB received during */
        void                 *l_lvb_swabber;    /* an enqueue */
        void                 *l_ast_data;

        /* Server-side-only members */
        struct list_head      l_pending_chain;  /* callbacks pending */
        unsigned long         l_callback_timeout;
};

#define LDLM_PLAIN       10
#define LDLM_EXTENT      11
#define LDLM_FLOCK       12
#define LDLM_IBITS       13

#define LDLM_MIN_TYPE 10
#define LDLM_MAX_TYPE 13

struct ldlm_resource {
        struct ldlm_namespace *lr_namespace;
        struct list_head       lr_hash;
        struct ldlm_resource  *lr_parent;   /* NULL for a root resource */
        struct list_head       lr_children; /* list head for child resources */
        struct list_head       lr_childof;  /* part of ns_root_list if root res,
                                             * part of lr_children if child */

        struct list_head       lr_granted;
        struct list_head       lr_converting;
        struct list_head       lr_waiting;
        ldlm_mode_t            lr_most_restr;
        __u32                  lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
        struct ldlm_resource  *lr_root;
        struct ldlm_res_id     lr_name;
        atomic_t               lr_refcount;

        /* Server-side-only lock value block elements */
        struct semaphore       lr_lvb_sem;
        __u32                  lr_lvb_len;
        void                  *lr_lvb_data;

        /* lr_tmp holds a list head temporarily, during the building of a work
         * queue.  see ldlm_add_ast_work_item and ldlm_run_ast_work */
        void                  *lr_tmp;
};
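
/* Illustrative sketch of a server-side ldlm_valblock_ops implementation that
 * fills in the lr_lvb_* fields above.  The LVB layout (struct example_lvb),
 * the helper names, and the use of OBD_ALLOC from lustre_lib.h are assumptions
 * for illustration; only the lr_lvb_* fields and the ns_lvbo hook are defined
 * by this header. */
#if 0
struct example_lvb {
        __u64 elvb_size;        /* e.g. current object size */
};

static int example_lvbo_init(struct ldlm_resource *res)
{
        struct example_lvb *lvb;

        OBD_ALLOC(lvb, sizeof(*lvb));
        if (lvb == NULL)
                return -ENOMEM;

        /* Fill in the initial value from backing storage here. */
        lvb->elvb_size = 0;

        res->lr_lvb_data = lvb;
        res->lr_lvb_len = sizeof(*lvb);
        return 0;
}

static struct ldlm_valblock_ops example_lvbo = {
        .lvbo_init   = example_lvbo_init,
        /* .lvbo_update would refresh lr_lvb_data from a lustre_msg buffer */
};
#endif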

struct ldlm_ast_work {
        struct ldlm_lock *w_lock;
        int               w_blocking;
        struct ldlm_lock_desc w_desc;
        struct list_head   w_list;
        int w_flags;
        void *w_data;
        int w_datalen;
};

extern struct obd_ops ldlm_obd_ops;

extern char *ldlm_lockname[];
extern char *ldlm_typename[];
extern char *ldlm_it2str(int it);

#define __LDLM_DEBUG(level, lock, format, a...)                               \
do {                                                                          \
        if (lock->l_resource == NULL) {                                       \
                CDEBUG(level, "### " format                                   \
                       " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "\
                       "res: \?\? rrc=\?\? type: \?\?\? flags: %x remote: "   \
                       LPX64" expref: %d\n" , ## a, lock,                     \
                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),   \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99);     \
                break;                                                        \
        }                                                                     \
        if (lock->l_resource->lr_type == LDLM_EXTENT) {                       \
                CDEBUG(level, "### " format                                   \
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "  \
                       "res: "LPU64"/"LPU64"/"LPU64" rrc: %d type: %s ["LPU64 \
                       "->"LPU64"] (req "LPU64"->"LPU64") flags: %x remote: " \
                       LPX64" expref: %d\n" , ## a,                           \
                       lock->l_resource->lr_namespace->ns_name, lock,         \
                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),   \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_resource->lr_name.name[0],                     \
                       lock->l_resource->lr_name.name[1],                     \
                       lock->l_resource->lr_name.name[2],                     \
                       atomic_read(&lock->l_resource->lr_refcount),           \
                       ldlm_typename[lock->l_resource->lr_type],              \
                       lock->l_policy_data.l_extent.start,                    \
                       lock->l_policy_data.l_extent.end,                      \
                       lock->l_req_extent.start, lock->l_req_extent.end,      \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99);     \
                break;                                                        \
        }                                                                     \
        if (lock->l_resource->lr_type == LDLM_FLOCK) {                        \
                CDEBUG(level, "### " format                                   \
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "  \
                       "res: "LPU64"/"LPU64"/"LPU64" rrc: %d type: %s pid: "  \
                       LPU64" ["LPU64"->"LPU64"] flags: %x remote: "LPX64     \
                       " expref: %d\n" , ## a,                                \
                       lock->l_resource->lr_namespace->ns_name, lock,         \
                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),   \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_resource->lr_name.name[0],                     \
                       lock->l_resource->lr_name.name[1],                     \
                       lock->l_resource->lr_name.name[2],                     \
                       atomic_read(&lock->l_resource->lr_refcount),           \
                       ldlm_typename[lock->l_resource->lr_type],              \
                       lock->l_policy_data.l_flock.pid,                       \
                       lock->l_policy_data.l_flock.start,                     \
                       lock->l_policy_data.l_flock.end,                       \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99);     \
                break;                                                        \
        }                                                                     \
        if (lock->l_resource->lr_type == LDLM_IBITS) {                        \
                CDEBUG(level, "### " format                                   \
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "  \
                       "res: "LPU64"/"LPU64"/"LPU64" bits "LPX64" rrc: %d "   \
                       "type: %s flags: %x remote: "LPX64" expref: %d\n" , ## a,\
                       lock->l_resource->lr_namespace->ns_name,               \
                       lock, lock->l_handle.h_cookie,                         \
                       atomic_read (&lock->l_refc),                           \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_resource->lr_name.name[0],                     \
                       lock->l_resource->lr_name.name[1],                     \
                       lock->l_resource->lr_name.name[2],                     \
                       lock->l_policy_data.l_inodebits.bits,                  \
                       atomic_read(&lock->l_resource->lr_refcount),           \
                       ldlm_typename[lock->l_resource->lr_type],              \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99);     \
                break;                                                        \
        }                                                                     \
        {                                                                     \
                CDEBUG(level, "### " format                                   \
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "  \
                       "res: "LPU64"/"LPU64"/"LPU64"/"LPU64" rrc: %d type: %s "\
                       "flags: %x remote: "LPX64" expref: %d\n" , ## a,       \
                       lock->l_resource->lr_namespace->ns_name,               \
                       lock, lock->l_handle.h_cookie,                         \
                       atomic_read (&lock->l_refc),                           \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_resource->lr_name.name[0],                     \
                       lock->l_resource->lr_name.name[1],                     \
                       lock->l_resource->lr_name.name[2],                     \
                       lock->l_resource->lr_name.name[3],                     \
                       atomic_read(&lock->l_resource->lr_refcount),           \
                       ldlm_typename[lock->l_resource->lr_type],              \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99);     \
        }                                                                     \
} while (0)

#define LDLM_DEBUG(lock, format, a...) __LDLM_DEBUG(D_DLMTRACE, lock, \
                                                    format, ## a)
#define LDLM_ERROR(lock, format, a...) __LDLM_DEBUG(D_ERROR, lock, format, ## a)

#define LDLM_DEBUG_NOLOCK(format, a...)                 \
        CDEBUG(D_DLMTRACE, "### " format "\n" , ## a)

typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, int *flags,
                                      int first_enq, ldlm_error_t *err);

/*
 * Iterators.
 */

#define LDLM_ITER_CONTINUE 1 /* keep iterating */
#define LDLM_ITER_STOP     2 /* stop iterating */

typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *);
typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);

int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                          void *closure);
int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
                           void *closure);
int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
                               ldlm_res_iterator_t iter, void *closure);
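
/* Illustrative sketch of an ldlm_iterator_t callback and how it would be
 * driven by ldlm_namespace_foreach().  The callback name and the counting it
 * does are made up for illustration; the LDLM_ITER_* return convention is the
 * real one defined above. */
#if 0
static int example_count_granted(struct ldlm_lock *lock, void *closure)
{
        int *count = closure;

        if (lock->l_granted_mode == lock->l_req_mode)
                (*count)++;

        return LDLM_ITER_CONTINUE; /* LDLM_ITER_STOP would end the walk */
}

static int example_count_ns_granted(struct ldlm_namespace *ns)
{
        int count = 0;

        ldlm_namespace_foreach(ns, example_count_granted, &count);
        return count;
}
#endif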

int ldlm_replay_locks(struct obd_import *imp);
void ldlm_change_cbdata(struct ldlm_namespace *, struct ldlm_res_id *,
                        ldlm_iterator_t iter, void *data);

/* ldlm_flock.c */
int ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data);

/* ldlm_extent.c */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);
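
/* Illustrative sketch tying ldlm_extent_shift_kms() to the LDLM_FL_KMS_IGNORE
 * comment earlier in this file: when an extent lock is cancelled after
 * writeback, the cached "known minimum size" must be recomputed without the
 * departing lock.  The surrounding bookkeeping (where the old kms lives, who
 * sets LDLM_FL_KMS_IGNORE) is an assumption for illustration. */
#if 0
static void example_shrink_kms_on_cancel(struct ldlm_lock *lock, __u64 *kms)
{
        /* Locks flagged LDLM_FL_KMS_IGNORE are skipped while the granted
         * list is walked to find the new high-water mark. */
        *kms = ldlm_extent_shift_kms(lock, *kms);
}
#endif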


/* ldlm_lockd.c */
int ldlm_server_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
                             void *data, int flag);
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback,
                        ldlm_blocking_callback, ldlm_glimpse_callback);
int ldlm_handle_convert(struct ptlrpc_request *req);
int ldlm_handle_cancel(struct ptlrpc_request *req);
int ldlm_del_waiting_lock(struct ldlm_lock *lock);
int ldlm_get_ref(void);
void ldlm_put_ref(int force);

/* ldlm_lock.c */
ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res);
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh);
struct ldlm_lock *__ldlm_handle2lock(struct lustre_handle *, int flags);
void ldlm_cancel_callback(struct ldlm_lock *);
int ldlm_lock_set_data(struct lustre_handle *, void *data);
void ldlm_lock_remove_from_lru(struct ldlm_lock *);
struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *,
                                      struct lustre_handle *);

static inline struct ldlm_lock *ldlm_handle2lock(struct lustre_handle *h)
{
        return __ldlm_handle2lock(h, 0);
}

#define LDLM_LOCK_PUT(lock)                     \
do {                                            \
        /*LDLM_DEBUG((lock), "put");*/          \
        ldlm_lock_put(lock);                    \
} while (0)

#define LDLM_LOCK_GET(lock)                     \
({                                              \
        ldlm_lock_get(lock);                    \
        /*LDLM_DEBUG((lock), "get");*/          \
        lock;                                   \
})
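
/* Illustrative sketch of the handle <-> lock reference pattern: a
 * lustre_handle coming in over the wire is turned back into a referenced
 * ldlm_lock, used, and released with LDLM_LOCK_PUT().  The function body is
 * an assumption; the handle2lock/PUT pairing is the real convention. */
#if 0
static int example_use_handle(struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        if (lock == NULL)
                return -ESTALE;  /* handle no longer maps to a live lock */

        LDLM_DEBUG(lock, "found by handle");
        /* ... inspect or modify the lock here ... */

        LDLM_LOCK_PUT(lock);    /* drop the reference handle2lock took */
        return 0;
}
#endif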

struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock_destroy(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
int ldlm_lock_match(struct ldlm_namespace *ns, int flags, struct ldlm_res_id *,
                    __u32 type, ldlm_policy_data_t *, ldlm_mode_t mode,
                    struct lustre_handle *);
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        int *flags);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_cancel_locks_for_export(struct obd_export *export);
void ldlm_reprocess_all(struct ldlm_resource *res);
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos);
void ldlm_lock_dump_handle(int level, struct lustre_handle *);

/* ldlm_test.c */
int ldlm_test(struct obd_device *device, struct lustre_handle *connh);
int ldlm_regression_start(struct obd_device *obddev,
                          struct lustre_handle *connh,
                          unsigned int threads, unsigned int max_locks_in,
                          unsigned int num_resources_in,
                          unsigned int num_extents_in);
int ldlm_regression_stop(void);


/* resource.c */
struct ldlm_namespace *ldlm_namespace_new(char *name, __u32 local);
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags);
int ldlm_namespace_free(struct ldlm_namespace *ns, int force);
int ldlm_proc_setup(void);
void ldlm_proc_cleanup(void);

/* resource.c - internal */
struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
                                        struct ldlm_resource *parent,
                                        struct ldlm_res_id, __u32 type,
                                        int create);
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
int ldlm_resource_putref(struct ldlm_resource *res);
void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
                            struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
void ldlm_dump_all_namespaces(void);
void ldlm_namespace_dump(struct ldlm_namespace *);
void ldlm_resource_dump(struct ldlm_resource *);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
                              struct ldlm_res_id);

/* ldlm_request.c */
int ldlm_expired_completion_wait(void *data);
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_cli_enqueue(struct obd_export *exp,
                     struct ptlrpc_request *req,
                     struct ldlm_namespace *ns,
                     struct ldlm_res_id,
                     __u32 type,
                     ldlm_policy_data_t *,
                     ldlm_mode_t mode,
                     int *flags,
                     ldlm_blocking_callback blocking,
                     ldlm_completion_callback completion,
                     ldlm_glimpse_callback glimpse,
                     void *data,
                     void *lvb,
                     __u32 lvb_len,
                     void *lvb_swabber,
                     struct lustre_handle *lockh);
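
/* Illustrative sketch of a client-side enqueue, matching the ldlm_cli_enqueue()
 * prototype above: try to match an existing extent lock first, otherwise ask
 * the server for a new one.  The resource id, the extent, the NULL request,
 * the use of the stock ldlm_completion_ast(), and the hypothetical
 * example_blocking_ast from earlier in this file are assumptions for
 * illustration. */
#if 0
static int example_enqueue_extent(struct obd_export *exp,
                                  struct ldlm_namespace *ns,
                                  struct ldlm_res_id res_id,
                                  struct lustre_handle *lockh)
{
        ldlm_policy_data_t policy;
        int flags = 0;

        policy.l_extent.start = 0;
        policy.l_extent.end = ~0ULL;    /* whole object */

        /* Reuse a compatible granted lock if the namespace already has one. */
        if (ldlm_lock_match(ns, 0, &res_id, LDLM_EXTENT, &policy, LCK_PR,
                            lockh))
                return 0;

        return ldlm_cli_enqueue(exp, NULL, ns, res_id, LDLM_EXTENT, &policy,
                                LCK_PR, &flags, example_blocking_ast,
                                ldlm_completion_ast, NULL /* no glimpse */,
                                NULL /* ast data */, NULL /* lvb */, 0,
                                NULL /* swabber */, lockh);
}
#endif
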
int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
                    void *data, __u32 data_len);
int ldlm_cli_convert(struct lustre_handle *, int new_mode, int *flags);
int ldlm_cli_cancel(struct lustre_handle *lockh);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, struct ldlm_res_id *,
                           int flags, void *opaque);

/* mds/handler.c */
/* This has to be here because recursive inclusion sucks. */
int intent_disposition(struct ldlm_reply *rep, int flag);
void intent_set_disposition(struct ldlm_reply *rep, int flag);
int mds_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                     void *data, int flag);


/* ioctls for trying requests */
#define IOC_LDLM_TYPE                   'f'
#define IOC_LDLM_MIN_NR                 40

#define IOC_LDLM_TEST                   _IOWR('f', 40, long)
#define IOC_LDLM_DUMP                   _IOWR('f', 41, long)
#define IOC_LDLM_REGRESS_START          _IOWR('f', 42, long)
#define IOC_LDLM_REGRESS_STOP           _IOWR('f', 43, long)
#define IOC_LDLM_MAX_NR                 43

#endif /* _LUSTRE_DLM_H__ */