/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * (visit-tags-table FILE)
 * vim:expandtab:shiftwidth=8:tabstop=8:
 */

#ifndef _LUSTRE_DLM_H__
#define _LUSTRE_DLM_H__

#ifdef __KERNEL__
# include <linux/proc_fs.h>
#endif

#include <linux/lustre_lib.h>
#include <linux/lustre_net.h>
#include <linux/lustre_import.h>
#include <linux/lustre_handles.h>
#include <linux/lustre_export.h> /* for obd_export, for LDLM_DEBUG */

struct obd_ops;
struct obd_device;

#define LDLM_DEFAULT_LRU_SIZE 100

typedef enum {
        ELDLM_OK = 0,

        ELDLM_LOCK_CHANGED = 300,
        ELDLM_LOCK_ABORTED = 301,
        ELDLM_LOCK_REPLACED = 302,
        ELDLM_NO_LOCK_DATA = 303,

        ELDLM_NAMESPACE_EXISTS = 400,
        ELDLM_BAD_NAMESPACE    = 401
} ldlm_error_t;

#define LDLM_NAMESPACE_SERVER 0
#define LDLM_NAMESPACE_CLIENT 1

#define LDLM_FL_LOCK_CHANGED   0x000001 /* extent, mode, or resource changed */

/* If the server returns one of these flags, then the lock was put on that list.
 * If the client sends one of these flags (during recovery ONLY!), it wants the
 * lock added to the specified list, no questions asked. -p */
#define LDLM_FL_BLOCK_GRANTED  0x000002
#define LDLM_FL_BLOCK_CONV     0x000004
#define LDLM_FL_BLOCK_WAIT     0x000008

#define LDLM_FL_CBPENDING      0x000010 /* this lock is being destroyed */
#define LDLM_FL_AST_SENT       0x000020 /* blocking or cancel packet was sent */
#define LDLM_FL_WAIT_NOREPROC  0x000040 /* not a real flag, not saved in lock */
#define LDLM_FL_CANCEL         0x000080 /* cancellation callback already run */

/* Lock is being replayed.  This could probably be implied by the fact that one
 * of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
#define LDLM_FL_REPLAY         0x000100

#define LDLM_FL_INTENT_ONLY    0x000200 /* don't grant lock, just do intent */
#define LDLM_FL_LOCAL_ONLY     0x000400 /* see ldlm_cli_cancel_unused */

/* don't run the cancel callback under ldlm_cli_cancel_unused */
#define LDLM_FL_FAILED         0x000800

#define LDLM_FL_HAS_INTENT     0x001000 /* lock request has intent */
#define LDLM_FL_CANCELING      0x002000 /* lock cancel has already been sent */
#define LDLM_FL_LOCAL          0x004000 /* local lock (ie, no srv/cli split) */
#define LDLM_FL_WARN           0x008000 /* see ldlm_cli_cancel_unused */
#define LDLM_FL_DISCARD_DATA   0x010000 /* discard (no writeback) on cancel */
#define LDLM_FL_CONFIG_CHANGE  0x020000 /* see ldlm_cli_cancel_unused */

#define LDLM_FL_NO_TIMEOUT     0x040000 /* Blocked by group lock - wait
                                         * indefinitely */

/* file & record locking */
#define LDLM_FL_BLOCK_NOWAIT   0x080000 /* server told not to wait if blocked */
#define LDLM_FL_TEST_LOCK      0x100000 /* return blocking lock */
#define LDLM_FL_GET_BLOCKING   0x200000 /* return updated blocking proc info */
#define LDLM_FL_DEADLOCK_CHK   0x400000 /* check for deadlock */
#define LDLM_FL_DEADLOCK_DEL   0x800000 /* lock no longer blocked */

/* These are flags that are mapped into the flags and ASTs of blocking locks */
#define LDLM_AST_DISCARD_DATA  0x80000000 /* Add FL_DISCARD to blocking ASTs */
/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
#define LDLM_AST_FLAGS         (LDLM_FL_DISCARD_DATA)

/* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
 * the LVB filling happens _after_ the lock has been granted, so another thread
 * can match before the LVB has been updated.  As a dirty hack, we set
 * LDLM_FL_CAN_MATCH only after we've done the LVB filling.
 *
 * The proper fix is to do the granting inside the completion AST, which can
 * be replaced with an LVB-aware wrapping function for OSC locks.  That change
 * is pretty high-risk, though, and would need a lot more testing. */
#define LDLM_FL_CAN_MATCH      0x100000

/* A lock contributes to the kms calculation until it has finished the part
 * of its cancellation that performs writeback on its dirty pages.  It
 * can remain on the granted list during this whole time.  Threads racing
 * to update the kms after performing their writeback need to know to
 * exclude each other's locks from the calculation as they walk the granted
 * list. */
#define LDLM_FL_KMS_IGNORE     0x200000
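
/*
 * Illustrative sketch only (not part of the original header): how a caller
 * might recompute a kms-style offset while walking the granted list, skipping
 * its own lock and any lock flagged LDLM_FL_KMS_IGNORE.  The real logic lives
 * in ldlm_extent_shift_kms(); the function and variable names below are
 * hypothetical, and the types used here are defined further down in this file.
 */
#if 0
static __u64 example_recompute_kms(struct ldlm_lock *self, __u64 old_kms)
{
        struct ldlm_resource *res = self->l_resource;
        struct ldlm_lock *lck;
        __u64 kms = 0;

        /* the caller is expected to hold res->lr_lock */
        list_for_each_entry(lck, &res->lr_granted, l_res_link) {
                if (lck == self || lck->l_flags & LDLM_FL_KMS_IGNORE)
                        continue;
                if (lck->l_policy_data.l_extent.end + 1 > kms)
                        kms = lck->l_policy_data.l_extent.end + 1;
        }

        /* never grow kms past the previously known value in this sketch */
        return kms < old_kms ? kms : old_kms;
}
#endif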

/* completion ast to be executed */
#define LDLM_FL_CP_REQD        0x400000

/* cleanup_resource has already handled the lock */
#define LDLM_FL_CLEANED        0x800000

/* Optimization hint: the LDLM can run the blocking callback from the current
 * context, without involving a separate thread, in order to decrease the
 * context-switch rate. */
#define LDLM_FL_ATOMIC_CB      0x1000000

/* while this flag is set, the lock can't change resource */
#define LDLM_FL_LOCK_PROTECT   0x4000000
#define LDLM_FL_LOCK_PROTECT_BIT  26

/* The blocking callback is overloaded to perform two functions.  These flags
 * indicate which operation should be performed. */
#define LDLM_CB_BLOCKING    1
#define LDLM_CB_CANCELING   2

/* compatibility matrix */
#define LCK_COMPAT_EX  LCK_NL
#define LCK_COMPAT_PW  (LCK_COMPAT_EX | LCK_CR)
#define LCK_COMPAT_PR  (LCK_COMPAT_PW | LCK_PR)
#define LCK_COMPAT_CW  (LCK_COMPAT_PW | LCK_CW)
#define LCK_COMPAT_CR  (LCK_COMPAT_CW | LCK_PR | LCK_PW)
#define LCK_COMPAT_NL  (LCK_COMPAT_CR | LCK_EX)
#define LCK_COMPAT_GROUP  (LCK_GROUP | LCK_NL)

static ldlm_mode_t lck_compat_array[] = {
        [LCK_EX]    = LCK_COMPAT_EX,
        [LCK_PW]    = LCK_COMPAT_PW,
        [LCK_PR]    = LCK_COMPAT_PR,
        [LCK_CW]    = LCK_COMPAT_CW,
        [LCK_CR]    = LCK_COMPAT_CR,
        [LCK_NL]    = LCK_COMPAT_NL,
        [LCK_GROUP] = LCK_COMPAT_GROUP
};

static inline void lockmode_verify(ldlm_mode_t mode)
{
        LASSERT(mode >= LCK_EX && mode <= LCK_GROUP);
}

static inline int lockmode_compat(ldlm_mode_t exist, ldlm_mode_t new)
{
        return (lck_compat_array[exist] & new);
}
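
/*
 * Example (illustrative only, not part of the original header): how the
 * compatibility matrix above is consulted.  lockmode_compat() returns non-zero
 * when a newly requested mode can coexist with an already-granted mode.
 */
#if 0
static void example_check_compat(void)
{
        /* PR (protected read) is compatible with another PR ... */
        LASSERT(lockmode_compat(LCK_PR, LCK_PR));
        /* ... but not with PW (protected write) */
        LASSERT(!lockmode_compat(LCK_PR, LCK_PW));
        /* NL (null) is compatible with everything, including EX */
        LASSERT(lockmode_compat(LCK_NL, LCK_EX));
}
#endif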

/*
 *
 * cluster name spaces
 *
 */

#define DLM_OST_NAMESPACE 1
#define DLM_MDS_NAMESPACE 2

/* XXX
   - do we just separate this by security domains and use a prefix for
     multiple namespaces in the same domain?
   -
*/

/*
 * Locking rules:
 *
 * lr_lock
 *
 * lr_lock
 *     waiting_locks_spinlock
 *
 * lr_lock
 *     led_lock
 *
 * lr_lock
 *     ns_unused_lock
 *
 * lr_lvb_sem
 *     lr_lock
 *
 */
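
/*
 * Illustrative sketch (not in the original header): following the ordering
 * documented above, lr_lock is taken before ns_unused_lock, and lr_lvb_sem
 * before lr_lock.  The helper name is hypothetical, and the structures used
 * here are defined further down in this file.
 */
#if 0
static void example_lock_ordering(struct ldlm_resource *res,
                                  struct ldlm_namespace *ns)
{
        /* lr_lock, then ns_unused_lock */
        spin_lock(&res->lr_lock);
        spin_lock(&ns->ns_unused_lock);
        /* ... manipulate the LRU list here ... */
        spin_unlock(&ns->ns_unused_lock);
        spin_unlock(&res->lr_lock);

        /* lr_lvb_sem, then lr_lock */
        down(&res->lr_lvb_sem);
        spin_lock(&res->lr_lock);
        /* ... publish LVB contents here ... */
        spin_unlock(&res->lr_lock);
        up(&res->lr_lvb_sem);
}
#endif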

struct ldlm_lock;
struct ldlm_resource;
struct ldlm_namespace;

typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
                               void *req_cookie, ldlm_mode_t mode, int flags,
                               void *data);

struct ldlm_valblock_ops {
        int (*lvbo_init)(struct ldlm_resource *res);

        int (*lvbo_update)(struct ldlm_resource *res,
                           struct lustre_msg *m,
                           int buf_idx, int increase);
};
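
/*
 * Illustrative sketch (not in the original header): the shape of a minimal
 * LVB ops table.  The callback bodies and names below are hypothetical; a
 * real server-side implementation would allocate and refresh
 * res->lr_lvb_data under lr_lvb_sem.
 */
#if 0
static int example_lvbo_init(struct ldlm_resource *res)
{
        /* allocate/fill res->lr_lvb_data and set res->lr_lvb_len here */
        return 0;
}

static int example_lvbo_update(struct ldlm_resource *res,
                               struct lustre_msg *m,
                               int buf_idx, int increase)
{
        /* merge the client-supplied LVB from buffer buf_idx of 'm' here */
        return 0;
}

static struct ldlm_valblock_ops example_lvbo_ops = {
        .lvbo_init   = example_lvbo_init,
        .lvbo_update = example_lvbo_update,
};
#endif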

struct ldlm_namespace {
        char                  *ns_name;
        __u32                  ns_client; /* is this a client-side lock tree? */
        struct list_head      *ns_hash; /* hash table for ns */
        spinlock_t             ns_hash_lock;
        __u32                  ns_refcount; /* count of resources in the hash */
        struct list_head       ns_root_list; /* all root resources in ns */
        struct list_head       ns_list_chain; /* position in global NS list */
        /*
        struct proc_dir_entry *ns_proc_dir;
        */

        struct list_head       ns_unused_list; /* unused (LRU) locks in ns */
        int                    ns_nr_unused;
        spinlock_t             ns_unused_lock;

        unsigned int           ns_max_unused;
        unsigned long          ns_next_dump;   /* next dump time */

        atomic_t               ns_locks;
        __u64                  ns_resources;
        ldlm_res_policy        ns_policy;
        struct ldlm_valblock_ops *ns_lvbo;
        void                  *ns_lvbp;
        wait_queue_head_t      ns_waitq;
};
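
/*
 * Illustrative sketch (not in the original header): creating a server-side
 * namespace and attaching the hypothetical example_lvbo_ops from the previous
 * sketch.  ldlm_namespace_new() and ldlm_namespace_free() are declared
 * further down in this header.
 */
#if 0
static int example_setup_namespace(void)
{
        struct ldlm_namespace *ns;

        ns = ldlm_namespace_new("example-server-ns", LDLM_NAMESPACE_SERVER);
        if (ns == NULL)
                return -ENOMEM;

        ns->ns_lvbo = &example_lvbo_ops;        /* from the sketch above */

        /* ... enqueue and service locks against this namespace ... */

        ldlm_namespace_free(ns, /* force */ 0);
        return 0;
}
#endif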

/*
 *
 * Resource hash table
 *
 */

#define RES_HASH_BITS 10
#define RES_HASH_SIZE (1UL << RES_HASH_BITS)
#define RES_HASH_MASK (RES_HASH_SIZE - 1)

struct ldlm_lock;

typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
                                      struct ldlm_lock_desc *new, void *data,
                                      int flag);
typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags,
                                        void *data);
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
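
/*
 * Illustrative sketch (not in the original header): the general shape of a
 * client blocking callback.  The flag argument distinguishes the "please drop
 * this lock" notification from the later cancel pass (see LDLM_CB_BLOCKING
 * and LDLM_CB_CANCELING above).  The function name is hypothetical and error
 * handling is elided.
 */
#if 0
static int example_blocking_ast(struct ldlm_lock *lock,
                                struct ldlm_lock_desc *desc,
                                void *data, int flag)
{
        struct lustre_handle lockh;

        switch (flag) {
        case LDLM_CB_BLOCKING:
                /* someone is blocked on us: give the lock back */
                ldlm_lock2handle(lock, &lockh);
                return ldlm_cli_cancel(&lockh);
        case LDLM_CB_CANCELING:
                /* the lock is going away: invalidate any cached state */
                return 0;
        default:
                LBUG();
                return 0;
        }
}
#endif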

struct ldlm_lock {
        struct portals_handle l_handle; // must be first in the structure
        atomic_t              l_refc;

        /* ldlm_lock_change_resource() can change this */
        struct ldlm_resource *l_resource;

        /* set once, no need to protect it */
        struct ldlm_lock     *l_parent;

        /* protected by ns_hash_lock */
        struct list_head      l_children;
        struct list_head      l_childof;

        /* protected by ns_hash_lock. FIXME */
        struct list_head      l_lru;

        /* protected by lr_lock */
        struct list_head      l_res_link; // position in one of three res lists

        /* protected by led_lock */
        struct list_head      l_export_chain; // per-export chain of locks

        /* protected by lr_lock */
        ldlm_mode_t           l_req_mode;
        ldlm_mode_t           l_granted_mode;

        ldlm_completion_callback l_completion_ast;
        ldlm_blocking_callback   l_blocking_ast;
        ldlm_glimpse_callback    l_glimpse_ast;

        struct obd_export    *l_export;
        struct obd_export    *l_conn_export;

        /* protected by lr_lock */
        __u32                 l_flags;

        struct lustre_handle  l_remote_handle;
        ldlm_policy_data_t    l_policy_data;

        /* protected by lr_lock */
        __u32                 l_readers;
        __u32                 l_writers;
        __u8                  l_destroyed;

        /* If the lock is granted, a process sleeps on this waitq to learn when
         * it's no longer in use.  If the lock is not granted, a process sleeps
         * on this waitq to learn when it becomes granted. */
        wait_queue_head_t     l_waitq;
        struct timeval        l_enqueued_time;

        unsigned long         l_last_used;      /* jiffies */
        struct ldlm_extent    l_req_extent;

        /* Client-side-only members */
        __u32                 l_lvb_len;        /* temporary storage for */
        void                 *l_lvb_data;       /* an LVB received during */
        void                 *l_lvb_swabber;    /* an enqueue */
        void                 *l_ast_data;

        /* Server-side-only members */

        /* protected by elt_lock */
        struct list_head      l_pending_chain;  /* callbacks pending */
        unsigned long         l_callback_timeout;

        __u32                 l_pid;            /* pid which created this lock */
        __u32                 l_pidb;           /* who holds LOCK_PROTECT_BIT */

        struct list_head      l_tmp;

        /* for ldlm_add_ast_work_item() */
        struct list_head      l_bl_ast;
        struct list_head      l_cp_ast;
        struct ldlm_lock     *l_blocking_lock;
        int                   l_bl_ast_run;
};

#define LDLM_PLAIN       10
#define LDLM_EXTENT      11
#define LDLM_FLOCK       12
#define LDLM_IBITS       13

#define LDLM_MIN_TYPE 10
#define LDLM_MAX_TYPE 14

struct ldlm_resource {
        struct ldlm_namespace *lr_namespace;

        /* protected by ns_hash_lock */
        struct list_head       lr_hash;
        struct ldlm_resource  *lr_parent;   /* NULL for a root resource */
        struct list_head       lr_children; /* list head for child resources */
        struct list_head       lr_childof;  /* part of ns_root_list if root res,
                                             * part of lr_children if child */
        spinlock_t             lr_lock;

        /* protected by lr_lock */
        struct list_head       lr_granted;
        struct list_head       lr_converting;
        struct list_head       lr_waiting;
        ldlm_mode_t            lr_most_restr;
        __u32                  lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
        struct ldlm_res_id     lr_name;
        atomic_t               lr_refcount;

        /* Server-side-only lock value block elements */
        struct semaphore       lr_lvb_sem;
        __u32                  lr_lvb_len;
        void                  *lr_lvb_data;

        /* lr_tmp holds a list head temporarily, during the building of a work
         * queue.  see ldlm_add_ast_work_item and ldlm_run_ast_work */
        void                  *lr_tmp;
};

struct ldlm_ast_work {
        struct ldlm_lock      *w_lock;
        int                    w_blocking;
        struct ldlm_lock_desc  w_desc;
        struct list_head       w_list;
        int                    w_flags;
        void                  *w_data;
        int                    w_datalen;
};

extern struct obd_ops ldlm_obd_ops;

extern char *ldlm_lockname[];
extern char *ldlm_typename[];
extern char *ldlm_it2str(int it);

#define __LDLM_DEBUG(level, lock, format, a...)                               \
do {                                                                          \
        if (lock->l_resource == NULL) {                                       \
                CDEBUG(level, "### " format                                   \
                       " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "\
                       "res: \?\? rrc=\?\? type: \?\?\? flags: %x remote: "   \
                       LPX64" expref: %d pid: %u\n" , ## a, lock,             \
                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),   \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99,      \
                       lock->l_pid);                                          \
                break;                                                        \
        }                                                                     \
        if (lock->l_resource->lr_type == LDLM_EXTENT) {                       \
                CDEBUG(level, "### " format                                   \
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "  \
                       "res: "LPU64"/"LPU64"/"LPU64" rrc: %d type: %s ["LPU64 \
                       "->"LPU64"] (req "LPU64"->"LPU64") flags: %x remote: " \
                       LPX64" expref: %d pid: %u\n" , ## a,                   \
                       lock->l_resource->lr_namespace->ns_name, lock,         \
                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),   \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_resource->lr_name.name[0],                     \
                       lock->l_resource->lr_name.name[1],                     \
                       lock->l_resource->lr_name.name[2],                     \
                       atomic_read(&lock->l_resource->lr_refcount),           \
                       ldlm_typename[lock->l_resource->lr_type],              \
                       lock->l_policy_data.l_extent.start,                    \
                       lock->l_policy_data.l_extent.end,                      \
                       lock->l_req_extent.start, lock->l_req_extent.end,      \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99,      \
                       lock->l_pid);                                          \
                break;                                                        \
        }                                                                     \
        if (lock->l_resource->lr_type == LDLM_FLOCK) {                        \
                CDEBUG(level, "### " format                                   \
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "  \
                       "res: "LPU64"/"LPU64"/"LPU64" rrc: %d type: %s "       \
                       "pid: "LPU64" nid: "LPU64" ["LPU64"->"LPU64"] "        \
                       "flags: %x remote: "LPX64" expref: %d pid: %u\n", ## a,\
                       lock->l_resource->lr_namespace->ns_name, lock,         \
                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),   \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_resource->lr_name.name[0],                     \
                       lock->l_resource->lr_name.name[1],                     \
                       lock->l_resource->lr_name.name[2],                     \
                       atomic_read(&lock->l_resource->lr_refcount),           \
                       ldlm_typename[lock->l_resource->lr_type],              \
                       lock->l_policy_data.l_flock.pid,                       \
                       lock->l_policy_data.l_flock.nid,                       \
                       lock->l_policy_data.l_flock.start,                     \
                       lock->l_policy_data.l_flock.end,                       \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99,      \
                       lock->l_pid);                                          \
                break;                                                        \
        }                                                                     \
        if (lock->l_resource->lr_type == LDLM_IBITS) {                        \
                CDEBUG(level, "### " format                                   \
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "  \
                       "res: "LPU64"/"LPU64"/"LPU64" bits "LPX64" rrc: %d "   \
                       "type: %s flags: %x remote: "LPX64" expref: %d "       \
                       "pid %u\n" , ## a,                                     \
                       lock->l_resource->lr_namespace->ns_name,               \
                       lock, lock->l_handle.h_cookie,                         \
                       atomic_read (&lock->l_refc),                           \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_resource->lr_name.name[0],                     \
                       lock->l_resource->lr_name.name[1],                     \
                       lock->l_resource->lr_name.name[2],                     \
                       lock->l_policy_data.l_inodebits.bits,                  \
                       atomic_read(&lock->l_resource->lr_refcount),           \
                       ldlm_typename[lock->l_resource->lr_type],              \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99,      \
                       lock->l_pid);                                          \
                break;                                                        \
        }                                                                     \
        {                                                                     \
                CDEBUG(level, "### " format                                   \
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "  \
                       "res: "LPU64"/"LPU64"/"LPU64"/"LPU64" rrc: %d type: %s " \
                       "flags: %x remote: "LPX64" expref: %d "                \
                       "pid: %u\n" , ## a,                                    \
                       lock->l_resource->lr_namespace->ns_name,               \
                       lock, lock->l_handle.h_cookie,                         \
                       atomic_read (&lock->l_refc),                           \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_resource->lr_name.name[0],                     \
                       lock->l_resource->lr_name.name[1],                     \
                       lock->l_resource->lr_name.name[2],                     \
                       lock->l_resource->lr_name.name[3],                     \
                       atomic_read(&lock->l_resource->lr_refcount),           \
                       ldlm_typename[lock->l_resource->lr_type],              \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99,      \
                       lock->l_pid);                                          \
        }                                                                     \
} while (0)

#define LDLM_DEBUG(lock, format, a...) __LDLM_DEBUG(D_DLMTRACE, lock, \
                                                    format, ## a)
#define LDLM_ERROR(lock, format, a...) __LDLM_DEBUG(D_ERROR, lock, format, ## a)

#define LDLM_DEBUG_NOLOCK(format, a...)                 \
        CDEBUG(D_DLMTRACE, "### " format "\n" , ## a)
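
/*
 * Usage note (illustrative, not from the original header): these macros take
 * a printf-style format plus arguments and append a detailed dump of the lock
 * and its resource, for example:
 *
 *      LDLM_DEBUG(lock, "client-side enqueue, flags %x", *flags);
 *      LDLM_ERROR(lock, "lock timed out, aborting");
 *      LDLM_DEBUG_NOLOCK("namespace cleanup rc %d", rc);
 */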

typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, int *flags,
                                      int first_enq, ldlm_error_t *err,
                                      struct list_head *work_list);

/*
 * Iterators.
 */

#define LDLM_ITER_CONTINUE 1 /* keep iterating */
#define LDLM_ITER_STOP     2 /* stop iterating */

typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *);
typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);

int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                          void *closure);
int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
                           void *closure);
int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
                               ldlm_res_iterator_t iter, void *closure);
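
/*
 * Illustrative sketch (not in the original header): an iterator callback for
 * ldlm_namespace_foreach().  Returning LDLM_ITER_CONTINUE keeps the walk
 * going; LDLM_ITER_STOP ends it early.  The function name is hypothetical.
 */
#if 0
static int example_count_granted(struct ldlm_lock *lock, void *closure)
{
        int *count = closure;

        if (lock->l_granted_mode == lock->l_req_mode)
                (*count)++;
        return LDLM_ITER_CONTINUE;
}

/* typical call site: ldlm_namespace_foreach(ns, example_count_granted, &n); */
#endif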

int ldlm_replay_locks(struct obd_import *imp);
void ldlm_change_cbdata(struct ldlm_namespace *, struct ldlm_res_id *,
                        ldlm_iterator_t iter, void *data);

/* ldlm_flock.c */
int ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_handle_flock_deadlock_check(struct ptlrpc_request *req);

/* ldlm_extent.c */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);


/* ldlm_lockd.c */
int ldlm_server_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
                             void *data, int flag);
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback,
                        ldlm_blocking_callback, ldlm_glimpse_callback);
int ldlm_handle_convert(struct ptlrpc_request *req);
int ldlm_handle_cancel(struct ptlrpc_request *req);
int ldlm_del_waiting_lock(struct ldlm_lock *lock);
int ldlm_get_ref(void);
void ldlm_put_ref(int force);

/* ldlm_lock.c */
ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res);
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh);
struct ldlm_lock *__ldlm_handle2lock(struct lustre_handle *, int flags);
void ldlm_cancel_callback(struct ldlm_lock *);
int ldlm_lock_set_data(struct lustre_handle *, void *data);
void ldlm_lock_remove_from_lru(struct ldlm_lock *);
struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *,
                                      struct lustre_handle *);

static inline struct ldlm_lock *ldlm_handle2lock(struct lustre_handle *h)
{
        return __ldlm_handle2lock(h, 0);
}

#define LDLM_LOCK_PUT(lock)                     \
do {                                            \
        /*LDLM_DEBUG((lock), "put");*/          \
        ldlm_lock_put(lock);                    \
} while (0)

#define LDLM_LOCK_GET(lock)                     \
({                                              \
        ldlm_lock_get(lock);                    \
        /*LDLM_DEBUG((lock), "get");*/          \
        lock;                                   \
})

struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock_destroy(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
int ldlm_lock_match(struct ldlm_namespace *ns, int flags, struct ldlm_res_id *,
                    __u32 type, ldlm_policy_data_t *, ldlm_mode_t mode,
                    struct lustre_handle *);
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        int *flags);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_cancel_locks_for_export(struct obd_export *export);
void ldlm_reprocess_all(struct ldlm_resource *res);
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos);
void ldlm_lock_dump_handle(int level, struct lustre_handle *);

/* ldlm_test.c */
int ldlm_test(struct obd_device *device, struct lustre_handle *connh);
int ldlm_regression_start(struct obd_device *obddev,
                          struct lustre_handle *connh,
                          unsigned int threads, unsigned int max_locks_in,
                          unsigned int num_resources_in,
                          unsigned int num_extents_in);
int ldlm_regression_stop(void);


/* resource.c */
struct ldlm_namespace *ldlm_namespace_new(char *name, __u32 local);
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags);
int ldlm_namespace_free(struct ldlm_namespace *ns, int force);
int ldlm_proc_setup(void);
void ldlm_proc_cleanup(void);

/* resource.c - internal */
struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
                                        struct ldlm_resource *parent,
                                        struct ldlm_res_id, __u32 type,
                                        int create);
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
int ldlm_resource_putref(struct ldlm_resource *res);
void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
                            struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
void ldlm_dump_all_namespaces(int level);
void ldlm_namespace_dump(int level, struct ldlm_namespace *);
void ldlm_resource_dump(int level, struct ldlm_resource *);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
                              struct ldlm_res_id);

/* ldlm_request.c */
int ldlm_expired_completion_wait(void *data);
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_cli_enqueue(struct obd_export *exp,
                     struct ptlrpc_request *req,
                     struct ldlm_namespace *ns,
                     struct ldlm_res_id,
                     __u32 type,
                     ldlm_policy_data_t *,
                     ldlm_mode_t mode,
                     int *flags,
                     ldlm_blocking_callback blocking,
                     ldlm_completion_callback completion,
                     ldlm_glimpse_callback glimpse,
                     void *data,
                     void *lvb,
                     __u32 lvb_len,
                     void *lvb_swabber,
                     struct lustre_handle *lockh);
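
/*
 * Illustrative sketch (not in the original header): enqueueing a whole-file
 * PR extent lock through ldlm_cli_enqueue().  The function name, the res_id
 * initialization, and the NULL request argument (assumed here to let the
 * callee build its own request) are all assumptions for illustration;
 * example_blocking_ast refers to the earlier sketch.
 */
#if 0
static int example_enqueue_extent_lock(struct obd_export *exp,
                                       struct ldlm_namespace *ns,
                                       __u64 object_id,
                                       struct lustre_handle *lockh)
{
        struct ldlm_res_id res_id = { .name = { object_id } };
        ldlm_policy_data_t policy;
        int flags = 0;

        policy.l_extent.start = 0;
        policy.l_extent.end = ~0ULL;            /* whole-file extent */

        return ldlm_cli_enqueue(exp, NULL, ns, res_id, LDLM_EXTENT, &policy,
                                LCK_PR, &flags,
                                example_blocking_ast,  /* earlier sketch */
                                ldlm_completion_ast,   /* generic completion */
                                NULL,                  /* no glimpse AST */
                                NULL,                  /* no AST data */
                                NULL, 0, NULL,         /* no LVB */
                                lockh);
}
#endif
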
int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
                    void *data, __u32 data_len);
int ldlm_cli_convert(struct lustre_handle *, int new_mode, int *flags);
int ldlm_cli_cancel(struct lustre_handle *lockh);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, struct ldlm_res_id *,
                           int flags, void *opaque);

/* mds/handler.c */
/* This has to be here because recursive inclusion sucks. */
int intent_disposition(struct ldlm_reply *rep, int flag);
void intent_set_disposition(struct ldlm_reply *rep, int flag);
int mds_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                     void *data, int flag);


/* ioctls for trying requests */
#define IOC_LDLM_TYPE                   'f'
#define IOC_LDLM_MIN_NR                 40

#define IOC_LDLM_TEST                   _IOWR('f', 40, long)
#define IOC_LDLM_DUMP                   _IOWR('f', 41, long)
#define IOC_LDLM_REGRESS_START          _IOWR('f', 42, long)
#define IOC_LDLM_REGRESS_STOP           _IOWR('f', 43, long)
#define IOC_LDLM_MAX_NR                 43

static inline void lock_res(struct ldlm_resource *res)
{
        spin_lock(&res->lr_lock);
}

static inline void unlock_res(struct ldlm_resource *res)
{
        spin_unlock(&res->lr_lock);
}

static inline void check_res_locked(struct ldlm_resource *res)
{
        LASSERT_SPIN_LOCKED(&res->lr_lock);
}

static inline void lock_bitlock(struct ldlm_lock *lock)
{
        bit_spin_lock(LDLM_FL_LOCK_PROTECT_BIT, (void *) &lock->l_flags);
        LASSERT(lock->l_pidb == 0);
        lock->l_pidb = current->pid;
}

static inline void unlock_bitlock(struct ldlm_lock *lock)
{
        LASSERT(lock->l_pidb == current->pid);
        lock->l_pidb = 0;
        bit_spin_unlock(LDLM_FL_LOCK_PROTECT_BIT, (void *) &lock->l_flags);
}

struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
void unlock_res_and_lock(struct ldlm_lock *lock);
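
/*
 * Illustrative sketch (not in the original header): typical use of the
 * resource-lock helpers above when touching one of the lr_granted /
 * lr_converting / lr_waiting lists.  The function name is hypothetical.
 */
#if 0
static int example_count_waiters(struct ldlm_resource *res)
{
        struct ldlm_lock *lock;
        int count = 0;

        lock_res(res);
        check_res_locked(res);          /* assertion only; documents intent */
        list_for_each_entry(lock, &res->lr_waiting, l_res_link)
                count++;
        unlock_res(res);

        return count;
}
#endif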

#endif /* _LUSTRE_DLM_H__ */