/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * (visit-tags-table FILE)
 * vim:expandtab:shiftwidth=8:tabstop=8:
 */

#ifndef _LUSTRE_DLM_H__
#define _LUSTRE_DLM_H__

#ifdef __KERNEL__
# include <linux/proc_fs.h>
#endif

#include <linux/lustre_lib.h>
#include <linux/lustre_net.h>
#include <linux/lustre_import.h>
#include <linux/lustre_handles.h>
#include <linux/lustre_export.h> /* for obd_export, for LDLM_DEBUG */

struct obd_ops;
struct obd_device;

#define OBD_LDLM_DEVICENAME  "ldlm"

#define LDLM_DEFAULT_LRU_SIZE 100

typedef enum {
        ELDLM_OK = 0,

        ELDLM_LOCK_CHANGED = 300,
        ELDLM_LOCK_ABORTED = 301,
        ELDLM_LOCK_REPLACED = 302,
        ELDLM_NO_LOCK_DATA = 303,

        ELDLM_NAMESPACE_EXISTS = 400,
        ELDLM_BAD_NAMESPACE    = 401
} ldlm_error_t;

#define LDLM_NAMESPACE_SERVER 0
#define LDLM_NAMESPACE_CLIENT 1

#define LDLM_FL_LOCK_CHANGED   0x000001 /* extent, mode, or resource changed */

/* If the server returns one of these flags, then the lock was put on that list.
 * If the client sends one of these flags (during recovery ONLY!), it wants the
 * lock added to the specified list, no questions asked. -p */
#define LDLM_FL_BLOCK_GRANTED  0x000002
#define LDLM_FL_BLOCK_CONV     0x000004
#define LDLM_FL_BLOCK_WAIT     0x000008

#define LDLM_FL_CBPENDING      0x000010 /* this lock is being destroyed */
#define LDLM_FL_AST_SENT       0x000020 /* blocking or cancel packet was sent */
#define LDLM_FL_WAIT_NOREPROC  0x000040 /* not a real flag, not saved in lock */
#define LDLM_FL_CANCEL         0x000080 /* cancellation callback already run */

/* Lock is being replayed.  This could probably be implied by the fact that one
 * of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
#define LDLM_FL_REPLAY         0x000100

#define LDLM_FL_INTENT_ONLY    0x000200 /* don't grant lock, just do intent */
#define LDLM_FL_LOCAL_ONLY     0x000400 /* see ldlm_cli_cancel_unused */

/* don't run the cancel callback under ldlm_cli_cancel_unused */
#define LDLM_FL_FAILED         0x000800

#define LDLM_FL_HAS_INTENT     0x001000 /* lock request has intent */
#define LDLM_FL_CANCELING      0x002000 /* lock cancel has already been sent */
#define LDLM_FL_LOCAL          0x004000 /* local lock (ie, no srv/cli split) */
#define LDLM_FL_WARN           0x008000 /* see ldlm_cli_cancel_unused */
#define LDLM_FL_DISCARD_DATA   0x010000 /* discard (no writeback) on cancel */
#define LDLM_FL_CONFIG_CHANGE  0x020000 /* see ldlm_cli_cancel_unused */

#define LDLM_FL_NO_TIMEOUT     0x020000 /* Blocked by group lock - wait
                                         * indefinitely */

/* file & record locking */
#define LDLM_FL_BLOCK_NOWAIT   0x040000 /* server told not to wait if blocked */
#define LDLM_FL_TEST_LOCK      0x080000 /* return blocking lock */

/* These are flags that are mapped into the flags and ASTs of blocking locks */
#define LDLM_AST_DISCARD_DATA  0x80000000 /* Add FL_DISCARD to blocking ASTs */
/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
#define LDLM_AST_FLAGS         (LDLM_FL_DISCARD_DATA)
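
/*
 * Illustrative sketch (not part of this header): LDLM_AST_DISCARD_DATA marks
 * the blocking lock; when the blocking AST is sent, the bits listed in
 * LDLM_AST_FLAGS are carried in the AST's lock_flags and set on the receiving
 * lock.  A hypothetical mapping helper, shown only to convey the intent:
 *
 *     static inline __u32 ldlm_ast2lock_flags(__u32 ast_flags)
 *     {
 *             __u32 lock_flags = 0;
 *
 *             if (ast_flags & LDLM_AST_DISCARD_DATA)
 *                     lock_flags |= LDLM_FL_DISCARD_DATA;
 *             return lock_flags & LDLM_AST_FLAGS;
 *     }
 */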

/* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
 * the LVB filling happens _after_ the lock has been granted, so another thread
 * can match before the LVB has been updated.  As a dirty hack, we set
 * LDLM_FL_CAN_MATCH only after we've done the LVB filling.
 *
 * The proper fix is to do the granting inside of the completion AST, which can
 * be replaced with a LVB-aware wrapping function for OSC locks.  That change is
 * pretty high-risk, though, and would need a lot more testing. */
#define LDLM_FL_CAN_MATCH      0x100000
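
/*
 * Illustrative sketch (not part of this header): on the client, the enqueue
 * completion path fills in the LVB first and only then allows the lock to be
 * matched, so a racing ldlm_lock_match() never sees a stale LVB.  A
 * hypothetical outline of that ordering, using ldlm_lock_allow_match()
 * declared later in this file:
 *
 *     // after the enqueue reply has been unpacked into l_lvb_data
 *     memcpy(lock->l_lvb_data, reply_lvb, lock->l_lvb_len);
 *     ldlm_lock_allow_match(lock);   // sets LDLM_FL_CAN_MATCH under lr_lock
 *
 * ldlm_lock_match() is then expected to skip granted locks that do not yet
 * have LDLM_FL_CAN_MATCH set.
 */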

/* A lock contributes to the kms calculation until it has finished the part
 * of its cancellation that performs writeback on its dirty pages.  It
 * can remain on the granted list during this whole time.  Threads racing
 * to update the kms after performing their writeback need to know to
 * exclude each other's locks from the calculation as they walk the granted
 * list. */
#define LDLM_FL_KMS_IGNORE     0x200000
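
/*
 * Illustrative sketch (not part of this header): a thread recomputing the
 * known minimum size after its own writeback walks lr_granted and skips any
 * lock flagged LDLM_FL_KMS_IGNORE, roughly as ldlm_extent_shift_kms() does:
 *
 *     __u64 kms = 0;
 *     struct ldlm_lock *lck;
 *
 *     check_res_locked(res);
 *     list_for_each_entry(lck, &res->lr_granted, l_res_link) {
 *             if (lck->l_flags & LDLM_FL_KMS_IGNORE)
 *                     continue;
 *             if (lck->l_policy_data.l_extent.end + 1 > kms)
 *                     kms = lck->l_policy_data.l_extent.end + 1;
 *     }
 */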

/* completion ast to be executed */
#define LDLM_FL_CP_REQD        0x400000

/* cleanup_resource has already handled the lock */
#define LDLM_FL_CLEANED        0x800000

/* optimization hint: LDLM can run the blocking callback from the current
 * context, without involving a separate thread, in order to decrease the
 * context-switch rate */
#define LDLM_FL_ATOMIC_CB      0x1000000

/* while this flag is set, the lock can't change resource */
#define LDLM_FL_LOCK_PROTECT   0x4000000
#define LDLM_FL_LOCK_PROTECT_BIT  26

/* The blocking callback is overloaded to perform two functions.  These flags
 * indicate which operation should be performed. */
#define LDLM_CB_BLOCKING    1
#define LDLM_CB_CANCELING   2
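
/*
 * Illustrative sketch (not part of this header): a client-side blocking
 * callback (see the ldlm_blocking_callback typedef below) is invoked with
 * LDLM_CB_BLOCKING when a conflicting request arrives, and again with
 * LDLM_CB_CANCELING when the lock is actually being cancelled.  A
 * hypothetical implementation:
 *
 *     static int demo_blocking_ast(struct ldlm_lock *lock,
 *                                  struct ldlm_lock_desc *desc,
 *                                  void *data, int flag)
 *     {
 *             switch (flag) {
 *             case LDLM_CB_BLOCKING: {
 *                     // queue a cancel of our lock so the other side can
 *                     // eventually be granted
 *                     struct lustre_handle lockh;
 *                     ldlm_lock2handle(lock, &lockh);
 *                     return ldlm_cli_cancel(&lockh);
 *             }
 *             case LDLM_CB_CANCELING:
 *                     // flush or invalidate cached state guarded by the lock
 *                     return 0;
 *             default:
 *                     LBUG();
 *             }
 *             return 0;
 *     }
 */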

/* compatibility matrix */
#define LCK_COMPAT_EX  LCK_NL
#define LCK_COMPAT_PW  (LCK_COMPAT_EX | LCK_CR)
#define LCK_COMPAT_PR  (LCK_COMPAT_PW | LCK_PR)
#define LCK_COMPAT_CW  (LCK_COMPAT_PW | LCK_CW)
#define LCK_COMPAT_CR  (LCK_COMPAT_CW | LCK_PR | LCK_PW)
#define LCK_COMPAT_NL  (LCK_COMPAT_CR | LCK_EX)
#define LCK_COMPAT_GROUP  (LCK_GROUP | LCK_NL)

static ldlm_mode_t lck_compat_array[] = {
        [LCK_EX]    = LCK_COMPAT_EX,
        [LCK_PW]    = LCK_COMPAT_PW,
        [LCK_PR]    = LCK_COMPAT_PR,
        [LCK_CW]    = LCK_COMPAT_CW,
        [LCK_CR]    = LCK_COMPAT_CR,
        [LCK_NL]    = LCK_COMPAT_NL,
        [LCK_GROUP] = LCK_COMPAT_GROUP
};

static inline void lockmode_verify(ldlm_mode_t mode)
{
        LASSERT(mode >= LCK_EX && mode <= LCK_GROUP);
}

static inline int lockmode_compat(ldlm_mode_t exist, ldlm_mode_t new)
{
        return (lck_compat_array[exist] & new);
}
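
/*
 * Illustrative sketch (not part of this header): when considering a new
 * request against a resource's granted list, the compatibility table above
 * can be consulted like this:
 *
 *     lockmode_verify(req_mode);
 *     list_for_each_entry(old, &res->lr_granted, l_res_link) {
 *             if (!lockmode_compat(old->l_granted_mode, req_mode)) {
 *                     // conflict: the request must block, or the holder
 *                     // must be sent a blocking AST
 *             }
 *     }
 */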

/*
 *
 * cluster name spaces
 *
 */

#define DLM_OST_NAMESPACE 1
#define DLM_MDS_NAMESPACE 2

/* XXX
   - do we just separate this by security domains and use a prefix for
     multiple namespaces in the same domain?
   -
*/

/*
 * Locking rules:
 *
 * lr_lock
 *
 * lr_lock
 *     waiting_locks_spinlock
 *
 * lr_lock
 *     led_lock
 *
 * lr_lock
 *     ns_unused_lock
 *
 * lr_lvb_sem
 *     lr_lock
 *
 */
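
/*
 * Illustrative sketch (not part of this header): following the ordering
 * above, code that needs both the resource lock and the namespace LRU lock
 * must take lr_lock first, e.g. (using the lock_res()/unlock_res() helpers
 * defined at the end of this file):
 *
 *     lock_res(res);                          // lr_lock
 *     spin_lock(&ns->ns_unused_lock);         //   then ns_unused_lock
 *     list_del_init(&lock->l_lru);
 *     ns->ns_nr_unused--;
 *     spin_unlock(&ns->ns_unused_lock);
 *     unlock_res(res);
 *
 * Taking lr_lock while already holding ns_unused_lock would invert the
 * documented order and risk deadlock.
 */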

struct ldlm_lock;
struct ldlm_resource;
struct ldlm_namespace;

typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
                               void *req_cookie, ldlm_mode_t mode, int flags,
                               void *data);

struct ldlm_valblock_ops {
        int (*lvbo_init)(struct ldlm_resource *res);

        int (*lvbo_update)(struct ldlm_resource *res,
                           struct lustre_msg *m,
                           int buf_idx, int increase);
};
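
/*
 * Illustrative sketch (not part of this header): a server registers its LVB
 * methods per namespace via ns_lvbo.  A hypothetical implementation keeping
 * an object-size-style LVB in lr_lvb_data might look like:
 *
 *     static int demo_lvbo_init(struct ldlm_resource *res)
 *     {
 *             // allocate lr_lvb_data/lr_lvb_len under lr_lvb_sem and fill
 *             // it from the backing object (details omitted)
 *             return 0;
 *     }
 *
 *     static int demo_lvbo_update(struct ldlm_resource *res,
 *                                 struct lustre_msg *m, int buf_idx,
 *                                 int increase)
 *     {
 *             // merge the LVB carried in reply buffer buf_idx (if any)
 *             // into lr_lvb_data; never shrink values when increase is set
 *             return 0;
 *     }
 *
 *     static struct ldlm_valblock_ops demo_lvbo = {
 *             .lvbo_init   = demo_lvbo_init,
 *             .lvbo_update = demo_lvbo_update,
 *     };
 *     // then: ns->ns_lvbo = &demo_lvbo;
 */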

struct ldlm_namespace {
        char                  *ns_name;
        __u32                  ns_client; /* is this a client-side lock tree? */
        struct list_head      *ns_hash; /* hash table for ns */
        spinlock_t             ns_hash_lock;
        __u32                  ns_refcount; /* count of resources in the hash */
        struct list_head       ns_root_list; /* all root resources in ns */
        struct list_head       ns_list_chain; /* position in global NS list */
        /*
        struct proc_dir_entry *ns_proc_dir;
        */

        struct list_head       ns_unused_list; /* all unused (LRU) locks in ns */
        int                    ns_nr_unused;
        spinlock_t             ns_unused_lock;

        unsigned int           ns_max_unused;
        unsigned long          ns_next_dump;   /* next dump time */

        atomic_t               ns_locks;
        __u64                  ns_resources;
        ldlm_res_policy        ns_policy;
        struct ldlm_valblock_ops *ns_lvbo;
        void                  *ns_lvbp;
        wait_queue_head_t      ns_waitq;
};

/*
 *
 * Resource hash table
 *
 */

#define RES_HASH_BITS 10
#define RES_HASH_SIZE (1UL << RES_HASH_BITS)
#define RES_HASH_MASK (RES_HASH_SIZE - 1)

struct ldlm_lock;

typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
                                      struct ldlm_lock_desc *new, void *data,
                                      int flag);
typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags,
                                        void *data);
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);

struct ldlm_lock {
        struct portals_handle l_handle; // must be first in the structure
        atomic_t              l_refc;

        /* ldlm_lock_change_resource() can change this */
        struct ldlm_resource *l_resource;

        /* set once, no need to protect it */
        struct ldlm_lock     *l_parent;

        /* protected by ns_hash_lock */
        struct list_head      l_children;
        struct list_head      l_childof;

        /* protected by ns_hash_lock. FIXME */
        struct list_head      l_lru;

        /* protected by lr_lock */
        struct list_head      l_res_link; // position in one of three res lists

        /* protected by led_lock */
        struct list_head      l_export_chain; // per-export chain of locks

        /* protected by lr_lock */
        ldlm_mode_t           l_req_mode;
        ldlm_mode_t           l_granted_mode;

        ldlm_completion_callback l_completion_ast;
        ldlm_blocking_callback   l_blocking_ast;
        ldlm_glimpse_callback    l_glimpse_ast;

        struct obd_export    *l_export;
        struct obd_export    *l_conn_export;

        /* protected by lr_lock */
        __u32                 l_flags;

        struct lustre_handle  l_remote_handle;
        ldlm_policy_data_t    l_policy_data;

        /* protected by lr_lock */
        __u32                 l_readers;
        __u32                 l_writers;
        __u8                  l_destroyed;

        /* If the lock is granted, a process sleeps on this waitq to learn when
         * it's no longer in use.  If the lock is not granted, a process sleeps
         * on this waitq to learn when it becomes granted. */
        wait_queue_head_t     l_waitq;
        struct timeval        l_enqueued_time;

        unsigned long         l_last_used;      /* jiffies */
        struct ldlm_extent    l_req_extent;

        /* Client-side-only members */
        __u32                 l_lvb_len;        /* temporary storage for */
        void                 *l_lvb_data;       /* an LVB received during */
        void                 *l_lvb_swabber;    /* an enqueue */
        void                 *l_ast_data;

        /* Server-side-only members */

        /* protected by elt_lock */
        struct list_head      l_pending_chain;  /* callbacks pending */
        unsigned long         l_callback_timeout;

        __u32                 l_pid;            /* pid which created this lock */
        __u32                 l_pidb;           /* who holds LOCK_PROTECT_BIT */

        struct list_head      l_tmp;

        /* for ldlm_add_ast_work_item() */
        struct list_head      l_bl_ast;
        struct list_head      l_cp_ast;
        struct ldlm_lock     *l_blocking_lock;
        int                   l_bl_ast_run;
};

#define LDLM_PLAIN       10
#define LDLM_EXTENT      11
#define LDLM_FLOCK       12
#define LDLM_IBITS       13

#define LDLM_MIN_TYPE 10
#define LDLM_MAX_TYPE 14

struct ldlm_resource {
        struct ldlm_namespace *lr_namespace;

        /* protected by ns_hash_lock */
        struct list_head       lr_hash;
        struct ldlm_resource  *lr_parent;   /* NULL for a root resource */
        struct list_head       lr_children; /* list head for child resources */
        struct list_head       lr_childof;  /* part of ns_root_list if root res,
                                             * part of lr_children if child */
        spinlock_t             lr_lock;

        /* protected by lr_lock */
        struct list_head       lr_granted;
        struct list_head       lr_converting;
        struct list_head       lr_waiting;
        ldlm_mode_t            lr_most_restr;
        __u32                  lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
        struct ldlm_res_id     lr_name;
        atomic_t               lr_refcount;

        /* Server-side-only lock value block elements */
        struct semaphore       lr_lvb_sem;
        __u32                  lr_lvb_len;
        void                  *lr_lvb_data;

        /* lr_tmp holds a list head temporarily, during the building of a work
         * queue.  see ldlm_add_ast_work_item and ldlm_run_ast_work */
        void                  *lr_tmp;
};

struct ldlm_ast_work {
        struct ldlm_lock *w_lock;
        int               w_blocking;
        struct ldlm_lock_desc w_desc;
        struct list_head   w_list;
        int w_flags;
        void *w_data;
        int w_datalen;
};

extern struct obd_ops ldlm_obd_ops;

extern char *ldlm_lockname[];
extern char *ldlm_typename[];
extern char *ldlm_it2str(int it);

#define __LDLM_DEBUG(level, lock, format, a...)                               \
do {                                                                          \
        if (lock->l_resource == NULL) {                                       \
                CDEBUG(level, "### " format                                   \
                       " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "\
                       "res: \?\? rrc=\?\? type: \?\?\? flags: %x remote: "   \
                       LPX64" expref: %d pid: %u\n" , ## a, lock,             \
                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),   \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99,      \
                       lock->l_pid);                                          \
                break;                                                        \
        }                                                                     \
        if (lock->l_resource->lr_type == LDLM_EXTENT) {                       \
                CDEBUG(level, "### " format                                   \
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "  \
                       "res: "LPU64"/"LPU64"/"LPU64" rrc: %d type: %s ["LPU64 \
                       "->"LPU64"] (req "LPU64"->"LPU64") flags: %x remote: " \
                       LPX64" expref: %d pid: %u\n" , ## a,                   \
                       lock->l_resource->lr_namespace->ns_name, lock,         \
                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),   \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_resource->lr_name.name[0],                     \
                       lock->l_resource->lr_name.name[1],                     \
                       lock->l_resource->lr_name.name[2],                     \
                       atomic_read(&lock->l_resource->lr_refcount),           \
                       ldlm_typename[lock->l_resource->lr_type],              \
                       lock->l_policy_data.l_extent.start,                    \
                       lock->l_policy_data.l_extent.end,                      \
                       lock->l_req_extent.start, lock->l_req_extent.end,      \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99,      \
                       lock->l_pid);                                          \
                break;                                                        \
        }                                                                     \
        if (lock->l_resource->lr_type == LDLM_FLOCK) {                        \
                CDEBUG(level, "### " format                                   \
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "  \
                       "res: "LPU64"/"LPU64"/"LPU64" rrc: %d type: %s pid: "  \
                       LPU64" " "["LPU64"->"LPU64"] flags: %x remote: "LPX64  \
                       " expref: %d pid: %u\n" , ## a,                        \
                       lock->l_resource->lr_namespace->ns_name, lock,         \
                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),   \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_resource->lr_name.name[0],                     \
                       lock->l_resource->lr_name.name[1],                     \
                       lock->l_resource->lr_name.name[2],                     \
                       atomic_read(&lock->l_resource->lr_refcount),           \
                       ldlm_typename[lock->l_resource->lr_type],              \
                       lock->l_policy_data.l_flock.pid,                       \
                       lock->l_policy_data.l_flock.start,                     \
                       lock->l_policy_data.l_flock.end,                       \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99,      \
                       lock->l_pid);                                          \
                break;                                                        \
        }                                                                     \
        if (lock->l_resource->lr_type == LDLM_IBITS) {                        \
                CDEBUG(level, "### " format                                   \
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "  \
                       "res: "LPU64"/"LPU64"/"LPU64" bits "LPX64" rrc: %d "   \
                       "type: %s flags: %x remote: "LPX64" expref: %d "       \
                       "pid %u\n" , ## a,                                     \
                       lock->l_resource->lr_namespace->ns_name,               \
                       lock, lock->l_handle.h_cookie,                         \
                       atomic_read (&lock->l_refc),                           \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_resource->lr_name.name[0],                     \
                       lock->l_resource->lr_name.name[1],                     \
                       lock->l_resource->lr_name.name[2],                     \
                       lock->l_policy_data.l_inodebits.bits,                  \
                       atomic_read(&lock->l_resource->lr_refcount),           \
                       ldlm_typename[lock->l_resource->lr_type],              \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99,      \
                       lock->l_pid);                                          \
                break;                                                        \
        }                                                                     \
        {                                                                     \
                CDEBUG(level, "### " format                                   \
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "  \
                       "res: "LPU64"/"LPU64"/"LPU64"/"LPU64" rrc: %d type: %s " \
                       "flags: %x remote: "LPX64" expref: %d "                \
                       "pid: %u\n" , ## a,                                    \
                       lock->l_resource->lr_namespace->ns_name,               \
                       lock, lock->l_handle.h_cookie,                         \
                       atomic_read (&lock->l_refc),                           \
                       lock->l_readers, lock->l_writers,                      \
                       ldlm_lockname[lock->l_granted_mode],                   \
                       ldlm_lockname[lock->l_req_mode],                       \
                       lock->l_resource->lr_name.name[0],                     \
                       lock->l_resource->lr_name.name[1],                     \
                       lock->l_resource->lr_name.name[2],                     \
                       lock->l_resource->lr_name.name[3],                     \
                       atomic_read(&lock->l_resource->lr_refcount),           \
                       ldlm_typename[lock->l_resource->lr_type],              \
                       lock->l_flags, lock->l_remote_handle.cookie,           \
                       lock->l_export ?                                       \
                       atomic_read(&lock->l_export->exp_refcount) : -99,      \
                       lock->l_pid);                                          \
        }                                                                     \
} while (0)

#define LDLM_DEBUG(lock, format, a...) __LDLM_DEBUG(D_DLMTRACE, lock, \
                                                    format, ## a)
#define LDLM_ERROR(lock, format, a...) __LDLM_DEBUG(D_ERROR, lock, format, ## a)

#define LDLM_DEBUG_NOLOCK(format, a...)                 \
        CDEBUG(D_DLMTRACE, "### " format "\n" , ## a)
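
/*
 * Usage note: these expand to CDEBUG() and take printf-style arguments, e.g.
 *
 *     LDLM_DEBUG(lock, "client-side enqueue START");
 *     LDLM_ERROR(lock, "lock callback timer expired: evicting client");
 */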

typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, int *flags,
                                      int first_enq, ldlm_error_t *err,
                                      struct list_head *work_list);

/*
 * Iterators.
 */

#define LDLM_ITER_CONTINUE 1 /* keep iterating */
#define LDLM_ITER_STOP     2 /* stop iterating */

typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *);
typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);

int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                          void *closure);
int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
                           void *closure);
int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
                               ldlm_res_iterator_t iter, void *closure);
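
/*
 * Illustrative sketch (not part of this header): an iterator returns
 * LDLM_ITER_CONTINUE to keep walking or LDLM_ITER_STOP to abort the walk.
 * A hypothetical callback that counts the locks in a namespace:
 *
 *     static int demo_count_lock_cb(struct ldlm_lock *lock, void *closure)
 *     {
 *             (*(int *)closure)++;
 *             return LDLM_ITER_CONTINUE;
 *     }
 *
 *     // caller:
 *     int count = 0;
 *     ldlm_namespace_foreach(ns, demo_count_lock_cb, &count);
 */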

int ldlm_replay_locks(struct obd_import *imp);
void ldlm_change_cbdata(struct ldlm_namespace *, struct ldlm_res_id *,
                        ldlm_iterator_t iter, void *data);

/* ldlm_flock.c */
int ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data);

/* ldlm_extent.c */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);


/* ldlm_lockd.c */
int ldlm_server_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
                             void *data, int flag);
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback,
                        ldlm_blocking_callback, ldlm_glimpse_callback);
int ldlm_handle_convert(struct ptlrpc_request *req);
int ldlm_handle_cancel(struct ptlrpc_request *req);
int ldlm_del_waiting_lock(struct ldlm_lock *lock);
int ldlm_get_ref(void);
void ldlm_put_ref(int force);

/* ldlm_lock.c */
ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res);
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh);
struct ldlm_lock *__ldlm_handle2lock(struct lustre_handle *, int flags);
void ldlm_cancel_callback(struct ldlm_lock *);
int ldlm_lock_set_data(struct lustre_handle *, void *data);
void ldlm_lock_remove_from_lru(struct ldlm_lock *);
struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *,
                                      struct lustre_handle *);

static inline struct ldlm_lock *ldlm_handle2lock(struct lustre_handle *h)
{
        return __ldlm_handle2lock(h, 0);
}

#define LDLM_LOCK_PUT(lock)                     \
do {                                            \
        /*LDLM_DEBUG((lock), "put");*/          \
        ldlm_lock_put(lock);                    \
} while (0)

#define LDLM_LOCK_GET(lock)                     \
({                                              \
        ldlm_lock_get(lock);                    \
        /*LDLM_DEBUG((lock), "get");*/          \
        lock;                                   \
})
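
/*
 * Usage note: a lock returned by ldlm_handle2lock() carries a reference that
 * the caller must drop with LDLM_LOCK_PUT() when finished, e.g.
 *
 *     struct ldlm_lock *lock = ldlm_handle2lock(lockh);
 *     if (lock != NULL) {
 *             // ... inspect or modify the lock ...
 *             LDLM_LOCK_PUT(lock);
 *     }
 */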

struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock_destroy(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
int ldlm_lock_match(struct ldlm_namespace *ns, int flags, struct ldlm_res_id *,
                    __u32 type, ldlm_policy_data_t *, ldlm_mode_t mode,
                    struct lustre_handle *);
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        int *flags);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_cancel_locks_for_export(struct obd_export *export);
void ldlm_reprocess_all(struct ldlm_resource *res);
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos);
void ldlm_lock_dump_handle(int level, struct lustre_handle *);

/* ldlm_test.c */
int ldlm_test(struct obd_device *device, struct lustre_handle *connh);
int ldlm_regression_start(struct obd_device *obddev,
                          struct lustre_handle *connh,
                          unsigned int threads, unsigned int max_locks_in,
                          unsigned int num_resources_in,
                          unsigned int num_extents_in);
int ldlm_regression_stop(void);


/* resource.c */
struct ldlm_namespace *ldlm_namespace_new(char *name, __u32 local);
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags);
int ldlm_namespace_free(struct ldlm_namespace *ns, int force);
int ldlm_proc_setup(void);
void ldlm_proc_cleanup(void);

/* resource.c - internal */
struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
                                        struct ldlm_resource *parent,
                                        struct ldlm_res_id, __u32 type,
                                        int create);
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
int ldlm_resource_putref(struct ldlm_resource *res);
void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
                            struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
void ldlm_dump_all_namespaces(int level);
void ldlm_namespace_dump(int level, struct ldlm_namespace *);
void ldlm_resource_dump(int level, struct ldlm_resource *);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
                              struct ldlm_res_id);

/* ldlm_request.c */
int ldlm_expired_completion_wait(void *data);
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_cli_enqueue(struct obd_export *exp,
                     struct ptlrpc_request *req,
                     struct ldlm_namespace *ns,
                     struct ldlm_res_id,
                     __u32 type,
                     ldlm_policy_data_t *,
                     ldlm_mode_t mode,
                     int *flags,
                     ldlm_blocking_callback blocking,
                     ldlm_completion_callback completion,
                     ldlm_glimpse_callback glimpse,
                     void *data,
                     void *lvb,
                     __u32 lvb_len,
                     void *lvb_swabber,
                     struct lustre_handle *lockh);
int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
                    void *data, __u32 data_len);
int ldlm_cli_convert(struct lustre_handle *, int new_mode, int *flags);
int ldlm_cli_cancel(struct lustre_handle *lockh);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, struct ldlm_res_id *,
                           int flags, void *opaque);
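
/*
 * Illustrative sketch (not part of this header): a typical client caller
 * first tries to match a cached lock and only enqueues on a miss; the
 * reference obtained by a successful match or enqueue is dropped with
 * ldlm_lock_decref().  The resource id, policy, mode and callbacks below
 * are placeholders:
 *
 *     struct lustre_handle lockh;
 *     int flags = 0;
 *     int rc;
 *
 *     rc = ldlm_lock_match(ns, 0, &res_id, LDLM_EXTENT, &policy, LCK_PR,
 *                          &lockh);
 *     if (rc == 0) {
 *             // no usable cached lock: ask the server
 *             rc = ldlm_cli_enqueue(exp, NULL, ns, res_id, LDLM_EXTENT,
 *                                   &policy, LCK_PR, &flags,
 *                                   demo_blocking_ast, ldlm_completion_ast,
 *                                   NULL, NULL, NULL, 0, NULL, &lockh);
 *             if (rc != ELDLM_OK)
 *                     return rc;
 *     }
 *     // ... use the lock ...
 *     ldlm_lock_decref(&lockh, LCK_PR);
 */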

/* mds/handler.c */
/* This has to be here because recursive inclusion sucks. */
int intent_disposition(struct ldlm_reply *rep, int flag);
void intent_set_disposition(struct ldlm_reply *rep, int flag);
int mds_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                     void *data, int flag);


/* ioctls for trying requests */
#define IOC_LDLM_TYPE                   'f'
#define IOC_LDLM_MIN_NR                 40

#define IOC_LDLM_TEST                   _IOWR('f', 40, long)
#define IOC_LDLM_DUMP                   _IOWR('f', 41, long)
#define IOC_LDLM_REGRESS_START          _IOWR('f', 42, long)
#define IOC_LDLM_REGRESS_STOP           _IOWR('f', 43, long)
#define IOC_LDLM_MAX_NR                 43

static inline void lock_res(struct ldlm_resource *res)
{
        spin_lock(&res->lr_lock);
}

static inline void unlock_res(struct ldlm_resource *res)
{
        spin_unlock(&res->lr_lock);
}

static inline void check_res_locked(struct ldlm_resource *res)
{
        LASSERT_SPIN_LOCKED(&res->lr_lock);
}

struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
void unlock_res_and_lock(struct ldlm_lock *lock);

#endif