lustre/ldlm/ldlm_lock.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <linux/slab.h>
# include <linux/dcache.h>
# include <linux/namei.h>
# include <linux/module.h>
# include <linux/lustre_dlm.h>
#else
# include <liblustre.h>
# include <libcfs/kp30.h>
#endif

#include <linux/obd_class.h>
#include "ldlm_internal.h"

//struct lustre_lock ldlm_everything_lock;

/* lock types */
char *ldlm_lockname[] = {
        [0] "--",
        [LCK_EX] "EX",
        [LCK_PW] "PW",
        [LCK_PR] "PR",
        [LCK_CW] "CW",
        [LCK_CR] "CR",
        [LCK_NL] "NL",
        [LCK_GROUP] "GROUP"
};
char *ldlm_typename[] = {
        [LDLM_PLAIN] "PLN",
        [LDLM_EXTENT] "EXT",
        [LDLM_FLOCK] "FLK",
        [LDLM_IBITS] "IBT",
};

char *ldlm_it2str(int it)
{
        switch (it) {
        case IT_OPEN:
                return "open";
        case IT_CREAT:
                return "creat";
        case (IT_OPEN | IT_CREAT):
                return "open|creat";
        case IT_READDIR:
                return "readdir";
        case IT_GETATTR:
                return "getattr";
        case IT_LOOKUP:
                return "lookup";
        case IT_UNLINK:
                return "unlink";
        case IT_GETXATTR:
                return "getxattr";
        case IT_CHDIR:
                return "chdir";
        default:
                CERROR("Unknown intent %d\n", it);
                return "UNKNOWN";
        }
}

extern kmem_cache_t *ldlm_lock_slab;

static ldlm_processing_policy ldlm_processing_policy_table[] = {
        [LDLM_PLAIN] ldlm_process_plain_lock,
        [LDLM_EXTENT] ldlm_process_extent_lock,
#ifdef __KERNEL__
        [LDLM_FLOCK] ldlm_process_flock_lock,
#endif
        [LDLM_IBITS] ldlm_process_inodebits_lock,
};

ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
{
        return ldlm_processing_policy_table[res->lr_type];
}

void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
{
        ns->ns_policy = arg;
}
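
/* Illustrative sketch (not part of the original source): how a server-side
 * caller might register an intent policy for a namespace.  The callback
 * signature is inferred from the ns->ns_policy call in ldlm_lock_enqueue()
 * below; my_intent_policy and my_setup_namespace are hypothetical names. */
#if 0
static int my_intent_policy(struct ldlm_namespace *ns, struct ldlm_lock **lockp,
                            void *req_cookie, ldlm_mode_t mode, int flags,
                            void *data)
{
        /* Inspect the intent in req_cookie; grant, replace or abort the
         * lock as needed.  Returning ELDLM_OK falls through to the normal
         * enqueue path. */
        return ELDLM_OK;
}

static void my_setup_namespace(struct ldlm_namespace *ns)
{
        ldlm_register_intent(ns, my_intent_policy);
}
#endif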

/*
 * REFCOUNTED LOCK OBJECTS
 */


/*
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        atomic_inc(&lock->l_refc);
        return lock;
}

void ldlm_lock_put(struct ldlm_lock *lock)
{
        ENTRY;

        LASSERT(lock->l_resource != LP_POISON);
        LASSERT(atomic_read(&lock->l_refc) > 0);
        if (atomic_dec_and_test(&lock->l_refc)) {
                struct ldlm_resource *res = lock->l_resource;
                struct ldlm_namespace *ns = res->lr_namespace;

                LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing");

                LASSERT(lock->l_resource != LP_POISON);
                lock_res(res);
                LASSERT(lock->l_destroyed);
                LASSERT(list_empty(&lock->l_res_link));

                if (lock->l_parent)
                        LDLM_LOCK_PUT(lock->l_parent);
                unlock_res(res);

                ldlm_resource_putref(lock->l_resource);
                lock->l_resource = NULL;
                if (lock->l_export)
                        class_export_put(lock->l_export);
                atomic_dec(&ns->ns_locks);

                if (lock->l_lvb_data != NULL)
                        OBD_FREE(lock->l_lvb_data, lock->l_lvb_len);

                OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
        }

        EXIT;
}
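
/* Illustrative sketch (not part of the original source): the pairing the
 * refcounting rules above imply.  Every reference taken with LDLM_LOCK_GET()
 * must be balanced by exactly one LDLM_LOCK_PUT(); the extra allocation
 * reference is dropped by ldlm_lock_destroy(). */
#if 0
static void refcount_pairing_sketch(struct ldlm_lock *lock)
{
        struct ldlm_lock *ref = LDLM_LOCK_GET(lock);

        /* ... the lock cannot be freed while 'ref' is held ... */

        LDLM_LOCK_PUT(ref);
}
#endif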

void ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
        ENTRY;
        spin_lock(&lock->l_resource->lr_namespace->ns_unused_lock);
        if (!list_empty(&lock->l_lru)) {
                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
                list_del_init(&lock->l_lru);
                lock->l_resource->lr_namespace->ns_nr_unused--;
                LASSERT(lock->l_resource->lr_namespace->ns_nr_unused >= 0);
        }
        spin_unlock(&lock->l_resource->lr_namespace->ns_unused_lock);
        EXIT;
}

/* This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock,
 * because it's not in the hash table anymore.  -phil */
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        ENTRY;

        lock_res(lock->l_resource);

        if (!list_empty(&lock->l_children)) {
                LDLM_ERROR(lock, "still has children (%p)!",
                           lock->l_children.next);
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (!list_empty(&lock->l_res_link)) {
                LDLM_ERROR(lock, "lock still on resource");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (lock->l_destroyed) {
                LASSERT(list_empty(&lock->l_lru));
                unlock_res(lock->l_resource);
                EXIT;
                return;
        }
        lock->l_destroyed = 1;

        if (lock->l_export) {
                spin_lock(&lock->l_export->exp_ldlm_data.led_lock);
                if (!list_empty(&lock->l_export_chain))
                        list_del_init(&lock->l_export_chain);
                spin_unlock(&lock->l_export->exp_ldlm_data.led_lock);
        } else {
                LASSERT(list_empty(&lock->l_export_chain));
        }

        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

#if 0
        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        if (lock->l_export)
                class_export_put(lock->l_export);
        lock->l_export = NULL;
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0);
#endif

        unlock_res(lock->l_resource);
        LDLM_LOCK_PUT(lock);
        EXIT;
}

/* this is called by portals_handle2object with the handle lock taken */
static void lock_handle_addref(void *lock)
{
        LDLM_LOCK_GET((struct ldlm_lock *)lock);
}

/*
 * usage: pass in a resource on which you have done ldlm_resource_get
 *        pass in a parent lock on which you have done a ldlm_lock_get
 *        after return, ldlm_*_put the resource and parent
 * returns: lock with refcount 2 - one for current caller and one for remote
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
                                       struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;
        ENTRY;

        if (resource == NULL)
                LBUG();

        OBD_SLAB_ALLOC(lock, ldlm_lock_slab, SLAB_NOFS, sizeof(*lock));
        if (lock == NULL)
                RETURN(NULL);

        lock->l_resource = ldlm_resource_getref(resource);

        atomic_set(&lock->l_refc, 2);
        INIT_LIST_HEAD(&lock->l_children);
        INIT_LIST_HEAD(&lock->l_res_link);
        INIT_LIST_HEAD(&lock->l_lru);
        INIT_LIST_HEAD(&lock->l_export_chain);
        INIT_LIST_HEAD(&lock->l_pending_chain);
        INIT_LIST_HEAD(&lock->l_tmp);
        INIT_LIST_HEAD(&lock->l_bl_ast);
        INIT_LIST_HEAD(&lock->l_cp_ast);
        init_waitqueue_head(&lock->l_waitq);
        lock->l_blocking_lock = NULL;

        atomic_inc(&resource->lr_namespace->ns_locks);

        if (parent != NULL) {
                spin_lock(&resource->lr_namespace->ns_hash_lock);
                lock->l_parent = LDLM_LOCK_GET(parent);
                list_add(&lock->l_childof, &parent->l_children);
                spin_unlock(&resource->lr_namespace->ns_hash_lock);
        }

        INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, lock_handle_addref);

        RETURN(lock);
}
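
/* Illustrative sketch (not part of the original source) of the usage
 * contract in the comment above ldlm_lock_new(): the caller holds its own
 * references on the resource (and parent, if any) across the call and drops
 * them afterwards.  new_lock_sketch is a hypothetical caller; error handling
 * is elided. */
#if 0
static struct ldlm_lock *new_lock_sketch(struct ldlm_namespace *ns,
                                         struct ldlm_res_id res_id)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock;

        res = ldlm_resource_get(ns, NULL, res_id, LDLM_PLAIN, 1);
        if (res == NULL)
                return NULL;

        lock = ldlm_lock_new(NULL, res); /* takes its own resource reference */
        ldlm_resource_putref(res);       /* drop the caller's reference */

        return lock;                     /* refcount 2, per the comment above */
}
#endif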

int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              struct ldlm_res_id new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;
        ENTRY;

        lock_res(oldres);
        if (memcmp(&new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                unlock_res(oldres);
                RETURN(0);
        }

        LASSERT(new_resid.name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(list_empty(&lock->l_res_link));

        lock->l_resource = ldlm_resource_get(ns, NULL, new_resid,
                                             lock->l_resource->lr_type, 1);
        if (lock->l_resource == NULL) {
                LBUG();
                RETURN(-ENOMEM);
        }

        unlock_res(oldres);

        /* ...and the flowers are still standing! */
        ldlm_resource_putref(oldres);

        RETURN(0);
}

/*
 *  HANDLES
 */

void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->cookie = lock->l_handle.h_cookie;
}

/* If 'flags' is nonzero: atomically get the lock and set the given flags.
 * Return NULL if any of those flags was already set. */

struct ldlm_lock *__ldlm_handle2lock(struct lustre_handle *handle, int flags)
{
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock = NULL, *retval = NULL;
        ENTRY;

        LASSERT(handle);

        lock = class_handle2object(handle->cookie);
        if (lock == NULL)
                RETURN(NULL);

        LASSERT(lock->l_resource != NULL);
        ns = lock->l_resource->lr_namespace;
        LASSERT(ns != NULL);

        lock_res(lock->l_resource);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if (lock->l_destroyed) {
                unlock_res(lock->l_resource);
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags && (lock->l_flags & flags)) {
                unlock_res(lock->l_resource);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags)
                lock->l_flags |= flags;

        unlock_res(lock->l_resource);
        retval = lock;
        EXIT;
 out:
        return retval;
}
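
/* Illustrative sketch (not part of the original source): using the 'flags'
 * argument of __ldlm_handle2lock() to atomically claim a lock, so that at
 * most one caller proceeds.  'claim_flag' stands in for whichever l_flags
 * bit expresses the claim; claim_lock_sketch is a hypothetical name. */
#if 0
static void claim_lock_sketch(struct lustre_handle *lockh, int claim_flag)
{
        struct ldlm_lock *lock;

        lock = __ldlm_handle2lock(lockh, claim_flag);
        if (lock == NULL)
                return; /* stale handle, or another thread already claimed it */

        /* ... this caller now owns the claimed state ... */

        LDLM_LOCK_PUT(lock);
}
#endif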

struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *ns,
                                      struct lustre_handle *handle)
{
        struct ldlm_lock *retval = NULL;
        retval = __ldlm_handle2lock(handle, 0);
        return retval;
}

void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        ldlm_res2desc(lock->l_resource, &desc->l_resource);
        desc->l_req_mode = lock->l_req_mode;
        desc->l_granted_mode = lock->l_granted_mode;
        memcpy(&desc->l_policy_data, &lock->l_policy_data,
               sizeof(desc->l_policy_data));
}

void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                           struct list_head *work_list)
{
        if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
                lock->l_flags |= LDLM_FL_AST_SENT;
                /* If the enqueuing client said so, tell the AST recipient to
                 * discard dirty data, rather than writing back. */
                if (new->l_flags & LDLM_AST_DISCARD_DATA)
                        lock->l_flags |= LDLM_FL_DISCARD_DATA;
                LASSERT(list_empty(&lock->l_bl_ast));
                list_add(&lock->l_bl_ast, work_list);
                LDLM_LOCK_GET(lock);
                LASSERT(lock->l_blocking_lock == NULL);
                lock->l_blocking_lock = LDLM_LOCK_GET(new);
        }
}

void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
{
        if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
                LDLM_DEBUG(lock, "lock granted; sending completion AST.");
                lock->l_flags |= LDLM_FL_CP_REQD;
                LASSERT(list_empty(&lock->l_cp_ast));
                list_add(&lock->l_cp_ast, work_list);
                LDLM_LOCK_GET(lock);
        }
}

/* must be called with lr_lock held */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            struct list_head *work_list)
{
        ENTRY;
        check_res_locked(lock->l_resource);
        if (new)
                ldlm_add_bl_work_item(lock, new, work_list);
        else
                ldlm_add_cp_work_item(lock, work_list);
        EXIT;
}

void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        ldlm_lock_remove_from_lru(lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR))
                lock->l_readers++;
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP))
                lock->l_writers++;
        lock->l_last_used = jiffies;
        LDLM_LOCK_GET(lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}

/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        lock_res(lock->l_resource);
        ldlm_lock_addref_internal_nolock(lock, mode);
        unlock_res(lock->l_resource);
}

void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
        struct ldlm_namespace *ns;
        ENTRY;

        ns = lock->l_resource->lr_namespace;

        lock_res(lock->l_resource);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                LASSERT(lock->l_readers > 0);
                lock->l_readers--;
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP)) {
                LASSERT(lock->l_writers > 0);
                lock->l_writers--;
        }

        if (lock->l_flags & LDLM_FL_LOCAL &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                lock->l_flags |= LDLM_FL_CBPENDING;
        }

        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                /* If we received a blocking AST and this was the last
                 * reference, run the callback. */
                if (ns->ns_client == LDLM_NAMESPACE_SERVER && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                unlock_res(lock->l_resource);
                if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
                    ldlm_bl_to_thread(ns, NULL, lock) != 0)
                        ldlm_handle_bl_callback(ns, NULL, lock);
        } else if (ns->ns_client == LDLM_NAMESPACE_CLIENT &&
                   !lock->l_readers && !lock->l_writers) {
                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                LASSERT(list_empty(&lock->l_lru));
                LASSERT(ns->ns_nr_unused >= 0);
                spin_lock(&ns->ns_unused_lock);
                list_add_tail(&lock->l_lru, &ns->ns_unused_list);
                ns->ns_nr_unused++;
                spin_unlock(&ns->ns_unused_lock);
                unlock_res(lock->l_resource);
                ldlm_cancel_lru(ns, LDLM_ASYNC);
        } else {
                unlock_res(lock->l_resource);
        }

        LDLM_LOCK_PUT(lock);    /* matches the ldlm_lock_get in addref */

        EXIT;
}

void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        LASSERT(lock != NULL);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
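
/* Illustrative sketch (not part of the original source): the handle-based
 * reference pairing.  Each ldlm_lock_addref() in a given mode must be
 * matched by an ldlm_lock_decref() in the same mode, or the final-decref
 * paths above (LRU insertion, CBPENDING handling) never run. */
#if 0
static void use_lock_sketch(struct lustre_handle *lockh)
{
        ldlm_lock_addref(lockh, LCK_PR);

        /* ... read data protected by the lock ... */

        ldlm_lock_decref(lockh, LCK_PR);
}
#endif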

/* This will drop a lock reference and mark it for destruction, but will not
 * necessarily cancel the lock before returning. */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        ENTRY;

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        lock_res(lock->l_resource);
        lock->l_flags |= LDLM_FL_CBPENDING;
        unlock_res(lock->l_resource);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

/* NOTE: called by
 *  - ldlm_lock_enqueue
 *  - ldlm_reprocess_queue
 *  - ldlm_lock_convert
 *
 * must be called with lr_lock held
 */
void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        lock->l_granted_mode = lock->l_req_mode;
        ldlm_resource_add_lock(res, &res->lr_granted, lock);

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (work_list && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, work_list);

        EXIT;
}

/* returns a referenced lock or NULL.  See the flag descriptions below, in the
 * comment above ldlm_lock_match */
static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode,
                                      ldlm_policy_data_t *policy,
                                      struct ldlm_lock *old_lock, int flags)
{
        struct ldlm_lock *lock;
        struct list_head *tmp;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock == old_lock)
                        break;

                /* llite sometimes wants to match locks that will be
                 * canceled when their users drop, but we allow it to match
                 * if it passes in CBPENDING and the lock still has users.
                 * this is generally only going to be used by children
                 * whose parents already hold a lock so forward progress
                 * can still happen. */
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    !(flags & LDLM_FL_CBPENDING))
                        continue;
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    lock->l_readers == 0 && lock->l_writers == 0)
                        continue;

                if (!(lock->l_req_mode & mode))
                        continue;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_policy_data.l_extent.start >
                     policy->l_extent.start ||
                     lock->l_policy_data.l_extent.end < policy->l_extent.end))
                        continue;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    mode == LCK_GROUP &&
                    lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
                        continue;

                /* We match if we have existing lock with same or wider set
                   of bits. */
                if (lock->l_resource->lr_type == LDLM_IBITS &&
                     ((lock->l_policy_data.l_inodebits.bits &
                      policy->l_inodebits.bits) !=
                      policy->l_inodebits.bits))
                        continue;

                if (lock->l_destroyed || (lock->l_flags & LDLM_FL_FAILED))
                        continue;

                if ((flags & LDLM_FL_LOCAL_ONLY) &&
                    !(lock->l_flags & LDLM_FL_LOCAL))
                        continue;

                if (flags & LDLM_FL_TEST_LOCK)
                        LDLM_LOCK_GET(lock);
                else
                        ldlm_lock_addref_internal_nolock(lock, mode);
                return lock;
        }

        return NULL;
}

void ldlm_lock_allow_match(struct ldlm_lock *lock)
{
        lock_res(lock->l_resource);
        lock->l_flags |= LDLM_FL_CAN_MATCH;
        wake_up(&lock->l_waitq);
        unlock_res(lock->l_resource);
}

/* Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (ie, connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 *     to be canceled can still be matched as long as they still have reader
 *     or writer references
 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
 *     just tell us if we would have matched.
 *
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock
 */
int ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                    struct ldlm_res_id *res_id, __u32 type,
                    ldlm_policy_data_t *policy, ldlm_mode_t mode,
                    struct lustre_handle *lockh)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock, *old_lock = NULL;
        int rc = 0;
        ENTRY;

        if (ns == NULL) {
                old_lock = ldlm_handle2lock(lockh);
                LASSERT(old_lock);

                ns = old_lock->l_resource->lr_namespace;
                res_id = &old_lock->l_resource->lr_name;
                type = old_lock->l_resource->lr_type;
                mode = old_lock->l_req_mode;
        }

        res = ldlm_resource_get(ns, NULL, *res_id, type, 0);
        if (res == NULL) {
                LASSERT(old_lock == NULL);
                RETURN(0);
        }

        lock_res(res);

        lock = search_queue(&res->lr_granted, mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);
        if (flags & LDLM_FL_BLOCK_GRANTED)
                GOTO(out, rc = 0);
        lock = search_queue(&res->lr_converting, mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);
        lock = search_queue(&res->lr_waiting, mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);

        EXIT;
 out:
        unlock_res(res);
        ldlm_resource_putref(res);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                if (!(lock->l_flags & LDLM_FL_CAN_MATCH)) {
                        struct l_wait_info lwi;
                        if (lock->l_completion_ast) {
                                int err = lock->l_completion_ast(lock,
                                                                 LDLM_FL_WAIT_NOREPROC,
                                                                 NULL);
                                if (err) {
                                        rc = 0;
                                        goto out2;
                                }
                        }

                        lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ, NULL, NULL, NULL);

                        /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
                        l_wait_event(lock->l_waitq,
                                     (lock->l_flags & LDLM_FL_CAN_MATCH), &lwi);
                }
        }

out2:
        if (rc) {
                LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")",
                           type == LDLM_PLAIN ? res_id->name[2] :
                                policy->l_extent.start,
                           type == LDLM_PLAIN ? res_id->name[3] :
                           policy->l_extent.end);
        } else if (!(flags & LDLM_FL_TEST_LOCK)) { /* less verbose for test-only */
                LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
                                  LPU64"/"LPU64" ("LPU64" "LPU64")", ns,
                                  type, mode, res_id->name[0], res_id->name[1],
                                  type == LDLM_PLAIN ? res_id->name[2] :
                                  policy->l_extent.start,
                                  type == LDLM_PLAIN ? res_id->name[3] :
                                  policy->l_extent.end);
        }

        if (old_lock)
                LDLM_LOCK_PUT(old_lock);
        if (flags & LDLM_FL_TEST_LOCK && rc)
                LDLM_LOCK_PUT(lock);

        return rc;
}
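
/* Illustrative sketch (not part of the original source): probing for an
 * existing compatible extent lock.  With LDLM_FL_TEST_LOCK no reference is
 * left behind on a match; without that flag, a successful match must later
 * be dropped with ldlm_lock_decref() in the matched mode.
 * have_read_lock_sketch is a hypothetical name. */
#if 0
static int have_read_lock_sketch(struct ldlm_namespace *ns,
                                 struct ldlm_res_id *res_id)
{
        ldlm_policy_data_t policy;
        struct lustre_handle lockh;

        memset(&policy, 0, sizeof(policy));
        policy.l_extent.start = 0;
        policy.l_extent.end = ~0ULL; /* whole-file extent */

        return ldlm_lock_match(ns, LDLM_FL_TEST_LOCK, res_id, LDLM_EXTENT,
                               &policy, LCK_PR, &lockh);
}
#endif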

/* Returns a referenced lock */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   struct lustre_handle *parent_lock_handle,
                                   struct ldlm_res_id res_id, __u32 type,
                                   ldlm_mode_t mode,
                                   ldlm_blocking_callback blocking,
                                   ldlm_completion_callback completion,
                                   ldlm_glimpse_callback glimpse,
                                   void *data, __u32 lvb_len)
{
        struct ldlm_resource *res, *parent_res = NULL;
        struct ldlm_lock *lock, *parent_lock = NULL;
        ENTRY;

        if (parent_lock_handle) {
                parent_lock = ldlm_handle2lock(parent_lock_handle);
                if (parent_lock)
                        parent_res = parent_lock->l_resource;
        }

        res = ldlm_resource_get(ns, parent_res, res_id, type, 1);
        if (res == NULL)
                RETURN(NULL);

        lock = ldlm_lock_new(parent_lock, res);
        ldlm_resource_putref(res);
        if (parent_lock != NULL)
                LDLM_LOCK_PUT(parent_lock);

        if (lock == NULL)
                RETURN(NULL);

        lock->l_req_mode = mode;
        lock->l_ast_data = data;
        lock->l_blocking_ast = blocking;
        lock->l_completion_ast = completion;
        lock->l_glimpse_ast = glimpse;
        lock->l_pid = current->pid;

        if (lvb_len) {
                lock->l_lvb_len = lvb_len;
                OBD_ALLOC(lock->l_lvb_data, lvb_len);
                if (lock->l_lvb_data == NULL) {
                        OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
                        RETURN(NULL);
                }
        }

        RETURN(lock);
}

ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                               struct ldlm_lock **lockp,
                               void *cookie, int *flags)
{
        struct ldlm_lock *lock = *lockp;
        struct ldlm_resource *res = lock->l_resource;
        int local = res->lr_namespace->ns_client;
        ldlm_processing_policy policy;
        ldlm_error_t rc = ELDLM_OK;
        ENTRY;

        /* policies are not executed on the client or during replay */
        if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
            && !local && ns->ns_policy) {
                rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
                                   NULL);
                if (rc == ELDLM_LOCK_REPLACED) {
                        /* The lock that was returned has already been granted,
                         * and placed into lockp.  If it's not the same as the
                         * one we passed in, then destroy the old one and our
                         * work here is done. */
                        if (lock != *lockp) {
                                ldlm_lock_destroy(lock);
                                LDLM_LOCK_PUT(lock);
                        }
                        *flags |= LDLM_FL_LOCK_CHANGED;
                        RETURN(0);
                } else if (rc == ELDLM_LOCK_ABORTED ||
                           (rc == 0 && (*flags & LDLM_FL_INTENT_ONLY))) {
                        ldlm_lock_destroy(lock);
                        RETURN(rc);
                }
                LASSERT(rc == ELDLM_OK);
        }

        lock_res(lock->l_resource);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted before
                 * we got a chance to actually enqueue it.  We don't need to do
                 * anything else. */
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                            LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
                GOTO(out, ELDLM_OK);
        }

        /* Some flags from the enqueue want to make it into the AST, via the
         * lock's l_flags. */
        lock->l_flags |= (*flags & LDLM_AST_DISCARD_DATA);

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below).  In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags'.
         *
         * We do exactly the same thing during recovery, when the server is
         * more or less trusting the clients not to lie.
         *
         * FIXME (bug 268): Detect obvious lies by checking compatibility in
         * granted/converting queues. */
        ldlm_resource_unlink_lock(lock);
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                else
                        ldlm_grant_lock(lock, NULL);
                GOTO(out, ELDLM_OK);
        } else if (*flags & LDLM_FL_REPLAY) {
                if (*flags & LDLM_FL_BLOCK_CONV) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_WAIT) {
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
                        ldlm_grant_lock(lock, NULL);
                        GOTO(out, ELDLM_OK);
                }
                /* If no flags, fall through to normal enqueue path. */
        }

        policy = ldlm_processing_policy_table[res->lr_type];
        policy(lock, flags, 1, &rc, NULL);
        EXIT;
out:
        unlock_res(lock->l_resource);
        return rc;
}
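
/* Illustrative sketch (not part of the original source): the create/enqueue
 * pairing as driven by a caller on a local namespace.  The callback
 * arguments are placeholders, enqueue_sketch is a hypothetical name, and a
 * real caller must also handle the LDLM_FL_BLOCK_* flags and the
 * ELDLM_LOCK_ABORTED outcome. */
#if 0
static int enqueue_sketch(struct ldlm_namespace *ns, struct ldlm_res_id res_id,
                          ldlm_blocking_callback blocking,
                          ldlm_completion_callback completion)
{
        struct ldlm_lock *lock;
        int flags = 0;
        ldlm_error_t err;

        lock = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_PR,
                                blocking, completion, NULL, NULL, 0);
        if (lock == NULL)
                return -ENOMEM;

        err = ldlm_lock_enqueue(ns, &lock, NULL, &flags);
        return err == ELDLM_OK ? 0 : -EINVAL;
}
#endif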

/* Must be called with namespace taken: queue is waiting or converting. */
int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
                         struct list_head *work_list)
{
        struct list_head *tmp, *pos;
        ldlm_processing_policy policy;
        int flags;
        int rc = LDLM_ITER_CONTINUE;
        ldlm_error_t err;
        ENTRY;

        check_res_locked(res);

        policy = ldlm_processing_policy_table[res->lr_type];
        LASSERT(policy);

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;
                pending = list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                flags = 0;
                rc = policy(pending, &flags, 0, &err, work_list);
                if (rc != LDLM_ITER_CONTINUE)
                        break;
        }

        RETURN(rc);
}

int ldlm_run_bl_ast_work(struct list_head *rpc_list)
{
        struct list_head *tmp, *pos;
        struct ldlm_lock_desc d;
        int rc = 0, retval = 0;
        ENTRY;

        list_for_each_safe(tmp, pos, rpc_list) {
                struct ldlm_lock *lock =
                        list_entry(tmp, struct ldlm_lock, l_bl_ast);

                /* nobody should touch l_bl_ast */
                lock_res(lock->l_resource);
                list_del_init(&lock->l_bl_ast);

                LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
                LASSERT(lock->l_bl_ast_run == 0);
                LASSERT(lock->l_blocking_lock);
                lock->l_bl_ast_run++;
                unlock_res(lock->l_resource);

                ldlm_lock2desc(lock->l_blocking_lock, &d);

                LDLM_LOCK_PUT(lock->l_blocking_lock);
                lock->l_blocking_lock = NULL;
                rc = lock->l_blocking_ast(lock, &d, NULL, LDLM_CB_BLOCKING);

                if (rc == -ERESTART)
                        retval = rc;
                else if (rc)
                        CDEBUG(D_DLMTRACE, "Failed AST - should clean & "
                               "disconnect client\n");
                LDLM_LOCK_PUT(lock);
        }
        RETURN(retval);
}

int ldlm_run_cp_ast_work(struct list_head *rpc_list)
{
        struct list_head *tmp, *pos;
        int rc = 0, retval = 0;
        ENTRY;

        /* It's possible to receive a completion AST before we've set
         * the l_completion_ast pointer: either because the AST arrived
         * before the reply, or simply because there's a small race
         * window between receiving the reply and finishing the local
         * enqueue. (bug 842)
         *
         * This can't happen with the blocking_ast, however, because we
         * will never call the local blocking_ast until we drop our
         * reader/writer reference, which we won't do until we get the
         * reply and finish enqueueing. */

        list_for_each_safe(tmp, pos, rpc_list) {
                struct ldlm_lock *lock =
                        list_entry(tmp, struct ldlm_lock, l_cp_ast);

                /* nobody should touch l_cp_ast */
                lock_res(lock->l_resource);
                list_del_init(&lock->l_cp_ast);
                LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
                lock->l_flags &= ~LDLM_FL_CP_REQD;
                unlock_res(lock->l_resource);

                if (lock->l_completion_ast != NULL)
                        rc = lock->l_completion_ast(lock, 0, 0);

                if (rc == -ERESTART)
                        retval = rc;
                else if (rc)
                        CDEBUG(D_DLMTRACE, "Failed AST - should clean & "
                               "disconnect client\n");
                LDLM_LOCK_PUT(lock);
        }
        RETURN(retval);
}

static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
{
        ldlm_reprocess_all(res);
        return LDLM_ITER_CONTINUE;
}

void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
{
        struct list_head *tmp;
        int i, rc;

        spin_lock(&ns->ns_hash_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                tmp = ns->ns_hash[i].next;
                while (tmp != &(ns->ns_hash[i])) {
                        struct ldlm_resource *res =
                                list_entry(tmp, struct ldlm_resource, lr_hash);

                        ldlm_resource_getref(res);
                        spin_unlock(&ns->ns_hash_lock);

                        rc = reprocess_one_queue(res, NULL);

                        spin_lock(&ns->ns_hash_lock);
                        tmp = tmp->next;
                        ldlm_resource_putref_locked(res);

                        if (rc == LDLM_ITER_STOP)
                                GOTO(out, rc);
                }
        }
 out:
        spin_unlock(&ns->ns_hash_lock);
        EXIT;
}

void ldlm_reprocess_all(struct ldlm_resource *res)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        int rc;
        ENTRY;

        /* Local lock trees don't get reprocessed. */
        if (res->lr_namespace->ns_client) {
                EXIT;
                return;
        }

 restart:
        lock_res(res);
        rc = ldlm_reprocess_queue(res, &res->lr_converting, &rpc_list);
        if (rc == LDLM_ITER_CONTINUE)
                ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
        unlock_res(res);

        rc = ldlm_run_cp_ast_work(&rpc_list);
        if (rc == -ERESTART) {
                LASSERT(list_empty(&rpc_list));
                goto restart;
        }
        EXIT;
}

void ldlm_cancel_callback(struct ldlm_lock *lock)
{
        check_res_locked(lock->l_resource);

        if (!(lock->l_flags & LDLM_FL_CANCEL)) {
                lock->l_flags |= LDLM_FL_CANCEL;
                if (lock->l_blocking_ast) {
                        unlock_res(lock->l_resource);
                        lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
                                             LDLM_CB_CANCELING);
                        lock_res(lock->l_resource);
                } else {
                        LDLM_DEBUG(lock, "no blocking ast");
                }
        }
}

void ldlm_lock_cancel(struct ldlm_lock *lock)
{
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        ENTRY;

        res = lock->l_resource;
        ns = res->lr_namespace;

        ldlm_del_waiting_lock(lock);
        lock_res(res);

        /* Please do not, no matter how tempting, remove this LBUG without
         * talking to me first. -phik */
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                LBUG();
        }

        ldlm_cancel_callback(lock);

        ldlm_resource_unlink_lock(lock);
        unlock_res(res);

        ldlm_lock_destroy(lock);

        EXIT;
}

int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
{
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);
        ENTRY;

        if (lock == NULL)
                RETURN(-EINVAL);

        lock->l_ast_data = data;
        LDLM_LOCK_PUT(lock);
        RETURN(0);
}

void ldlm_cancel_locks_for_export(struct obd_export *exp)
{
        struct ldlm_lock *lock;
        struct ldlm_resource *res;

        spin_lock(&exp->exp_ldlm_data.led_lock);
        while (!list_empty(&exp->exp_ldlm_data.led_held_locks)) {
                lock = list_entry(exp->exp_ldlm_data.led_held_locks.next,
                                  struct ldlm_lock, l_export_chain);
                res = ldlm_resource_getref(lock->l_resource);
                LDLM_LOCK_GET(lock);
                spin_unlock(&exp->exp_ldlm_data.led_lock);

                LDLM_DEBUG(lock, "export %p", exp);
                ldlm_lock_cancel(lock);
                ldlm_reprocess_all(res);

                ldlm_resource_putref(res);
                LDLM_LOCK_PUT(lock);
                spin_lock(&exp->exp_ldlm_data.led_lock);
        }
        spin_unlock(&exp->exp_ldlm_data.led_lock);
}

struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        int *flags)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        int granted = 0;
        int old_mode, rc;
        ldlm_error_t err;
        ENTRY;

        if (new_mode == lock->l_granted_mode) { // No changes? Just return.
                *flags |= LDLM_FL_BLOCK_GRANTED;
                RETURN(lock->l_resource);
        }

        LASSERTF(new_mode == LCK_PW && lock->l_granted_mode == LCK_PR,
                 "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);

        res = lock->l_resource;
        ns = res->lr_namespace;

        lock_res(res);

        old_mode = lock->l_req_mode;
        lock->l_req_mode = new_mode;
        ldlm_resource_unlink_lock(lock);

        /* If this is a local resource, put it on the appropriate list. */
        if (res->lr_namespace->ns_client) {
                if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                } else {
                        /* This should never happen, because of the way the
                         * server handles conversions. */
                        LDLM_ERROR(lock, "Erroneous flags %d on local lock\n",
                                   *flags);
                        LBUG();

                        ldlm_grant_lock(lock, &rpc_list);
                        granted = 1;
                        /* FIXME: completion handling not with ns_lock held ! */
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0, NULL);
                }
        } else {
                int pflags = 0;
                ldlm_processing_policy policy;
                policy = ldlm_processing_policy_table[res->lr_type];
                rc = policy(lock, &pflags, 0, &err, &rpc_list);
                if (rc == LDLM_ITER_STOP) {
                        lock->l_req_mode = old_mode;
                        ldlm_resource_add_lock(res, &res->lr_granted, lock);
                        res = NULL;
                } else {
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        granted = 1;
                }
        }
        unlock_res(lock->l_resource);

        if (granted)
                ldlm_run_cp_ast_work(&rpc_list);
        RETURN(res);
}

void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
{
        char str[PTL_NALFMT_SIZE];
        struct obd_device *obd = NULL;

        if (!((portal_debug | D_ERROR) & level))
                return;

        if (!lock) {
                CDEBUG(level, "  NULL LDLM lock\n");
                return;
        }

        CDEBUG(level, "  -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
               lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
               pos, lock->l_pid);
        if (lock->l_conn_export != NULL)
                obd = lock->l_conn_export->exp_obd;
        if (lock->l_export && lock->l_export->exp_connection) {
                CDEBUG(level, "  Node: NID %s on %s (rhandle: "LPX64")\n",
                       ptlrpc_peernid2str(&lock->l_export->exp_connection->c_peer, str),
                       lock->l_export->exp_connection->c_peer.peer_ni->pni_name,
                       lock->l_remote_handle.cookie);
        } else if (obd == NULL) {
                CDEBUG(level, "  Node: local\n");
        } else {
                struct obd_import *imp = obd->u.cli.cl_import;
                CDEBUG(level, "  Node: NID %s on %s (rhandle: "LPX64")\n",
                       ptlrpc_peernid2str(&imp->imp_connection->c_peer, str),
                       imp->imp_connection->c_peer.peer_ni->pni_name,
                       lock->l_remote_handle.cookie);
        }
        CDEBUG(level, "  Resource: %p ("LPU64"/"LPU64")\n", lock->l_resource,
               lock->l_resource->lr_name.name[0],
               lock->l_resource->lr_name.name[1]);
        CDEBUG(level, "  Req mode: %s, grant mode: %s, rc: %u, read: %d, "
               "write: %d\n", ldlm_lockname[lock->l_req_mode],
               ldlm_lockname[lock->l_granted_mode],
               atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers);
        if (lock->l_resource->lr_type == LDLM_EXTENT)
                CDEBUG(level, "  Extent: "LPU64" -> "LPU64
                       " (req "LPU64"-"LPU64")\n",
                       lock->l_policy_data.l_extent.start,
                       lock->l_policy_data.l_extent.end,
                       lock->l_req_extent.start, lock->l_req_extent.end);
        else if (lock->l_resource->lr_type == LDLM_FLOCK)
                CDEBUG(level, "  Pid: "LPU64" Extent: "LPU64" -> "LPU64"\n",
                       lock->l_policy_data.l_flock.pid,
                       lock->l_policy_data.l_flock.start,
                       lock->l_policy_data.l_flock.end);
        else if (lock->l_resource->lr_type == LDLM_IBITS)
                CDEBUG(level, "  Bits: "LPX64"\n",
                       lock->l_policy_data.l_inodebits.bits);
}

void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        if (lock == NULL)
                return;

        ldlm_lock_dump(D_OTHER, lock, 0);

        LDLM_LOCK_PUT(lock);
}