/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lock.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
# ifndef HAVE_VFS_INTENT_PATCHES
# include <linux/lustre_intent.h>
# endif
#else
# include <liblustre.h>
#endif

#include <obd_class.h>
#include "ldlm_internal.h"

/* lock types */
char *ldlm_lockname[] = {
        [0] "--",
        [LCK_EX] "EX",
        [LCK_PW] "PW",
        [LCK_PR] "PR",
        [LCK_CW] "CW",
        [LCK_CR] "CR",
        [LCK_NL] "NL",
        [LCK_GROUP] "GROUP"
};

char *ldlm_typename[] = {
        [LDLM_PLAIN] "PLN",
        [LDLM_EXTENT] "EXT",
        [LDLM_FLOCK] "FLK",
        [LDLM_IBITS] "IBT",
};

char *ldlm_it2str(int it)
{
        switch (it) {
        case IT_OPEN:
                return "open";
        case IT_CREAT:
                return "creat";
        case (IT_OPEN | IT_CREAT):
                return "open|creat";
        case IT_READDIR:
                return "readdir";
        case IT_GETATTR:
                return "getattr";
        case IT_LOOKUP:
                return "lookup";
        case IT_UNLINK:
                return "unlink";
        case IT_GETXATTR:
                return "getxattr";
        default:
                CERROR("Unknown intent %d\n", it);
                return "UNKNOWN";
        }
}

extern cfs_mem_cache_t *ldlm_lock_slab;

static ldlm_processing_policy ldlm_processing_policy_table[] = {
        [LDLM_PLAIN] ldlm_process_plain_lock,
        [LDLM_EXTENT] ldlm_process_extent_lock,
#ifdef __KERNEL__
        [LDLM_FLOCK] ldlm_process_flock_lock,
#endif
        [LDLM_IBITS] ldlm_process_inodebits_lock,
};

ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
{
        return ldlm_processing_policy_table[res->lr_type];
}

void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
{
        ns->ns_policy = arg;
}

/*
 * REFCOUNTED LOCK OBJECTS
 */


/*
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */
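
/*
 * Illustrative sketch (assumed usage, not taken verbatim from any caller):
 * with the references above, a typical lock lifetime looks roughly like
 * this, using only calls that appear in this file:
 *
 *      lock = ldlm_lock_new(res);               refcount == 2 (see below)
 *      ldlm_lock_addref_internal(lock, mode);   +1 user reference
 *      ...
 *      ldlm_lock_decref_internal(lock, mode);   drop the user reference
 *      ldlm_lock_destroy(lock);                 drops the hash-table ref
 *      LDLM_LOCK_PUT(lock);                     final put frees the lock
 */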
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        atomic_inc(&lock->l_refc);
        return lock;
}

static void ldlm_lock_free(struct ldlm_lock *lock, size_t size)
{
        LASSERT(size == sizeof(*lock));
        OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
}

void ldlm_lock_put(struct ldlm_lock *lock)
{
        ENTRY;

        LASSERT(lock->l_resource != LP_POISON);
        LASSERT(atomic_read(&lock->l_refc) > 0);
        if (atomic_dec_and_test(&lock->l_refc)) {
                struct ldlm_resource *res;

                LDLM_DEBUG(lock,
                           "final lock_put on destroyed lock, freeing it.");

                res = lock->l_resource;
                LASSERT(lock->l_destroyed);
                LASSERT(list_empty(&lock->l_res_link));
                LASSERT(list_empty(&lock->l_pending_chain));

                atomic_dec(&res->lr_namespace->ns_locks);
                ldlm_resource_putref(res);
                lock->l_resource = NULL;
                if (lock->l_export) {
                        class_export_put(lock->l_export);
                        lock->l_export = NULL;
                }

                if (lock->l_lvb_data != NULL)
                        OBD_FREE(lock->l_lvb_data, lock->l_lvb_len);

                ldlm_interval_free(ldlm_interval_detach(lock));
                OBD_FREE_RCU_CB(lock, sizeof(*lock), &lock->l_handle,
                                ldlm_lock_free);
        }

        EXIT;
}

int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
        int rc = 0;
        if (!list_empty(&lock->l_lru)) {
                struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
                list_del_init(&lock->l_lru);
                ns->ns_nr_unused--;
                LASSERT(ns->ns_nr_unused >= 0);
                rc = 1;
        }
        return rc;
}

int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        int rc;
        ENTRY;
        spin_lock(&ns->ns_unused_lock);
        rc = ldlm_lock_remove_from_lru_nolock(lock);
        spin_unlock(&ns->ns_unused_lock);
        EXIT;
        return rc;
}

void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        lock->l_last_used = cfs_time_current();
        LASSERT(list_empty(&lock->l_lru));
        LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
        list_add_tail(&lock->l_lru, &ns->ns_unused_list);
        LASSERT(ns->ns_nr_unused >= 0);
        ns->ns_nr_unused++;
}

void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        ENTRY;
        spin_lock(&ns->ns_unused_lock);
        ldlm_lock_add_to_lru_nolock(lock);
        spin_unlock(&ns->ns_unused_lock);
        EXIT;
}

void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        ENTRY;
        spin_lock(&ns->ns_unused_lock);
        if (!list_empty(&lock->l_lru)) {
                ldlm_lock_remove_from_lru_nolock(lock);
                ldlm_lock_add_to_lru_nolock(lock);
        }
        spin_unlock(&ns->ns_unused_lock);
        EXIT;
}
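
/*
 * Illustrative sketch (assumed usage, not verbatim from any caller): the
 * unused-lock LRU only ever holds locks with no readers or writers, so a
 * client-side lock typically cycles through it like this:
 *
 *      ldlm_lock_addref_internal(lock, mode);  pulls the lock off the LRU
 *      ...  I/O under the lock  ...
 *      ldlm_lock_decref_internal(lock, mode);  last user puts it back on
 *                                              the LRU (see the
 *                                              ldlm_lock_add_to_lru() call
 *                                              in decref below)
 */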

/* This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock,
 * because it's not in the hash table anymore.  -phil */
int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
{
        ENTRY;

        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (!list_empty(&lock->l_res_link)) {
                LDLM_ERROR(lock, "lock still on resource");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (lock->l_destroyed) {
                LASSERT(list_empty(&lock->l_lru));
                EXIT;
                return 0;
        }
        lock->l_destroyed = 1;

        if (lock->l_export && lock->l_export->exp_lock_hash)
                lustre_hash_del(lock->l_export->exp_lock_hash,
                                &lock->l_remote_handle, &lock->l_exp_hash);

        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

#if 0
        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        if (lock->l_export)
                class_export_put(lock->l_export);
        lock->l_export = NULL;
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0);
#endif
        EXIT;
        return 1;
}

void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        int first;
        ENTRY;
        lock_res_and_lock(lock);
        first = ldlm_lock_destroy_internal(lock);
        unlock_res_and_lock(lock);

        /* drop reference from hashtable only for first destroy */
        if (first)
                LDLM_LOCK_PUT(lock);
        EXIT;
}

void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
{
        int first;
        ENTRY;
        first = ldlm_lock_destroy_internal(lock);
        /* drop reference from hashtable only for first destroy */
        if (first)
                LDLM_LOCK_PUT(lock);
        EXIT;
}

/* this is called by portals_handle2object with the handle lock taken */
static void lock_handle_addref(void *lock)
{
        LDLM_LOCK_GET((struct ldlm_lock *)lock);
}

/*
 * usage: pass in a resource on which you have done ldlm_resource_get
 *        pass in a parent lock on which you have done a ldlm_lock_get
 *        after return, ldlm_*_put the resource and parent
 * returns: lock with refcount 2 - one for current caller and one for remote
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;
        ENTRY;

        if (resource == NULL)
                LBUG();

        OBD_SLAB_ALLOC(lock, ldlm_lock_slab, CFS_ALLOC_IO, sizeof(*lock));
        if (lock == NULL)
                RETURN(NULL);

        spin_lock_init(&lock->l_lock);
        lock->l_resource = ldlm_resource_getref(resource);

        atomic_set(&lock->l_refc, 2);
        CFS_INIT_LIST_HEAD(&lock->l_res_link);
        CFS_INIT_LIST_HEAD(&lock->l_lru);
        CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
        CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
        CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
        CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
        cfs_waitq_init(&lock->l_waitq);
        lock->l_blocking_lock = NULL;
        CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
        CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
        CFS_INIT_HLIST_NODE(&lock->l_exp_hash);

        atomic_inc(&resource->lr_namespace->ns_locks);
        CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, lock_handle_addref);

        CFS_INIT_LIST_HEAD(&lock->l_extents_list);
        spin_lock_init(&lock->l_extents_list_lock);
        CFS_INIT_LIST_HEAD(&lock->l_cache_locks_list);

        RETURN(lock);
}
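
/*
 * Illustrative sketch of the calling convention documented above (assumed
 * usage, mirroring what ldlm_lock_create() below actually does): the
 * caller takes its own resource reference, and may drop it as soon as
 * ldlm_lock_new() returns, because the new lock pins the resource via
 * ldlm_resource_getref():
 *
 *      res = ldlm_resource_get(ns, NULL, res_id, type, 1);
 *      lock = ldlm_lock_new(res);
 *      ldlm_resource_putref(res);      the lock still holds its own ref
 */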

int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              const struct ldlm_res_id *new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;
        struct ldlm_resource *newres;
        int type;
        ENTRY;

        LASSERT(ns_is_client(ns));

        lock_res_and_lock(lock);
        if (memcmp(new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                unlock_res_and_lock(lock);
                RETURN(0);
        }

        LASSERT(new_resid->name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(list_empty(&lock->l_res_link));

        type = oldres->lr_type;
        unlock_res_and_lock(lock);

        newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
        if (newres == NULL)
                RETURN(-ENOMEM);

        lock_res_and_lock(lock);
        LASSERT(memcmp(new_resid, &lock->l_resource->lr_name,
                       sizeof(lock->l_resource->lr_name)) != 0);
        lock_res(newres);
        lock->l_resource = newres;
        unlock_res(oldres);
        unlock_res_and_lock(lock);

        /* ...and the flowers are still standing! */
        ldlm_resource_putref(oldres);

        RETURN(0);
}

/*
 *  HANDLES
 */

void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->cookie = lock->l_handle.h_cookie;
}

/* if flags: atomically get the lock and set the flags.
 *           Return NULL if a flag is already set.
 */

struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
                                     int flags)
{
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock, *retval = NULL;
        ENTRY;

        LASSERT(handle);

        lock = class_handle2object(handle->cookie);
        if (lock == NULL)
                RETURN(NULL);

        LASSERT(lock->l_resource != NULL);
        ns = lock->l_resource->lr_namespace;
        LASSERT(ns != NULL);

        lock_res_and_lock(lock);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if (lock->l_destroyed) {
                unlock_res_and_lock(lock);
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags && (lock->l_flags & flags)) {
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags)
                lock->l_flags |= flags;

        unlock_res_and_lock(lock);
        retval = lock;
        EXIT;
 out:
        return retval;
}
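
/*
 * Illustrative sketch (assumed usage): a handle is the opaque cookie that
 * identifies a lock across the wire, so a typical round trip is:
 *
 *      struct lustre_handle lockh;
 *
 *      ldlm_lock2handle(lock, &lockh);         pack the cookie
 *      ...
 *      lock = ldlm_handle2lock(&lockh);        NULL if stale or destroyed
 *      if (lock != NULL) {
 *              ...  use the lock  ...
 *              LDLM_LOCK_PUT(lock);            drop the lookup reference
 *      }
 */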

struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *ns,
                                      const struct lustre_handle *handle)
{
        struct ldlm_lock *retval = NULL;
        retval = __ldlm_handle2lock(handle, 0);
        return retval;
}

void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        struct obd_export *exp = lock->l_export?:lock->l_conn_export;
        /* INODEBITS_INTEROP: If the other side does not support
         * inodebits, reply with a plain lock descriptor.
         */
        if ((lock->l_resource->lr_type == LDLM_IBITS) &&
            (exp && !(exp->exp_connect_flags & OBD_CONNECT_IBITS))) {
                struct ldlm_resource res = *lock->l_resource;

                /* Make sure all the right bits are set in this lock we
                   are going to pass to client */
                LASSERTF(lock->l_policy_data.l_inodebits.bits ==
                         (MDS_INODELOCK_LOOKUP|MDS_INODELOCK_UPDATE),
                         "Inappropriate inode lock bits during "
                         "conversion " LPU64 "\n",
                         lock->l_policy_data.l_inodebits.bits);
                res.lr_type = LDLM_PLAIN;
                ldlm_res2desc(&res, &desc->l_resource);
                /* Convert "new" lock mode to something old client can
                   understand */
                if ((lock->l_req_mode == LCK_CR) ||
                    (lock->l_req_mode == LCK_CW))
                        desc->l_req_mode = LCK_PR;
                else
                        desc->l_req_mode = lock->l_req_mode;
                if ((lock->l_granted_mode == LCK_CR) ||
                    (lock->l_granted_mode == LCK_CW)) {
                        desc->l_granted_mode = LCK_PR;
                } else {
                        /* We never grant PW/EX locks to clients */
                        LASSERT((lock->l_granted_mode != LCK_PW) &&
                                (lock->l_granted_mode != LCK_EX));
                        desc->l_granted_mode = lock->l_granted_mode;
                }

                /* We do not copy policy here, because there is no
                   policy for plain locks */
        } else {
                ldlm_res2desc(lock->l_resource, &desc->l_resource);
                desc->l_req_mode = lock->l_req_mode;
                desc->l_granted_mode = lock->l_granted_mode;
                desc->l_policy_data = lock->l_policy_data;
        }
}

void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                           struct list_head *work_list)
{
        if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
                lock->l_flags |= LDLM_FL_AST_SENT;
                /* If the enqueuing client said so, tell the AST recipient to
                 * discard dirty data, rather than writing back. */
                if (new->l_flags & LDLM_AST_DISCARD_DATA)
                        lock->l_flags |= LDLM_FL_DISCARD_DATA;
                LASSERT(list_empty(&lock->l_bl_ast));
                list_add(&lock->l_bl_ast, work_list);
                LDLM_LOCK_GET(lock);
                LASSERT(lock->l_blocking_lock == NULL);
                lock->l_blocking_lock = LDLM_LOCK_GET(new);
        }
}

void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
{
        if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
                lock->l_flags |= LDLM_FL_CP_REQD;
                LDLM_DEBUG(lock, "lock granted; sending completion AST.");
                LASSERT(list_empty(&lock->l_cp_ast));
                list_add(&lock->l_cp_ast, work_list);
                LDLM_LOCK_GET(lock);
        }
}

/* must be called with lr_lock held */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                                struct list_head *work_list)
{
        ENTRY;
        check_res_locked(lock->l_resource);
        if (new)
                ldlm_add_bl_work_item(lock, new, work_list);
        else
                ldlm_add_cp_work_item(lock, work_list);
        EXIT;
}

void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        LASSERT(lock != NULL);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        ldlm_lock_remove_from_lru(lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR))
                lock->l_readers++;
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP))
                lock->l_writers++;
        LDLM_LOCK_GET(lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}

/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        lock_res_and_lock(lock);
        ldlm_lock_addref_internal_nolock(lock, mode);
        unlock_res_and_lock(lock);
}

/* only called in ldlm_flock_destroy and for local locks.
 * for LDLM_FLOCK type locks, l_blocking_ast is null, and
 * ldlm_lock_remove_from_lru() does nothing, so it is safe
 * for ldlm_flock_destroy to use this, dropping some code */
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                LASSERT(lock->l_readers > 0);
                lock->l_readers--;
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP)) {
                LASSERT(lock->l_writers > 0);
                lock->l_writers--;
        }

        LDLM_LOCK_PUT(lock);    /* matches the ldlm_lock_get in addref */
}

void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
        struct ldlm_namespace *ns;
        ENTRY;

        lock_res_and_lock(lock);

        ns = lock->l_resource->lr_namespace;

        ldlm_lock_decref_internal_nolock(lock, mode);

        if (lock->l_flags & LDLM_FL_LOCAL &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                lock->l_flags |= LDLM_FL_CBPENDING;
        }

        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                /* If we received a blocked AST and this was the last reference,
                 * run the callback. */
                if (ns_is_server(ns) && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                unlock_res_and_lock(lock);
                if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
                    ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
                        ldlm_handle_bl_callback(ns, NULL, lock);
        } else if (ns_is_client(ns) &&
                   !lock->l_readers && !lock->l_writers &&
                   !(lock->l_flags & LDLM_FL_NO_LRU) &&
                   !(lock->l_flags & LDLM_FL_BL_AST)) {
                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                ldlm_lock_add_to_lru(lock);
                unlock_res_and_lock(lock);
                /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
                 * are not supported by the server, otherwise it is done on
                 * enqueue. */
                if (!exp_connect_cancelset(lock->l_conn_export) &&
                    !ns_connect_lru_resize(ns))
                        ldlm_cancel_lru(ns, 0, LDLM_ASYNC, 0);
        } else {
                unlock_res_and_lock(lock);
        }

        EXIT;
}

void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

/* This will drop a lock reference and mark it for destruction, but will not
 * necessarily cancel the lock before returning. */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        ENTRY;

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_CBPENDING;
        unlock_res_and_lock(lock);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

struct sl_insert_point {
        struct list_head *res_link;
        struct list_head *mode_link;
        struct list_head *policy_link;
};

/*
 * search_granted_lock
 *
 * Description:
 *      Finds a position to insert the new lock.
 * Parameters:
 *      queue [input]:  the granted list the search acts on;
 *      req [input]:    the lock whose position is to be located;
 *      prev [output]:  positions within the 3 lists to insert @req at
 * Return Value:
 *      filled @prev
 * NOTE: called by
 *  - ldlm_grant_lock_with_skiplist
 */
static void search_granted_lock(struct list_head *queue,
                                struct ldlm_lock *req,
                                struct sl_insert_point *prev)
{
        struct list_head *tmp;
        struct ldlm_lock *lock, *mode_end, *policy_end;
        ENTRY;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                mode_end = list_entry(lock->l_sl_mode.prev, struct ldlm_lock,
                                      l_sl_mode);

                if (lock->l_req_mode != req->l_req_mode) {
                        /* jump to last lock of mode group */
                        tmp = &mode_end->l_res_link;
                        continue;
                }

                /* suitable mode group is found */
                if (lock->l_resource->lr_type == LDLM_PLAIN) {
                        /* insert point is last lock of the mode group */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else if (lock->l_resource->lr_type == LDLM_IBITS) {
                        for (;;) {
                                policy_end = list_entry(lock->l_sl_policy.prev,
                                                        struct ldlm_lock,
                                                        l_sl_policy);

                                if (lock->l_policy_data.l_inodebits.bits ==
                                    req->l_policy_data.l_inodebits.bits) {
                                        /* insert point is last lock of
                                         * the policy group */
                                        prev->res_link =
                                                &policy_end->l_res_link;
                                        prev->mode_link =
                                                &policy_end->l_sl_mode;
                                        prev->policy_link =
                                                &policy_end->l_sl_policy;
                                        EXIT;
                                        return;
                                }

                                if (policy_end == mode_end)
                                        /* done with mode group */
                                        break;

                                /* jump to next policy group within the mode group */
                                tmp = policy_end->l_res_link.next;
                                lock = list_entry(tmp, struct ldlm_lock,
                                                  l_res_link);
                        }  /* loop over policy groups within the mode group */

                        /* insert point is last lock of the mode group,
                         * new policy group is started */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else {
                        LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock");
                        LBUG();
                }
        }

        /* insert point is last lock on the queue,
         * new mode group and new policy group are started */
        prev->res_link = queue->prev;
        prev->mode_link = &req->l_sl_mode;
        prev->policy_link = &req->l_sl_policy;
        EXIT;
        return;
}
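
/*
 * Illustrative layout (a sketch, not from the original source) of the
 * skiplist structure that search_granted_lock() walks.  The granted list
 * is kept as consecutive mode groups; for LDLM_IBITS locks each mode
 * group is further split into policy groups with identical inodebits:
 *
 *      lr_granted:  [PR b1][PR b1][PR b2] [PW b3][PW b3] ...
 *                   \policy grp/\p.grp/  \-policy group-/
 *                   \---- mode group ---/\- mode group -/
 *
 * l_sl_mode links the first and last lock of a mode group (mode_end above
 * is found via l_sl_mode.prev), and l_sl_policy does the same within a
 * policy group, which is what lets the search skip whole groups at a time.
 */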

static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
                                       struct sl_insert_point *prev)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        ldlm_resource_dump(D_INFO, res);
        CDEBUG(D_OTHER, "About to add this lock:\n");
        ldlm_lock_dump(D_OTHER, lock, 0);

        if (lock->l_destroyed) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                return;
        }

        LASSERT(list_empty(&lock->l_res_link));
        LASSERT(list_empty(&lock->l_sl_mode));
        LASSERT(list_empty(&lock->l_sl_policy));

        list_add(&lock->l_res_link, prev->res_link);
        list_add(&lock->l_sl_mode, prev->mode_link);
        list_add(&lock->l_sl_policy, prev->policy_link);

        EXIT;
}

static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
{
        struct sl_insert_point prev;
        ENTRY;

        LASSERT(lock->l_req_mode == lock->l_granted_mode);

        search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
        ldlm_granted_list_add_lock(lock, &prev);
        EXIT;
}

/* NOTE: called by
 *  - ldlm_lock_enqueue
 *  - ldlm_reprocess_queue
 *  - ldlm_lock_convert
 *
 * must be called with lr_lock held
 */
void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        lock->l_granted_mode = lock->l_req_mode;
        if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
                ldlm_grant_lock_with_skiplist(lock);
        else if (res->lr_type == LDLM_EXTENT)
                ldlm_extent_add_lock(res, lock);
        else
                ldlm_resource_add_lock(res, &res->lr_granted, lock);

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (work_list && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, work_list);

        ldlm_pool_add(&res->lr_namespace->ns_pool, lock);
        EXIT;
}

/* returns a referenced lock or NULL.  See the flag descriptions below, in the
 * comment above ldlm_lock_match */
static struct ldlm_lock *search_queue(struct list_head *queue,
                                      ldlm_mode_t *mode,
                                      ldlm_policy_data_t *policy,
                                      struct ldlm_lock *old_lock, int flags)
{
        struct ldlm_lock *lock;
        struct list_head *tmp;

        list_for_each(tmp, queue) {
                ldlm_mode_t match;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock == old_lock)
                        break;

                /* llite sometimes wants to match locks that will be
                 * canceled when their users drop, but we allow it to match
                 * if it passes in CBPENDING and the lock still has users.
                 * this is generally only going to be used by children
                 * whose parents already hold a lock so forward progress
                 * can still happen. */
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    !(flags & LDLM_FL_CBPENDING))
                        continue;
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    lock->l_readers == 0 && lock->l_writers == 0)
                        continue;

                if (!(lock->l_req_mode & *mode))
                        continue;
                match = lock->l_req_mode;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_policy_data.l_extent.start >
                     policy->l_extent.start ||
                     lock->l_policy_data.l_extent.end < policy->l_extent.end))
                        continue;

                if (unlikely(match == LCK_GROUP) &&
                    lock->l_resource->lr_type == LDLM_EXTENT &&
                    lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
                        continue;

                /* We match if we have an existing lock with the same or a
                   wider set of bits. */
                if (lock->l_resource->lr_type == LDLM_IBITS &&
                     ((lock->l_policy_data.l_inodebits.bits &
                      policy->l_inodebits.bits) !=
                      policy->l_inodebits.bits))
                        continue;

                if (lock->l_destroyed || (lock->l_flags & LDLM_FL_FAILED))
                        continue;

                if ((flags & LDLM_FL_LOCAL_ONLY) &&
                    !(lock->l_flags & LDLM_FL_LOCAL))
                        continue;

                if (flags & LDLM_FL_TEST_LOCK) {
                        LDLM_LOCK_GET(lock);
                        ldlm_lock_touch_in_lru(lock);
                } else {
                        ldlm_lock_addref_internal_nolock(lock, match);
                }
                *mode = match;
                return lock;
        }

        return NULL;
}

void ldlm_lock_allow_match(struct ldlm_lock *lock)
{
        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_LVB_READY;
        cfs_waitq_signal(&lock->l_waitq);
        unlock_res_and_lock(lock);
}

/**
 * Checks if requested extent lock is compatible with another owned lock.
 *
 * Checks if \a lock is compatible with a read or write lock
 * (specified by \a rw) for an extent [\a start , \a end].
 *
 * \param lock the already owned lock
 * \param rw OBD_BRW_READ if requested for reading,
 *           OBD_BRW_WRITE if requested for writing
 * \param start start of the requested extent
 * \param end end of the requested extent
 * \param cookie transparent parameter for passing locking context
 *
 * \post result == 1, *cookie == context, appropriate lock is referenced
 *
 * \retval 1 owned lock is reused for the request
 * \retval 0 no lock reused for the request
 *
 * \see ldlm_lock_fast_release
 */
int ldlm_lock_fast_match(struct ldlm_lock *lock, int rw,
                         obd_off start, obd_off end,
                         void **cookie)
{
        LASSERT(rw == OBD_BRW_READ || rw == OBD_BRW_WRITE);

        if (!lock)
                return 0;

        lock_res_and_lock(lock);
        /* check if granted mode is compatible */
        if (rw == OBD_BRW_WRITE &&
            !(lock->l_granted_mode & (LCK_PW|LCK_GROUP)))
                goto no_match;

        /* does the lock cover the region we would like to access? */
        if ((lock->l_policy_data.l_extent.start > start) ||
            (lock->l_policy_data.l_extent.end < end))
                goto no_match;

        /* if we received a blocking callback and the lock is no longer
         * referenced, don't use it */
        if ((lock->l_flags & LDLM_FL_CBPENDING) &&
            !lock->l_writers && !lock->l_readers)
                goto no_match;

        ldlm_lock_addref_internal_nolock(lock, rw == OBD_BRW_WRITE ? LCK_PW : LCK_PR);
        unlock_res_and_lock(lock);
        *cookie = (void *)lock;
        return 1; /* avoid using rc for stack relief */

no_match:
        unlock_res_and_lock(lock);
        return 0;
}

/**
 * Releases a reference to a lock taken in a "fast" way.
 *
 * Releases a read or write (specified by \a rw) lock
 * referenced by \a cookie.
 *
 * \param rw OBD_BRW_READ if requested for reading,
 *           OBD_BRW_WRITE if requested for writing
 * \param cookie transparent parameter for passing locking context
 *
 * \post appropriate lock is dereferenced
 *
 * \see ldlm_lock_fast_match
 */
void ldlm_lock_fast_release(void *cookie, int rw)
{
        struct ldlm_lock *lock = (struct ldlm_lock *)cookie;

        LASSERT(lock != NULL);
        LASSERT(rw == OBD_BRW_READ || rw == OBD_BRW_WRITE);
        LASSERT(rw == OBD_BRW_READ ||
                (lock->l_granted_mode & (LCK_PW | LCK_GROUP)));
        ldlm_lock_decref_internal(lock, rw == OBD_BRW_WRITE ? LCK_PW : LCK_PR);
}
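
/*
 * Illustrative sketch (assumed usage) of the fast match/release pair
 * above, for a caller that already holds a candidate lock and wants to
 * cover the extent [start, end]:
 *
 *      void *cookie;
 *
 *      if (ldlm_lock_fast_match(lock, OBD_BRW_READ, start, end, &cookie)) {
 *              ...  do the read under the reused lock  ...
 *              ldlm_lock_fast_release(cookie, OBD_BRW_READ);
 *      } else {
 *              ...  fall back to a full match/enqueue  ...
 *      }
 */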

/* Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (ie, connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 *     to be canceled can still be matched as long as they still have reader
 *     or writer references
 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
 *     just tell us if we would have matched.
 *
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock.
 *
 * We also check the security context; if that fails we simply return 0 (to
 * keep caller code unchanged), and the context failure will be discovered by
 * the caller sometime later.
 */
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                            const struct ldlm_res_id *res_id, ldlm_type_t type,
                            ldlm_policy_data_t *policy, ldlm_mode_t mode,
                            struct lustre_handle *lockh)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock, *old_lock = NULL;
        int rc = 0;
        ENTRY;

        if (ns == NULL) {
                old_lock = ldlm_handle2lock(lockh);
                LASSERT(old_lock);

                ns = old_lock->l_resource->lr_namespace;
                res_id = &old_lock->l_resource->lr_name;
                type = old_lock->l_resource->lr_type;
                mode = old_lock->l_req_mode;
        }

        res = ldlm_resource_get(ns, NULL, res_id, type, 0);
        if (res == NULL) {
                LASSERT(old_lock == NULL);
                RETURN(0);
        }

        lock_res(res);

        lock = search_queue(&res->lr_granted, &mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);
        if (flags & LDLM_FL_BLOCK_GRANTED)
                GOTO(out, rc = 0);
        lock = search_queue(&res->lr_converting, &mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);
        lock = search_queue(&res->lr_waiting, &mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);

        EXIT;
 out:
        unlock_res(res);
        ldlm_resource_putref(res);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                if ((flags & LDLM_FL_LVB_READY) &&
                    (!(lock->l_flags & LDLM_FL_LVB_READY))) {
                        struct l_wait_info lwi;
                        if (lock->l_completion_ast) {
                                int err = lock->l_completion_ast(lock,
                                                          LDLM_FL_WAIT_NOREPROC,
                                                                 NULL);
                                if (err) {
                                        if (flags & LDLM_FL_TEST_LOCK)
                                                LDLM_LOCK_PUT(lock);
                                        else
                                                ldlm_lock_decref_internal(lock, mode);
                                        rc = 0;
                                        goto out2;
                                }
                        }

                        lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout), NULL,
                                               LWI_ON_SIGNAL_NOOP, NULL);

                        /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
                        l_wait_event(lock->l_waitq,
                                     (lock->l_flags & LDLM_FL_LVB_READY), &lwi);
                }
        }
 out2:
        if (rc) {
                LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")",
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[2] : policy->l_extent.start,
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[3] : policy->l_extent.end);

                /* check user's security context */
                if (lock->l_conn_export &&
                    sptlrpc_import_check_ctx(
                                class_exp2cliimp(lock->l_conn_export))) {
                        if (!(flags & LDLM_FL_TEST_LOCK))
                                ldlm_lock_decref_internal(lock, mode);
                        rc = 0;
                }

                if (flags & LDLM_FL_TEST_LOCK)
                        LDLM_LOCK_PUT(lock);

        } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
                LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
                                  LPU64"/"LPU64" ("LPU64" "LPU64")", ns,
                                  type, mode, res_id->name[0], res_id->name[1],
                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[2] :policy->l_extent.start,
                                (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[3] : policy->l_extent.end);
        }
        if (old_lock)
                LDLM_LOCK_PUT(old_lock);

        return rc ? mode : 0;
}
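
/*
 * Illustrative sketch (assumed usage, following the convention in the
 * comment above ldlm_lock_match()): a client probing for an existing
 * extent lock might do roughly:
 *
 *      struct lustre_handle lockh;
 *      ldlm_policy_data_t policy = { .l_extent = { start, end, 0 } };
 *      ldlm_mode_t mode;
 *
 *      mode = ldlm_lock_match(ns, LDLM_FL_LVB_READY, res_id, LDLM_EXTENT,
 *                             &policy, LCK_PR | LCK_PW, &lockh);
 *      if (mode != 0) {
 *              ...  lockh is an addref()ed lock of mode 'mode'  ...
 *              ldlm_lock_decref(&lockh, mode);
 *      }
 */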

/* Returns a referenced lock */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   const struct ldlm_res_id *res_id,
                                   ldlm_type_t type,
                                   ldlm_mode_t mode,
                                   ldlm_blocking_callback blocking,
                                   ldlm_completion_callback completion,
                                   ldlm_glimpse_callback glimpse,
                                   void *data, __u32 lvb_len)
{
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        ENTRY;

        res = ldlm_resource_get(ns, NULL, res_id, type, 1);
        if (res == NULL)
                RETURN(NULL);

        lock = ldlm_lock_new(res);
        ldlm_resource_putref(res);

        if (lock == NULL)
                RETURN(NULL);

        lock->l_req_mode = mode;
        lock->l_ast_data = data;
        lock->l_blocking_ast = blocking;
        lock->l_completion_ast = completion;
        lock->l_glimpse_ast = glimpse;
        lock->l_pid = cfs_curproc_pid();

        lock->l_tree_node = NULL;
        /* if this is an extent lock, allocate the interval tree node */
        if (type == LDLM_EXTENT) {
                if (ldlm_interval_alloc(lock) == NULL)
                        GOTO(out, 0);
        }

        if (lvb_len) {
                lock->l_lvb_len = lvb_len;
                OBD_ALLOC(lock->l_lvb_data, lvb_len);
                if (lock->l_lvb_data == NULL)
                        GOTO(out, 0);
        }

        RETURN(lock);

out:
        if (lock->l_lvb_data)
                OBD_FREE(lock->l_lvb_data, lvb_len);
        ldlm_interval_free(ldlm_interval_detach(lock));
        OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
        return NULL;
}

ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                               struct ldlm_lock **lockp,
                               void *cookie, int *flags)
{
        struct ldlm_lock *lock = *lockp;
        struct ldlm_resource *res = lock->l_resource;
        int local = ns_is_client(res->lr_namespace);
        ldlm_processing_policy policy;
        ldlm_error_t rc = ELDLM_OK;
        struct ldlm_interval *node = NULL;
        ENTRY;

        do_gettimeofday(&lock->l_enqueued_time);
        /* policies are not executed on the client or during replay */
        if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
            && !local && ns->ns_policy) {
                rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
                                   NULL);
                if (rc == ELDLM_LOCK_REPLACED) {
                        /* The lock that was returned has already been granted,
                         * and placed into lockp.  If it's not the same as the
                         * one we passed in, then destroy the old one and our
                         * work here is done. */
                        if (lock != *lockp) {
                                ldlm_lock_destroy(lock);
                                LDLM_LOCK_PUT(lock);
                        }
                        *flags |= LDLM_FL_LOCK_CHANGED;
                        RETURN(0);
                } else if (rc != ELDLM_OK ||
                           (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
                        ldlm_lock_destroy(lock);
                        RETURN(rc);
                }
        }

        /* For a replaying lock, it might already be in the granted list, so
         * unlinking the lock would cause the interval node to be freed; we
         * have to allocate the interval node early, otherwise we can't
         * regrant this lock in the future. - jay */
        if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
                OBD_SLAB_ALLOC(node, ldlm_interval_slab, CFS_ALLOC_IO,
                               sizeof(*node));

        lock_res_and_lock(lock);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted
                 * before we got a chance to actually enqueue it.  We don't
                 * need to do anything else. */
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                            LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
                GOTO(out, ELDLM_OK);
        }

        ldlm_resource_unlink_lock(lock);
        if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) {
                if (node == NULL) {
                        ldlm_lock_destroy_nolock(lock);
                        GOTO(out, rc = -ENOMEM);
                }

                CFS_INIT_LIST_HEAD(&node->li_group);
                ldlm_interval_attach(node, lock);
                node = NULL;
        }

        /* Some flags from the enqueue want to make it into the AST, via the
         * lock's l_flags. */
        lock->l_flags |= *flags & LDLM_AST_DISCARD_DATA;

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below).  In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags'.
         *
         * We do exactly the same thing during recovery, when the server is
         * more or less trusting the clients not to lie.
         *
         * FIXME (bug 268): Detect obvious lies by checking compatibility in
         * granted/converting queues. */
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                else
                        ldlm_grant_lock(lock, NULL);
                GOTO(out, ELDLM_OK);
        } else if (*flags & LDLM_FL_REPLAY) {
                if (*flags & LDLM_FL_BLOCK_CONV) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_WAIT) {
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
                        ldlm_grant_lock(lock, NULL);
                        GOTO(out, ELDLM_OK);
                }
                /* If no flags, fall through to normal enqueue path. */
        }

        policy = ldlm_processing_policy_table[res->lr_type];
        policy(lock, flags, 1, &rc, NULL);
        GOTO(out, rc);
out:
        unlock_res_and_lock(lock);
        if (node)
                OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
        return rc;
}

/* Must be called with namespace taken: queue is waiting or converting. */
int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
                         struct list_head *work_list)
{
        struct list_head *tmp, *pos;
        ldlm_processing_policy policy;
        int flags;
        int rc = LDLM_ITER_CONTINUE;
        ldlm_error_t err;
        ENTRY;

        check_res_locked(res);

        policy = ldlm_processing_policy_table[res->lr_type];
        LASSERT(policy);

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;
                pending = list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                flags = 0;
                rc = policy(pending, &flags, 0, &err, work_list);
                if (rc != LDLM_ITER_CONTINUE)
                        break;
        }

        RETURN(rc);
}

/* Helper function for ldlm_run_ast_work().
 *
 * Send an existing rpc set specified by @arg->set and then
 * destroy it. Create a new one if the @do_create flag is set. */
static void
ldlm_send_and_maybe_create_set(struct ldlm_cb_set_arg *arg, int do_create)
{
        ENTRY;

        ptlrpc_set_wait(arg->set);
        if (arg->type == LDLM_BL_CALLBACK)
                OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2);
        ptlrpc_set_destroy(arg->set);

        if (do_create)
                arg->set = ptlrpc_prep_set();

        EXIT;
}

static int
ldlm_work_bl_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
{
        struct ldlm_lock_desc d;
        struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_bl_ast);
        ENTRY;

        /* nobody should touch l_bl_ast */
        lock_res_and_lock(lock);
        list_del_init(&lock->l_bl_ast);

        LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
        LASSERT(lock->l_bl_ast_run == 0);
        LASSERT(lock->l_blocking_lock);
        lock->l_bl_ast_run++;
        unlock_res_and_lock(lock);

        ldlm_lock2desc(lock->l_blocking_lock, &d);

        LDLM_LOCK_PUT(lock->l_blocking_lock);
        lock->l_blocking_lock = NULL;
        lock->l_blocking_ast(lock, &d, (void *)arg,
                             LDLM_CB_BLOCKING);
        LDLM_LOCK_PUT(lock);

        RETURN(1);
}

static int
ldlm_work_cp_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
{
        struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_cp_ast);
        ldlm_completion_callback completion_callback;
        int rc = 0;
        ENTRY;

        /* It's possible to receive a completion AST before we've set
         * the l_completion_ast pointer: either because the AST arrived
         * before the reply, or simply because there's a small race
         * window between receiving the reply and finishing the local
         * enqueue. (bug 842)
         *
         * This can't happen with the blocking_ast, however, because we
         * will never call the local blocking_ast until we drop our
         * reader/writer reference, which we won't do until we get the
         * reply and finish enqueueing. */

        /* nobody should touch l_cp_ast */
        lock_res_and_lock(lock);
        list_del_init(&lock->l_cp_ast);
        LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
        /* save l_completion_ast since it can be changed by
         * mds_intent_policy(), see bug 14225 */
        completion_callback = lock->l_completion_ast;
        lock->l_flags &= ~LDLM_FL_CP_REQD;
        unlock_res_and_lock(lock);

        if (completion_callback != NULL) {
                completion_callback(lock, 0, (void *)arg);
                rc = 1;
        }
        LDLM_LOCK_PUT(lock);

        RETURN(rc);
}
1441
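/* Fire the revoke AST for one lock on the revoke work list: unlink the
 * lock from l_rk_ast and deliver a blocking AST whose descriptor claims
 * an EX lock is requested, forcing the holder to cancel.  Returns 1. */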
1442 static int
1443 ldlm_work_revoke_ast_lock(struct list_head *tmp, struct ldlm_cb_set_arg *arg)
1444 {
1445         struct ldlm_lock_desc desc;
1446         struct ldlm_lock *lock = list_entry(tmp, struct ldlm_lock, l_rk_ast);
1447         ENTRY;
1448
1449         list_del_init(&lock->l_rk_ast);
1450
1451         /* the desc just pretends an EX lock is requested, forcing a revoke */
1452         ldlm_lock2desc(lock, &desc);
1453         desc.l_req_mode = LCK_EX;
1454         desc.l_granted_mode = 0;
1455
1456         lock->l_blocking_ast(lock, &desc, (void*)arg, LDLM_CB_BLOCKING);
1457         LDLM_LOCK_PUT(lock);
1458
1459         RETURN(1);
1460 }
1461
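/* Deliver the pending ASTs on @rpc_list.  The per-lock work functions
 * attach their RPCs to @arg.set, and the set is sent and replaced every
 * PARALLEL_AST_LIMIT ASTs so a long list is flushed in bounded batches.
 * Returns -ERESTART if any AST requested that reprocessing be restarted,
 * 0 otherwise.  The typical caller pattern (a sketch of what
 * ldlm_reprocess_all() below actually does) is:
 *
 *      restart:
 *              ...collect completion ASTs on rpc_list...
 *              rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
 *              if (rc == -ERESTART)
 *                      goto restart;
 */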
1462 int ldlm_run_ast_work(struct list_head *rpc_list, ldlm_desc_ast_t ast_type)
1463 {
1464         struct ldlm_cb_set_arg arg;
1465         struct list_head *tmp, *pos;
1466         int (*work_ast_lock)(struct list_head *tmp, struct ldlm_cb_set_arg *arg);
1467         int ast_count;
1468         ENTRY;
1469
1470         arg.set = ptlrpc_prep_set();
1471         atomic_set(&arg.restart, 0);
1472         switch (ast_type) {
1473         case LDLM_WORK_BL_AST:
1474                 arg.type = LDLM_BL_CALLBACK;
1475                 work_ast_lock = ldlm_work_bl_ast_lock;
1476                 break;
1477         case LDLM_WORK_CP_AST:
1478                 arg.type = LDLM_CP_CALLBACK;
1479                 work_ast_lock = ldlm_work_cp_ast_lock;
1480                 break;
1481         case LDLM_WORK_REVOKE_AST:
1482                 arg.type = LDLM_BL_CALLBACK;
1483                 work_ast_lock = ldlm_work_revoke_ast_lock;
1484                 break;
1485         default:
1486                 LBUG();
1487         }
1488
1489         ast_count = 0;
1490         list_for_each_safe(tmp, pos, rpc_list) {
1491                 ast_count += work_ast_lock(tmp, &arg);
1492
1493                 /* Send the request set once it reaches PARALLEL_AST_LIMIT,
1494                  * and create a new set for the requests that remain on
1495                  * @rpc_list */
1496                 if (unlikely(ast_count == PARALLEL_AST_LIMIT)) {
1497                         ldlm_send_and_maybe_create_set(&arg, 1);
1498                         ast_count = 0;
1499                 }
1500         }
1501
1502         if (ast_count > 0)
1503                 ldlm_send_and_maybe_create_set(&arg, 0);
1504         else
1505                 /* If the number of ASTs is a multiple of
1506                  * PARALLEL_AST_LIMIT, or @rpc_list was initially empty,
1507                  * @arg.set must be destroyed here, otherwise we would
1508                  * leak memory. */
1509                 ptlrpc_set_destroy(arg.set);
1510
1511         RETURN(atomic_read(&arg.restart) ? -ERESTART : 0);
1512 }
1513
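/* Namespace-iterator wrapper around ldlm_reprocess_all(). */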
1514 static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
1515 {
1516         ldlm_reprocess_all(res);
1517         return LDLM_ITER_CONTINUE;
1518 }
1519
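/* Reprocess every resource in namespace @ns.  The hash chains are walked
 * under ns_hash_lock; each resource is pinned with a reference while the
 * spinlock is dropped for the actual reprocessing, then the walk resumes. */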
1520 void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
1521 {
1522         struct list_head *tmp;
1523         int i, rc;
1524
1525         if (ns == NULL)
1526                 return;
1527
1528         ENTRY;
1529         spin_lock(&ns->ns_hash_lock);
1530         for (i = 0; i < RES_HASH_SIZE; i++) {
1531                 tmp = ns->ns_hash[i].next;
1532                 while (tmp != &(ns->ns_hash[i])) {
1533                         struct ldlm_resource *res =
1534                                 list_entry(tmp, struct ldlm_resource, lr_hash);
1535
1536                         ldlm_resource_getref(res);
1537                         spin_unlock(&ns->ns_hash_lock);
1538
1539                         rc = reprocess_one_queue(res, NULL);
1540
1541                         spin_lock(&ns->ns_hash_lock);
1542                         tmp = tmp->next;
1543                         ldlm_resource_putref_locked(res);
1544
1545                         if (rc == LDLM_ITER_STOP)
1546                                 GOTO(out, rc);
1547                 }
1548         }
1549  out:
1550         spin_unlock(&ns->ns_hash_lock);
1551         EXIT;
1552 }
1553
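/* Try to grant locks queued on @res: run the processing policy over the
 * converting queue and then the waiting queue, collecting completion ASTs
 * on a local list, and send them once the resource lock has been dropped.
 * The whole pass is restarted if ldlm_run_ast_work() returns -ERESTART. */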
1554 void ldlm_reprocess_all(struct ldlm_resource *res)
1555 {
1556         CFS_LIST_HEAD(rpc_list);
1557         int rc;
1558         ENTRY;
1559
1560         /* Local lock trees don't get reprocessed. */
1561         if (ns_is_client(res->lr_namespace)) {
1562                 EXIT;
1563                 return;
1564         }
1565
1566  restart:
1567         lock_res(res);
1568         rc = ldlm_reprocess_queue(res, &res->lr_converting, &rpc_list);
1569         if (rc == LDLM_ITER_CONTINUE)
1570                 ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
1571         unlock_res(res);
1572
1573         rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
1574         if (rc == -ERESTART) {
1575                 LASSERT(list_empty(&rpc_list));
1576                 goto restart;
1577         }
1578         EXIT;
1579 }
1580
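/* Run the blocking AST with LDLM_CB_CANCELING exactly once per lock.
 * Called with the resource lock held; the lock is released around the
 * callback itself and retaken afterwards. */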
1581 void ldlm_cancel_callback(struct ldlm_lock *lock)
1582 {
1583         check_res_locked(lock->l_resource);
1584         if (!(lock->l_flags & LDLM_FL_CANCEL)) {
1585                 lock->l_flags |= LDLM_FL_CANCEL;
1586                 if (lock->l_blocking_ast) {
1587                         // l_check_no_ns_lock(ns);
1588                         unlock_res_and_lock(lock);
1589                         lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
1590                                              LDLM_CB_CANCELING);
1591                         lock_res_and_lock(lock);
1592                 } else {
1593                         LDLM_DEBUG(lock, "no blocking ast");
1594                 }
1595         }
1596         lock->l_flags |= LDLM_FL_BL_DONE;
1597 }
1598
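/* Remove @req from the mode and policy skiplists.  Only PLAIN and IBITS
 * locks are kept on skiplists, so other lock types are left alone. */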
1599 void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
1600 {
1601         if (req->l_resource->lr_type != LDLM_PLAIN &&
1602             req->l_resource->lr_type != LDLM_IBITS)
1603                 return;
1604
1605         list_del_init(&req->l_sl_policy);
1606         list_del_init(&req->l_sl_mode);
1607 }
1608
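/* Cancel @lock locally: remove it from the waiting-lock timeout list, run
 * the cancellation callback, unlink the lock from its resource, and mark
 * it destroyed.  The lock must not hold any reader or writer references
 * when this is called. */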
1609 void ldlm_lock_cancel(struct ldlm_lock *lock)
1610 {
1611         struct ldlm_resource *res;
1612         struct ldlm_namespace *ns;
1613         ENTRY;
1614
1615         lock_res_and_lock(lock);
1616
1617         res = lock->l_resource;
1618         ns = res->lr_namespace;
1619
1620         /* Please do not, no matter how tempting, remove this LBUG without
1621          * talking to me first. -phik */
1622         if (lock->l_readers || lock->l_writers) {
1623                 LDLM_ERROR(lock, "lock still has references");
1624                 LBUG();
1625         }
1626
1627         ldlm_del_waiting_lock(lock);
1628
1629         /* Run the cancel callback; it may drop and retake the res lock. */
1630         ldlm_cancel_callback(lock);
1631
1632         /* Yes, a second time, in case the lock was re-added while we
1633            were running without the res lock in ldlm_cancel_callback */
1634         ldlm_del_waiting_lock(lock);
1635         ldlm_resource_unlink_lock(lock);
1636         ldlm_lock_destroy_nolock(lock);
1637
1638         if (lock->l_granted_mode == lock->l_req_mode)
1639                 ldlm_pool_del(&ns->ns_pool, lock);
1640
1641         /* Make sure we will not be called again for the same lock,
1642          * which is possible if lock->l_granted_mode is not zeroed out */
1643         lock->l_granted_mode = LCK_MINMODE;
1644         unlock_res_and_lock(lock);
1645
1646         EXIT;
1647 }
1648
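/* Attach opaque caller data to the lock behind @lockh; it is later handed
 * to the ASTs as l_ast_data.  Returns -EINVAL if the handle no longer
 * resolves to a lock.  A minimal usage sketch (the handle and data names
 * are hypothetical caller-side variables, not part of this file):
 *
 *      struct lustre_handle lockh;     // filled in by an earlier enqueue
 *      if (ldlm_lock_set_data(&lockh, inode) != 0)
 *              CERROR("lock handle is stale\n");
 */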
1649 int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
1650 {
1651         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
1652         ENTRY;
1653
1654         if (lock == NULL)
1655                 RETURN(-EINVAL);
1656
1657         lock->l_ast_data = data;
1658         LDLM_LOCK_PUT(lock);
1659         RETURN(0);
1660 }
1661
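/* Per-lock callback for ldlm_cancel_locks_for_export(): update the
 * resource LVB, cancel the lock, and reprocess the resource so that
 * compatible queued locks may be granted. */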
1662 void ldlm_cancel_locks_for_export_cb(void *obj, void *data)
1663 {
1664         struct obd_export    *exp = data;
1665         struct ldlm_lock     *lock = obj;
1666         struct ldlm_resource *res;
1667
1668         res = ldlm_resource_getref(lock->l_resource);
1669         LDLM_LOCK_GET(lock);
1670
1671         LDLM_DEBUG(lock, "export %p", exp);
1672         ldlm_res_lvbo_update(res, NULL, 0, 1);
1673         ldlm_lock_cancel(lock);
1674         ldlm_reprocess_all(res);
1675         ldlm_resource_putref(res);
1676         LDLM_LOCK_PUT(lock);
1677 }
1678
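/* Cancel every lock still held by @exp; the export's lock hash is drained
 * until it is empty. */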
1679 void ldlm_cancel_locks_for_export(struct obd_export *exp)
1680 {
1681         lustre_hash_for_each_empty(exp->exp_lock_hash,
1682                                    ldlm_cancel_locks_for_export_cb, exp);
1683 }
1684
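/* Convert @lock to @new_mode.  Only the PR -> PW conversion is expected
 * at present (see the LASSERTF below).  On the server the processing
 * policy decides whether the converted lock can be granted immediately;
 * on a client the lock is queued on the converting list.  Returns the
 * lock's resource, or NULL if the interval node allocation fails or the
 * policy decides the conversion cannot proceed yet. */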
1685 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
1686                                         __u32 *flags)
1687 {
1688         CFS_LIST_HEAD(rpc_list);
1689         struct ldlm_resource *res;
1690         struct ldlm_namespace *ns;
1691         int granted = 0;
1692         int old_mode, rc;
1693         struct sl_insert_point prev;
1694         ldlm_error_t err;
1695         struct ldlm_interval *node;
1696         ENTRY;
1697
1698         if (new_mode == lock->l_granted_mode) { // No changes? Just return.
1699                 *flags |= LDLM_FL_BLOCK_GRANTED;
1700                 RETURN(lock->l_resource);
1701         }
1702
1703         /* I can't check the type of the lock here because its bitlock
1704          * is not held, so do the allocation blindly. -jay */
1705         OBD_SLAB_ALLOC(node, ldlm_interval_slab, CFS_ALLOC_IO, sizeof(*node));
1706         if (node == NULL)  /* Actually, this causes EDEADLOCK to be returned */
1707                 RETURN(NULL);
1708
1709         LASSERTF(new_mode == LCK_PW && lock->l_granted_mode == LCK_PR,
1710                  "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
1711
1712         lock_res_and_lock(lock);
1713
1714         res = lock->l_resource;
1715         ns = res->lr_namespace;
1716
1717         old_mode = lock->l_req_mode;
1718         lock->l_req_mode = new_mode;
1719         if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
1720                 /* remember the position where the lock might be added
1721                  * back to the granted list later, and also remember
1722                  * the join mode for skiplist fixing. */
1723                 prev.res_link = lock->l_res_link.prev;
1724                 prev.mode_link = lock->l_sl_mode.prev;
1725                 prev.policy_link = lock->l_sl_policy.prev;
1726                 ldlm_resource_unlink_lock(lock);
1727         } else {
1728                 ldlm_resource_unlink_lock(lock);
1729                 if (res->lr_type == LDLM_EXTENT) {
1730                         /* FIXME: ugly code; we have to attach the lock
1731                          * to an interval node again since it may be
1732                          * granted soon */
1733                         CFS_INIT_LIST_HEAD(&node->li_group);
1734                         ldlm_interval_attach(node, lock);
1735                         node = NULL;
1736                 }
1737         }
1738
1739         /* If this is a local resource, put it on the appropriate list. */
1740         if (ns_is_client(res->lr_namespace)) {
1741                 if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
1742                         ldlm_resource_add_lock(res, &res->lr_converting, lock);
1743                 } else {
1744                         /* This should never happen, because of the way the
1745                          * server handles conversions. */
1746                         LDLM_ERROR(lock, "Erroneous flags %d on local lock\n",
1747                                    *flags);
1748                         LBUG();
1749
1750                         ldlm_grant_lock(lock, &rpc_list);
1751                         granted = 1;
1752                         /* FIXME: completion must not run with ns_lock held! */
1753                         if (lock->l_completion_ast)
1754                                 lock->l_completion_ast(lock, 0, NULL);
1755                 }
1756         } else {
1757                 int pflags = 0;
1758                 ldlm_processing_policy policy;
1759                 policy = ldlm_processing_policy_table[res->lr_type];
1760                 rc = policy(lock, &pflags, 0, &err, &rpc_list);
1761                 if (rc == LDLM_ITER_STOP) {
1762                         lock->l_req_mode = old_mode;
1763                         if (res->lr_type == LDLM_EXTENT)
1764                                 ldlm_extent_add_lock(res, lock);
1765                         else
1766                                 ldlm_granted_list_add_lock(lock, &prev);
1767
1768                         res = NULL;
1769                 } else {
1770                         *flags |= LDLM_FL_BLOCK_GRANTED;
1771                         granted = 1;
1772                 }
1773         }
1774         unlock_res_and_lock(lock);
1775
1776         if (granted)
1777                 ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
1778         if (node)
1779                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
1780         RETURN(res);
1781 }
1782
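/* Dump the state of @lock to the debug log at @level: handle, peer NID,
 * resource name, requested and granted modes, and the type-specific
 * policy data. */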
1783 void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
1784 {
1785         struct obd_device *obd = NULL;
1786
1787         if (!((libcfs_debug | D_ERROR) & level))
1788                 return;
1789
1790         if (!lock) {
1791                 CDEBUG(level, "  NULL LDLM lock\n");
1792                 return;
1793         }
1794
1795         CDEBUG(level," -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
1796                lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
1797                pos, lock->l_pid);
1798         if (lock->l_conn_export != NULL)
1799                 obd = lock->l_conn_export->exp_obd;
1800         if (lock->l_export && lock->l_export->exp_connection) {
1801                 CDEBUG(level, "  Node: NID %s (rhandle: "LPX64")\n",
1802                      libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid),
1803                      lock->l_remote_handle.cookie);
1804         } else if (obd == NULL) {
1805                 CDEBUG(level, "  Node: local\n");
1806         } else {
1807                 struct obd_import *imp = obd->u.cli.cl_import;
1808                 CDEBUG(level, "  Node: NID %s (rhandle: "LPX64")\n",
1809                        libcfs_nid2str(imp->imp_connection->c_peer.nid),
1810                        lock->l_remote_handle.cookie);
1811         }
1812         CDEBUG(level, "  Resource: %p ("LPU64"/"LPU64"/"LPU64")\n",
1813                   lock->l_resource,
1814                   lock->l_resource->lr_name.name[0],
1815                   lock->l_resource->lr_name.name[1],
1816                   lock->l_resource->lr_name.name[2]);
1817         CDEBUG(level, "  Req mode: %s, grant mode: %s, rc: %u, read: %d, "
1818                "write: %d flags: %#x\n", ldlm_lockname[lock->l_req_mode],
1819                ldlm_lockname[lock->l_granted_mode],
1820                atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
1821                lock->l_flags);
1822         if (lock->l_resource->lr_type == LDLM_EXTENT)
1823                 CDEBUG(level, "  Extent: "LPU64" -> "LPU64
1824                        " (req "LPU64"-"LPU64")\n",
1825                        lock->l_policy_data.l_extent.start,
1826                        lock->l_policy_data.l_extent.end,
1827                        lock->l_req_extent.start, lock->l_req_extent.end);
1828         else if (lock->l_resource->lr_type == LDLM_FLOCK)
1829                 CDEBUG(level, "  Pid: %d Extent: "LPU64" -> "LPU64"\n",
1830                        lock->l_policy_data.l_flock.pid,
1831                        lock->l_policy_data.l_flock.start,
1832                        lock->l_policy_data.l_flock.end);
1833         else if (lock->l_resource->lr_type == LDLM_IBITS)
1834                 CDEBUG(level, "  Bits: "LPX64"\n",
1835                        lock->l_policy_data.l_inodebits.bits);
1836 }
1837
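/* Resolve @lockh and dump the resulting lock, taking and dropping a
 * reference around the dump.  Note that @level only gates the early
 * return; the dump itself is issued at D_OTHER. */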
1838 void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
1839 {
1840         struct ldlm_lock *lock;
1841
1842         if (!((libcfs_debug | D_ERROR) & level))
1843                 return;
1844
1845         lock = ldlm_handle2lock(lockh);
1846         if (lock == NULL)
1847                 return;
1848
1849         ldlm_lock_dump(D_OTHER, lock, 0);
1850
1851         LDLM_LOCK_PUT(lock);
1852 }
1853
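/* Backend for the LDLM_DEBUG() and LDLM_ERROR() macros: print @fmt
 * followed by a one-line summary of @lock, formatted according to the
 * resource type (extent, flock, inodebits, or plain/default). */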
1854 void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 level,
1855                       struct libcfs_debug_msg_data *data, const char *fmt,
1856                       ...)
1857 {
1858         va_list args;
1859         cfs_debug_limit_state_t *cdls = data->msg_cdls;
1860
1861         va_start(args, fmt);
1862
1863         if (lock->l_resource == NULL) {
1864                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
1865                                    data->msg_fn, data->msg_line, fmt, args,
1866                        " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1867                        "res: \?\? rrc=\?\? type: \?\?\? flags: %x remote: "
1868                        LPX64" expref: %d pid: %u\n", lock,
1869                        lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
1870                        lock->l_readers, lock->l_writers,
1871                        ldlm_lockname[lock->l_granted_mode],
1872                        ldlm_lockname[lock->l_req_mode],
1873                        lock->l_flags, lock->l_remote_handle.cookie,
1874                        lock->l_export ?
1875                        atomic_read(&lock->l_export->exp_refcount) : -99,
1876                        lock->l_pid);
1877                 va_end(args);
1878                 return;
1879         }
1880
1881         switch (lock->l_resource->lr_type) {
1882         case LDLM_EXTENT:
1883                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
1884                                    data->msg_fn, data->msg_line, fmt, args,
1885                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1886                        "res: "LPU64"/"LPU64" rrc: %d type: %s ["LPU64"->"LPU64
1887                        "] (req "LPU64"->"LPU64") flags: %x remote: "LPX64
1888                        " expref: %d pid: %u\n",
1889                        lock->l_resource->lr_namespace->ns_name, lock,
1890                        lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
1891                        lock->l_readers, lock->l_writers,
1892                        ldlm_lockname[lock->l_granted_mode],
1893                        ldlm_lockname[lock->l_req_mode],
1894                        lock->l_resource->lr_name.name[0],
1895                        lock->l_resource->lr_name.name[1],
1896                        atomic_read(&lock->l_resource->lr_refcount),
1897                        ldlm_typename[lock->l_resource->lr_type],
1898                        lock->l_policy_data.l_extent.start,
1899                        lock->l_policy_data.l_extent.end,
1900                        lock->l_req_extent.start, lock->l_req_extent.end,
1901                        lock->l_flags, lock->l_remote_handle.cookie,
1902                        lock->l_export ?
1903                        atomic_read(&lock->l_export->exp_refcount) : -99,
1904                        lock->l_pid);
1905                 break;
1906
1907         case LDLM_FLOCK:
1908                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
1909                                    data->msg_fn, data->msg_line, fmt, args,
1910                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1911                        "res: "LPU64"/"LPU64" rrc: %d type: %s pid: %d "
1912                        "["LPU64"->"LPU64"] flags: %x remote: "LPX64
1913                        " expref: %d pid: %u\n",
1914                        lock->l_resource->lr_namespace->ns_name, lock,
1915                        lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
1916                        lock->l_readers, lock->l_writers,
1917                        ldlm_lockname[lock->l_granted_mode],
1918                        ldlm_lockname[lock->l_req_mode],
1919                        lock->l_resource->lr_name.name[0],
1920                        lock->l_resource->lr_name.name[1],
1921                        atomic_read(&lock->l_resource->lr_refcount),
1922                        ldlm_typename[lock->l_resource->lr_type],
1923                        lock->l_policy_data.l_flock.pid,
1924                        lock->l_policy_data.l_flock.start,
1925                        lock->l_policy_data.l_flock.end,
1926                        lock->l_flags, lock->l_remote_handle.cookie,
1927                        lock->l_export ?
1928                        atomic_read(&lock->l_export->exp_refcount) : -99,
1929                        lock->l_pid);
1930                 break;
1931
1932         case LDLM_IBITS:
1933                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
1934                                    data->msg_fn, data->msg_line, fmt, args,
1935                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1936                        "res: "LPU64"/"LPU64" bits "LPX64" rrc: %d type: %s "
1937                        "flags: %x remote: "LPX64" expref: %d "
1938                        "pid %u\n",
1939                        lock->l_resource->lr_namespace->ns_name,
1940                        lock, lock->l_handle.h_cookie,
1941                        atomic_read (&lock->l_refc),
1942                        lock->l_readers, lock->l_writers,
1943                        ldlm_lockname[lock->l_granted_mode],
1944                        ldlm_lockname[lock->l_req_mode],
1945                        lock->l_resource->lr_name.name[0],
1946                        lock->l_resource->lr_name.name[1],
1947                        lock->l_policy_data.l_inodebits.bits,
1948                        atomic_read(&lock->l_resource->lr_refcount),
1949                        ldlm_typename[lock->l_resource->lr_type],
1950                        lock->l_flags, lock->l_remote_handle.cookie,
1951                        lock->l_export ?
1952                        atomic_read(&lock->l_export->exp_refcount) : -99,
1953                        lock->l_pid);
1954                 break;
1955
1956         default:
1957                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
1958                                    data->msg_fn, data->msg_line, fmt, args,
1959                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1960                        "res: "LPU64"/"LPU64" rrc: %d type: %s flags: %x "
1961                        "remote: "LPX64" expref: %d pid: %u\n",
1962                        lock->l_resource->lr_namespace->ns_name,
1963                        lock, lock->l_handle.h_cookie,
1964                        atomic_read (&lock->l_refc),
1965                        lock->l_readers, lock->l_writers,
1966                        ldlm_lockname[lock->l_granted_mode],
1967                        ldlm_lockname[lock->l_req_mode],
1968                        lock->l_resource->lr_name.name[0],
1969                        lock->l_resource->lr_name.name[1],
1970                        atomic_read(&lock->l_resource->lr_refcount),
1971                        ldlm_typename[lock->l_resource->lr_type],
1972                        lock->l_flags, lock->l_remote_handle.cookie,
1973                        lock->l_export ?
1974                        atomic_read(&lock->l_export->exp_refcount) : -99,
1975                        lock->l_pid);
1976                 break;
1977         }
1978         va_end(args);
1979 }
1980 EXPORT_SYMBOL(_ldlm_lock_debug);