b24336 ldlm_resource::lr_lvb_data is protected by wrong lock
fs/lustre-release.git: lustre/ldlm/ldlm_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ldlm/ldlm_lock.c
37  *
38  * Author: Peter Braam <braam@clusterfs.com>
39  * Author: Phil Schwan <phil@clusterfs.com>
40  */
41
42 #define DEBUG_SUBSYSTEM S_LDLM
43
44 #ifdef __KERNEL__
45 # include <libcfs/libcfs.h>
46 # ifndef HAVE_VFS_INTENT_PATCHES
47 # include <linux/lustre_intent.h>
48 # endif
49 #else
50 # include <liblustre.h>
51 #endif
52
53 #include <obd_class.h>
54 #include "ldlm_internal.h"
55
56 /* lock types */
57 char *ldlm_lockname[] = {
58         [0] "--",
59         [LCK_EX] "EX",
60         [LCK_PW] "PW",
61         [LCK_PR] "PR",
62         [LCK_CW] "CW",
63         [LCK_CR] "CR",
64         [LCK_NL] "NL",
65         [LCK_GROUP] "GROUP",
66         [LCK_COS] "COS"
67 };
68
69 char *ldlm_typename[] = {
70         [LDLM_PLAIN] "PLN",
71         [LDLM_EXTENT] "EXT",
72         [LDLM_FLOCK] "FLK",
73         [LDLM_IBITS] "IBT",
74 };
75
76 char *ldlm_it2str(int it)
77 {
78         switch (it) {
79         case IT_OPEN:
80                 return "open";
81         case IT_CREAT:
82                 return "creat";
83         case (IT_OPEN | IT_CREAT):
84                 return "open|creat";
85         case IT_READDIR:
86                 return "readdir";
87         case IT_GETATTR:
88                 return "getattr";
89         case IT_LOOKUP:
90                 return "lookup";
91         case IT_UNLINK:
92                 return "unlink";
93         case IT_GETXATTR:
94                 return "getxattr";
95         default:
96                 CERROR("Unknown intent %d\n", it);
97                 return "UNKNOWN";
98         }
99 }
100
101 extern cfs_mem_cache_t *ldlm_lock_slab;
102
103 static ldlm_processing_policy ldlm_processing_policy_table[] = {
104         [LDLM_PLAIN] ldlm_process_plain_lock,
105         [LDLM_EXTENT] ldlm_process_extent_lock,
106 #ifdef __KERNEL__
107         [LDLM_FLOCK] ldlm_process_flock_lock,
108 #endif
109         [LDLM_IBITS] ldlm_process_inodebits_lock,
110 };
111
112 ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
113 {
114         return ldlm_processing_policy_table[res->lr_type];
115 }
116
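/*
 * A minimal sketch of how the per-type policy looked up above is invoked;
 * the helper name is hypothetical, and it assumes the caller already holds
 * lr_lock on the resource, as ldlm_lock_enqueue() and ldlm_reprocess_queue()
 * below do.
 */
static inline int example_run_processing_policy(struct ldlm_lock *lock,
                                                int *flags, int first_enq,
                                                cfs_list_t *work_list)
{
        ldlm_processing_policy policy;
        ldlm_error_t err = ELDLM_OK;

        check_res_locked(lock->l_resource);
        policy = ldlm_get_processing_policy(lock->l_resource);
        /* same calling convention as ldlm_lock_enqueue() uses below */
        return policy(lock, flags, first_enq, &err, work_list);
}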
117 void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
118 {
119         ns->ns_policy = arg;
120 }
121
122 /*
123  * REFCOUNTED LOCK OBJECTS
124  */
125
126
127 /*
128  * Lock refcounts, during creation:
129  *   - one special one for allocation, dec'd only once in destroy
130  *   - one for being a lock that's in-use
131  *   - one for the addref associated with a new lock
132  */
133 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
134 {
135         cfs_atomic_inc(&lock->l_refc);
136         return lock;
137 }
138
139 static void ldlm_lock_free(struct ldlm_lock *lock, size_t size)
140 {
141         LASSERT(size == sizeof(*lock));
142         OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
143 }
144
145 void ldlm_lock_put(struct ldlm_lock *lock)
146 {
147         ENTRY;
148
149         LASSERT(lock->l_resource != LP_POISON);
150         LASSERT(cfs_atomic_read(&lock->l_refc) > 0);
151         if (cfs_atomic_dec_and_test(&lock->l_refc)) {
152                 struct ldlm_resource *res;
153
154                 LDLM_DEBUG(lock,
155                            "final lock_put on destroyed lock, freeing it.");
156
157                 res = lock->l_resource;
158                 LASSERT(lock->l_destroyed);
159                 LASSERT(cfs_list_empty(&lock->l_res_link));
160                 LASSERT(cfs_list_empty(&lock->l_pending_chain));
161
162                 lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
163                                      LDLM_NSS_LOCKS);
164                 lu_ref_del(&res->lr_reference, "lock", lock);
165                 ldlm_resource_putref(res);
166                 lock->l_resource = NULL;
167                 if (lock->l_export) {
168                         class_export_lock_put(lock->l_export, lock);
169                         lock->l_export = NULL;
170                 }
171
172                 if (lock->l_lvb_data != NULL)
173                         OBD_FREE(lock->l_lvb_data, lock->l_lvb_len);
174
175                 ldlm_interval_free(ldlm_interval_detach(lock));
176                 lu_ref_fini(&lock->l_reference);
177                 OBD_FREE_RCU_CB(lock, sizeof(*lock), &lock->l_handle,
178                                 ldlm_lock_free);
179         }
180
181         EXIT;
182 }
183
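/*
 * A minimal usage sketch of the refcounting described above: pin the lock
 * with LDLM_LOCK_GET() while inspecting it, then drop the pin with
 * LDLM_LOCK_PUT(), which may free the lock if this was the last reference.
 * The helper name is hypothetical.
 */
static inline int example_lock_is_granted(struct ldlm_lock *lock)
{
        int granted;

        LDLM_LOCK_GET(lock);
        lock_res_and_lock(lock);
        granted = (lock->l_granted_mode == lock->l_req_mode);
        unlock_res_and_lock(lock);
        LDLM_LOCK_PUT(lock);

        return granted;
}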
184 int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
185 {
186         int rc = 0;
187         if (!cfs_list_empty(&lock->l_lru)) {
188                 struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
189
190                 LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
191                 cfs_list_del_init(&lock->l_lru);
192                 if (lock->l_flags & LDLM_FL_SKIPPED)
193                         lock->l_flags &= ~LDLM_FL_SKIPPED;
194                 LASSERT(ns->ns_nr_unused > 0);
195                 ns->ns_nr_unused--;
196                 rc = 1;
197         }
198         return rc;
199 }
200
201 int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
202 {
203         struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
204         int rc;
205
206         ENTRY;
207         if (lock->l_ns_srv) {
208                 LASSERT(cfs_list_empty(&lock->l_lru));
209                 RETURN(0);
210         }
211
212         cfs_spin_lock(&ns->ns_lock);
213         rc = ldlm_lock_remove_from_lru_nolock(lock);
214         cfs_spin_unlock(&ns->ns_lock);
215         EXIT;
216         return rc;
217 }
218
219 void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
220 {
221         struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
222
223         lock->l_last_used = cfs_time_current();
224         LASSERT(cfs_list_empty(&lock->l_lru));
225         LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
226         cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
227         LASSERT(ns->ns_nr_unused >= 0);
228         ns->ns_nr_unused++;
229 }
230
231 void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
232 {
233         struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
234
235         ENTRY;
236         cfs_spin_lock(&ns->ns_lock);
237         ldlm_lock_add_to_lru_nolock(lock);
238         cfs_spin_unlock(&ns->ns_lock);
239         EXIT;
240 }
241
242 void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
243 {
244         struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
245
246         ENTRY;
247         if (lock->l_ns_srv) {
248                 LASSERT(cfs_list_empty(&lock->l_lru));
249                 EXIT;
250                 return;
251         }
252
253         cfs_spin_lock(&ns->ns_lock);
254         if (!cfs_list_empty(&lock->l_lru)) {
255                 ldlm_lock_remove_from_lru_nolock(lock);
256                 ldlm_lock_add_to_lru_nolock(lock);
257         }
258         cfs_spin_unlock(&ns->ns_lock);
259         EXIT;
260 }
261
262 /* This used to have a 'strict' flag, which recovery would use to mark an
263  * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
264  * shall explain why it's gone: with the new hash table scheme, once you call
265  * ldlm_lock_destroy, you can never drop your final references on this lock.
266  * Because it's not in the hash table anymore.  -phil */
267 int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
268 {
269         ENTRY;
270
271         if (lock->l_readers || lock->l_writers) {
272                 LDLM_ERROR(lock, "lock still has references");
273                 ldlm_lock_dump(D_ERROR, lock, 0);
274                 LBUG();
275         }
276
277         if (!cfs_list_empty(&lock->l_res_link)) {
278                 LDLM_ERROR(lock, "lock still on resource");
279                 ldlm_lock_dump(D_ERROR, lock, 0);
280                 LBUG();
281         }
282
283         if (lock->l_destroyed) {
284                 LASSERT(cfs_list_empty(&lock->l_lru));
285                 EXIT;
286                 return 0;
287         }
288         lock->l_destroyed = 1;
289
290         if (lock->l_export && lock->l_export->exp_lock_hash &&
291             !cfs_hlist_unhashed(&lock->l_exp_hash))
292                 cfs_hash_del(lock->l_export->exp_lock_hash,
293                              &lock->l_remote_handle, &lock->l_exp_hash);
294
295         ldlm_lock_remove_from_lru(lock);
296         class_handle_unhash(&lock->l_handle);
297
298 #if 0
299         /* Wake anyone waiting for this lock */
300         /* FIXME: I should probably add yet another flag, instead of using
301          * l_export to only call this on clients */
302         if (lock->l_export)
303                 class_export_put(lock->l_export);
304         lock->l_export = NULL;
305         if (lock->l_export && lock->l_completion_ast)
306                 lock->l_completion_ast(lock, 0);
307 #endif
308         EXIT;
309         return 1;
310 }
311
312 void ldlm_lock_destroy(struct ldlm_lock *lock)
313 {
314         int first;
315         ENTRY;
316         lock_res_and_lock(lock);
317         first = ldlm_lock_destroy_internal(lock);
318         unlock_res_and_lock(lock);
319
320         /* drop reference from hashtable only for first destroy */
321         if (first) {
322                 lu_ref_del(&lock->l_reference, "hash", lock);
323                 LDLM_LOCK_RELEASE(lock);
324         }
325         EXIT;
326 }
327
328 void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
329 {
330         int first;
331         ENTRY;
332         first = ldlm_lock_destroy_internal(lock);
333         /* drop reference from hashtable only for first destroy */
334         if (first) {
335                 lu_ref_del(&lock->l_reference, "hash", lock);
336                 LDLM_LOCK_RELEASE(lock);
337         }
338         EXIT;
339 }
340
341 /* this is called by class_handle2object() with the handle lock taken */
342 static void lock_handle_addref(void *lock)
343 {
344         LDLM_LOCK_GET((struct ldlm_lock *)lock);
345 }
346
347 /*
348  * usage: pass in a resource on which you have done ldlm_resource_get
349  *        new lock will take over the refcount.
350  * returns: lock with refcount 2 - one for current caller and one for remote
351  */
352 static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
353 {
354         struct ldlm_lock *lock;
355         ENTRY;
356
357         if (resource == NULL)
358                 LBUG();
359
360         OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, CFS_ALLOC_IO);
361         if (lock == NULL)
362                 RETURN(NULL);
363
364         cfs_spin_lock_init(&lock->l_lock);
365         lock->l_resource = resource;
366         lu_ref_add(&resource->lr_reference, "lock", lock);
367
368         cfs_atomic_set(&lock->l_refc, 2);
369         CFS_INIT_LIST_HEAD(&lock->l_res_link);
370         CFS_INIT_LIST_HEAD(&lock->l_lru);
371         CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
372         CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
373         CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
374         CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
375         cfs_waitq_init(&lock->l_waitq);
376         lock->l_blocking_lock = NULL;
377         CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
378         CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
379         CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
380
381         lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
382                              LDLM_NSS_LOCKS);
383         CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
384         class_handle_hash(&lock->l_handle, lock_handle_addref);
385
386         lu_ref_init(&lock->l_reference);
387         lu_ref_add(&lock->l_reference, "hash", lock);
388         lock->l_callback_timeout = 0;
389
390 #if LUSTRE_TRACKS_LOCK_EXP_REFS
391         CFS_INIT_LIST_HEAD(&lock->l_exp_refs_link);
392         lock->l_exp_refs_nr = 0;
393         lock->l_exp_refs_target = NULL;
394 #endif
395
396         RETURN(lock);
397 }
398
399 int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
400                               const struct ldlm_res_id *new_resid)
401 {
402         struct ldlm_resource *oldres = lock->l_resource;
403         struct ldlm_resource *newres;
404         int type;
405         ENTRY;
406
407         LASSERT(ns_is_client(ns));
408
409         lock_res_and_lock(lock);
410         if (memcmp(new_resid, &lock->l_resource->lr_name,
411                    sizeof(lock->l_resource->lr_name)) == 0) {
412                 /* Nothing to do */
413                 unlock_res_and_lock(lock);
414                 RETURN(0);
415         }
416
417         LASSERT(new_resid->name[0] != 0);
418
419         /* This function assumes that the lock isn't on any lists */
420         LASSERT(cfs_list_empty(&lock->l_res_link));
421
422         type = oldres->lr_type;
423         unlock_res_and_lock(lock);
424
425         newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
426         if (newres == NULL)
427                 RETURN(-ENOMEM);
428
429         lu_ref_add(&newres->lr_reference, "lock", lock);
430         /*
431          * To flip the lock from the old to the new resource, lock, oldres and
432          * newres have to be locked. Resource spin-locks are nested within
433          * lock->l_lock, and are taken in the memory address order to avoid
434          * dead-locks.
435          */
436         cfs_spin_lock(&lock->l_lock);
437         oldres = lock->l_resource;
438         if (oldres < newres) {
439                 lock_res(oldres);
440                 lock_res_nested(newres, LRT_NEW);
441         } else {
442                 lock_res(newres);
443                 lock_res_nested(oldres, LRT_NEW);
444         }
445         LASSERT(memcmp(new_resid, &oldres->lr_name,
446                        sizeof oldres->lr_name) != 0);
447         lock->l_resource = newres;
448         unlock_res(oldres);
449         unlock_res_and_lock(lock);
450
451         /* ...and the flowers are still standing! */
452         lu_ref_del(&oldres->lr_reference, "lock", lock);
453         ldlm_resource_putref(oldres);
454
455         RETURN(0);
456 }
457
458 /*
459  *  HANDLES
460  */
461
462 void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
463 {
464         lockh->cookie = lock->l_handle.h_cookie;
465 }
466
467 /* If flags are given: atomically get the lock and set the flags.
468  *           Return NULL if any of the requested flags is already set.
469  */
470
471 struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
472                                      int flags)
473 {
474         struct ldlm_lock *lock;
475         ENTRY;
476
477         LASSERT(handle);
478
479         lock = class_handle2object(handle->cookie);
480         if (lock == NULL)
481                 RETURN(NULL);
482
483         /* It's unlikely but possible that someone marked the lock as
484          * destroyed after we did handle2object on it */
485         if (flags == 0 && !lock->l_destroyed) {
486                 lu_ref_add(&lock->l_reference, "handle", cfs_current());
487                 RETURN(lock);
488         }
489
490         lock_res_and_lock(lock);
491
492         LASSERT(lock->l_resource != NULL);
493
494         lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current());
495         if (unlikely(lock->l_destroyed)) {
496                 unlock_res_and_lock(lock);
497                 CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
498                 LDLM_LOCK_PUT(lock);
499                 RETURN(NULL);
500         }
501
502         if (flags && (lock->l_flags & flags)) {
503                 unlock_res_and_lock(lock);
504                 LDLM_LOCK_PUT(lock);
505                 RETURN(NULL);
506         }
507
508         if (flags)
509                 lock->l_flags |= flags;
510
511         unlock_res_and_lock(lock);
512         RETURN(lock);
513 }
514
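/*
 * A minimal sketch of the handle round-trip: convert a lock into an opaque
 * handle with ldlm_lock2handle(), later look it up again (which takes a
 * reference via lock_handle_addref()), and drop that reference when done.
 * The helper name and the -ESTALE return value are hypothetical.
 */
static inline int example_handle_roundtrip(struct ldlm_lock *lock)
{
        struct lustre_handle lockh;
        struct ldlm_lock *found;

        ldlm_lock2handle(lock, &lockh);

        found = ldlm_handle2lock(&lockh);   /* NULL if destroyed meanwhile */
        if (found == NULL)
                return -ESTALE;

        LDLM_LOCK_PUT(found);               /* drop the lookup reference */
        return 0;
}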
515 void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
516 {
517         struct obd_export *exp = lock->l_export?:lock->l_conn_export;
518         /* INODEBITS_INTEROP: If the other side does not support
519          * inodebits, reply with a plain lock descriptor.
520          */
521         if ((lock->l_resource->lr_type == LDLM_IBITS) &&
522             (exp && !(exp->exp_connect_flags & OBD_CONNECT_IBITS))) {
523                 /* Make sure all the right bits are set in this lock we
524                    are going to pass to client */
525                 LASSERTF(lock->l_policy_data.l_inodebits.bits ==
526                          (MDS_INODELOCK_LOOKUP|MDS_INODELOCK_UPDATE),
527                          "Inappropriate inode lock bits during "
528                          "conversion " LPU64 "\n",
529                          lock->l_policy_data.l_inodebits.bits);
530
531                 ldlm_res2desc(lock->l_resource, &desc->l_resource);
532                 desc->l_resource.lr_type = LDLM_PLAIN;
533
534                 /* Convert "new" lock mode to something old client can
535                    understand */
536                 if ((lock->l_req_mode == LCK_CR) ||
537                     (lock->l_req_mode == LCK_CW))
538                         desc->l_req_mode = LCK_PR;
539                 else
540                         desc->l_req_mode = lock->l_req_mode;
541                 if ((lock->l_granted_mode == LCK_CR) ||
542                     (lock->l_granted_mode == LCK_CW)) {
543                         desc->l_granted_mode = LCK_PR;
544                 } else {
545                         /* We never grant PW/EX locks to clients */
546                         LASSERT((lock->l_granted_mode != LCK_PW) &&
547                                 (lock->l_granted_mode != LCK_EX));
548                         desc->l_granted_mode = lock->l_granted_mode;
549                 }
550
551                 /* We do not copy policy here, because there is no
552                    policy for plain locks */
553         } else {
554                 ldlm_res2desc(lock->l_resource, &desc->l_resource);
555                 desc->l_req_mode = lock->l_req_mode;
556                 desc->l_granted_mode = lock->l_granted_mode;
557                 desc->l_policy_data = lock->l_policy_data;
558         }
559 }
560
561 void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
562                            cfs_list_t *work_list)
563 {
564         if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
565                 LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
566                 lock->l_flags |= LDLM_FL_AST_SENT;
567                 /* If the enqueuing client said so, tell the AST recipient to
568                  * discard dirty data, rather than writing back. */
569                 if (new->l_flags & LDLM_AST_DISCARD_DATA)
570                         lock->l_flags |= LDLM_FL_DISCARD_DATA;
571                 LASSERT(cfs_list_empty(&lock->l_bl_ast));
572                 cfs_list_add(&lock->l_bl_ast, work_list);
573                 LDLM_LOCK_GET(lock);
574                 LASSERT(lock->l_blocking_lock == NULL);
575                 lock->l_blocking_lock = LDLM_LOCK_GET(new);
576         }
577 }
578
579 void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
580 {
581         if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
582                 lock->l_flags |= LDLM_FL_CP_REQD;
583                 LDLM_DEBUG(lock, "lock granted; sending completion AST.");
584                 LASSERT(cfs_list_empty(&lock->l_cp_ast));
585                 cfs_list_add(&lock->l_cp_ast, work_list);
586                 LDLM_LOCK_GET(lock);
587         }
588 }
589
590 /* must be called with lr_lock held */
591 void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
592                             cfs_list_t *work_list)
593 {
594         ENTRY;
595         check_res_locked(lock->l_resource);
596         if (new)
597                 ldlm_add_bl_work_item(lock, new, work_list);
598         else
599                 ldlm_add_cp_work_item(lock, work_list);
600         EXIT;
601 }
602
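/*
 * A minimal sketch of sending a queued blocking AST: collect work items on a
 * private list while holding lr_lock, then flush the list with
 * ldlm_run_ast_work() (defined later in this file) after dropping the lock.
 * The helper name and its arguments are hypothetical.
 */
static inline void example_send_blocking_ast(struct ldlm_resource *res,
                                             struct ldlm_lock *lock,
                                             struct ldlm_lock *conflicting)
{
        cfs_list_t rpc_list;

        CFS_INIT_LIST_HEAD(&rpc_list);

        lock_res(res);
        ldlm_add_ast_work_item(lock, conflicting, &rpc_list);
        unlock_res(res);

        ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST);
}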
603 void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
604 {
605         struct ldlm_lock *lock;
606
607         lock = ldlm_handle2lock(lockh);
608         LASSERT(lock != NULL);
609         ldlm_lock_addref_internal(lock, mode);
610         LDLM_LOCK_PUT(lock);
611 }
612
613 void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
614 {
615         ldlm_lock_remove_from_lru(lock);
616         if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
617                 lock->l_readers++;
618                 lu_ref_add_atomic(&lock->l_reference, "reader", lock);
619         }
620         if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
621                 lock->l_writers++;
622                 lu_ref_add_atomic(&lock->l_reference, "writer", lock);
623         }
624         LDLM_LOCK_GET(lock);
625         lu_ref_add_atomic(&lock->l_reference, "user", lock);
626         LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
627 }
628
629 /**
630  * Attempts to addref a lock, and fails if lock is already LDLM_FL_CBPENDING
631  * or destroyed.
632  *
633  * \retval 0 success, lock was addref-ed
634  *
635  * \retval -EAGAIN lock is being canceled.
636  */
637 int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
638 {
639         struct ldlm_lock *lock;
640         int               result;
641
642         result = -EAGAIN;
643         lock = ldlm_handle2lock(lockh);
644         if (lock != NULL) {
645                 lock_res_and_lock(lock);
646                 if (lock->l_readers != 0 || lock->l_writers != 0 ||
647                     !(lock->l_flags & LDLM_FL_CBPENDING)) {
648                         ldlm_lock_addref_internal_nolock(lock, mode);
649                         result = 0;
650                 }
651                 unlock_res_and_lock(lock);
652                 LDLM_LOCK_PUT(lock);
653         }
654         return result;
655 }
656
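/*
 * A minimal sketch of using ldlm_lock_addref_try(): attempt to reuse a
 * cached lock by handle, and fall back to a fresh enqueue (not shown) if
 * it is already being cancelled.  The helper name is hypothetical.
 */
static inline int example_try_reuse_lock(struct lustre_handle *lockh)
{
        int rc;

        rc = ldlm_lock_addref_try(lockh, LCK_PR);
        if (rc == -EAGAIN)
                return rc;      /* lock is going away; caller must re-enqueue */

        /* ... use whatever the lock protects ... */

        ldlm_lock_decref(lockh, LCK_PR);
        return 0;
}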
657 /* only called for local locks */
658 void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
659 {
660         lock_res_and_lock(lock);
661         ldlm_lock_addref_internal_nolock(lock, mode);
662         unlock_res_and_lock(lock);
663 }
664
665 /* Only called in ldlm_flock_destroy() and for local locks.
666  * For LDLM_FLOCK type locks, l_blocking_ast is NULL and
667  * ldlm_lock_remove_from_lru() does nothing, so it is safe for
668  * ldlm_flock_destroy() to use this variant, which drops that code. */
669 void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
670 {
671         LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
672         if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
673                 LASSERT(lock->l_readers > 0);
674                 lu_ref_del(&lock->l_reference, "reader", lock);
675                 lock->l_readers--;
676         }
677         if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
678                 LASSERT(lock->l_writers > 0);
679                 lu_ref_del(&lock->l_reference, "writer", lock);
680                 lock->l_writers--;
681         }
682
683         lu_ref_del(&lock->l_reference, "user", lock);
684         LDLM_LOCK_RELEASE(lock);    /* matches the LDLM_LOCK_GET() in addref */
685 }
686
687 void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
688 {
689         struct ldlm_namespace *ns;
690         ENTRY;
691
692         lock_res_and_lock(lock);
693
694         ns = ldlm_lock_to_ns(lock);
695
696         ldlm_lock_decref_internal_nolock(lock, mode);
697
698         if (lock->l_flags & LDLM_FL_LOCAL &&
699             !lock->l_readers && !lock->l_writers) {
700                 /* If this is a local lock on a server namespace and this was
701                  * the last reference, cancel the lock. */
702                 CDEBUG(D_INFO, "forcing cancel of local lock\n");
703                 lock->l_flags |= LDLM_FL_CBPENDING;
704         }
705
706         if (!lock->l_readers && !lock->l_writers &&
707             (lock->l_flags & LDLM_FL_CBPENDING)) {
708                 /* If we received a blocking AST and this was the last
709                  * reference, run the callback. */
710                 if (lock->l_ns_srv && lock->l_export)
711                         CERROR("FL_CBPENDING set on non-local lock--just a "
712                                "warning\n");
713
714                 LDLM_DEBUG(lock, "final decref done on cbpending lock");
715
716                 LDLM_LOCK_GET(lock); /* dropped by bl thread */
717                 ldlm_lock_remove_from_lru(lock);
718                 unlock_res_and_lock(lock);
719
720                 if (lock->l_flags & LDLM_FL_FAIL_LOC)
721                         OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
722
723                 if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
724                     ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
725                         ldlm_handle_bl_callback(ns, NULL, lock);
726         } else if (ns_is_client(ns) &&
727                    !lock->l_readers && !lock->l_writers &&
728                    !(lock->l_flags & LDLM_FL_BL_AST)) {
729                 /* If this is a client-side namespace and this was the last
730                  * reference, put it on the LRU. */
731                 ldlm_lock_add_to_lru(lock);
732                 unlock_res_and_lock(lock);
733
734                 if (lock->l_flags & LDLM_FL_FAIL_LOC)
735                         OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
736
737                 /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
738                  * are not supported by the server, otherwise, it is done on
739                  * enqueue. */
740                 if (!exp_connect_cancelset(lock->l_conn_export) &&
741                     !ns_connect_lru_resize(ns))
742                         ldlm_cancel_lru(ns, 0, LDLM_ASYNC, 0);
743         } else {
744                 unlock_res_and_lock(lock);
745         }
746
747         EXIT;
748 }
749
750 void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
751 {
752         struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
753         LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
754         ldlm_lock_decref_internal(lock, mode);
755         LDLM_LOCK_PUT(lock);
756 }
757
758 /* This will drop a lock reference and mark it for destruction, but will not
759  * necessarily cancel the lock before returning. */
760 void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
761 {
762         struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
763         ENTRY;
764
765         LASSERT(lock != NULL);
766
767         LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
768         lock_res_and_lock(lock);
769         lock->l_flags |= LDLM_FL_CBPENDING;
770         unlock_res_and_lock(lock);
771         ldlm_lock_decref_internal(lock, mode);
772         LDLM_LOCK_PUT(lock);
773 }
774
775 struct sl_insert_point {
776         cfs_list_t *res_link;
777         cfs_list_t *mode_link;
778         cfs_list_t *policy_link;
779 };
780
781 /*
782  * search_granted_lock
783  *
784  * Description:
785  *      Finds a position to insert the new lock.
786  * Parameters:
787  *      queue [input]:  the granted list where search acts on;
788  *      req [input]:    the lock whose position to be located;
789  *      prev [output]:  positions within 3 lists to insert @req to
790  * Return Value:
791  *      filled @prev
792  * NOTE: called by
793  *  - ldlm_grant_lock_with_skiplist
794  */
795 static void search_granted_lock(cfs_list_t *queue,
796                                 struct ldlm_lock *req,
797                                 struct sl_insert_point *prev)
798 {
799         cfs_list_t *tmp;
800         struct ldlm_lock *lock, *mode_end, *policy_end;
801         ENTRY;
802
803         cfs_list_for_each(tmp, queue) {
804                 lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
805
806                 mode_end = cfs_list_entry(lock->l_sl_mode.prev,
807                                           struct ldlm_lock, l_sl_mode);
808
809                 if (lock->l_req_mode != req->l_req_mode) {
810                         /* jump to last lock of mode group */
811                         tmp = &mode_end->l_res_link;
812                         continue;
813                 }
814
815                 /* suitable mode group is found */
816                 if (lock->l_resource->lr_type == LDLM_PLAIN) {
817                         /* insert point is last lock of the mode group */
818                         prev->res_link = &mode_end->l_res_link;
819                         prev->mode_link = &mode_end->l_sl_mode;
820                         prev->policy_link = &req->l_sl_policy;
821                         EXIT;
822                         return;
823                 } else if (lock->l_resource->lr_type == LDLM_IBITS) {
824                         for (;;) {
825                                 policy_end =
826                                         cfs_list_entry(lock->l_sl_policy.prev,
827                                                        struct ldlm_lock,
828                                                        l_sl_policy);
829
830                                 if (lock->l_policy_data.l_inodebits.bits ==
831                                     req->l_policy_data.l_inodebits.bits) {
832                                         /* insert point is last lock of
833                                          * the policy group */
834                                         prev->res_link =
835                                                 &policy_end->l_res_link;
836                                         prev->mode_link =
837                                                 &policy_end->l_sl_mode;
838                                         prev->policy_link =
839                                                 &policy_end->l_sl_policy;
840                                         EXIT;
841                                         return;
842                                 }
843
844                                 if (policy_end == mode_end)
845                                         /* done with mode group */
846                                         break;
847
848                                 /* go to next policy group within mode group */
849                                 tmp = policy_end->l_res_link.next;
850                                 lock = cfs_list_entry(tmp, struct ldlm_lock,
851                                                       l_res_link);
852                         }  /* loop over policy groups within the mode group */
853
854                         /* insert point is last lock of the mode group,
855                          * new policy group is started */
856                         prev->res_link = &mode_end->l_res_link;
857                         prev->mode_link = &mode_end->l_sl_mode;
858                         prev->policy_link = &req->l_sl_policy;
859                         EXIT;
860                         return;
861                 } else {
862                         LDLM_ERROR(lock,"is not LDLM_PLAIN or LDLM_IBITS lock");
863                         LBUG();
864                 }
865         }
866
867         /* insert point is last lock on the queue,
868          * new mode group and new policy group are started */
869         prev->res_link = queue->prev;
870         prev->mode_link = &req->l_sl_mode;
871         prev->policy_link = &req->l_sl_policy;
872         EXIT;
873         return;
874 }
875
876 static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
877                                        struct sl_insert_point *prev)
878 {
879         struct ldlm_resource *res = lock->l_resource;
880         ENTRY;
881
882         check_res_locked(res);
883
884         ldlm_resource_dump(D_INFO, res);
885         CDEBUG(D_OTHER, "About to add this lock:\n");
886         ldlm_lock_dump(D_OTHER, lock, 0);
887
888         if (lock->l_destroyed) {
889                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
890                 return;
891         }
892
893         LASSERT(cfs_list_empty(&lock->l_res_link));
894         LASSERT(cfs_list_empty(&lock->l_sl_mode));
895         LASSERT(cfs_list_empty(&lock->l_sl_policy));
896
897         cfs_list_add(&lock->l_res_link, prev->res_link);
898         cfs_list_add(&lock->l_sl_mode, prev->mode_link);
899         cfs_list_add(&lock->l_sl_policy, prev->policy_link);
900
901         EXIT;
902 }
903
904 static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
905 {
906         struct sl_insert_point prev;
907         ENTRY;
908
909         LASSERT(lock->l_req_mode == lock->l_granted_mode);
910
911         search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
912         ldlm_granted_list_add_lock(lock, &prev);
913         EXIT;
914 }
915
916 /* NOTE: called by
917  *  - ldlm_lock_enqueue
918  *  - ldlm_reprocess_queue
919  *  - ldlm_lock_convert
920  *
921  * must be called with lr_lock held
922  */
923 void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list)
924 {
925         struct ldlm_resource *res = lock->l_resource;
926         ENTRY;
927
928         check_res_locked(res);
929
930         lock->l_granted_mode = lock->l_req_mode;
931         if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
932                 ldlm_grant_lock_with_skiplist(lock);
933         else if (res->lr_type == LDLM_EXTENT)
934                 ldlm_extent_add_lock(res, lock);
935         else
936                 ldlm_resource_add_lock(res, &res->lr_granted, lock);
937
938         if (lock->l_granted_mode < res->lr_most_restr)
939                 res->lr_most_restr = lock->l_granted_mode;
940
941         if (work_list && lock->l_completion_ast != NULL)
942                 ldlm_add_ast_work_item(lock, NULL, work_list);
943
944         ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
945         EXIT;
946 }
947
948 /* returns a referenced lock or NULL.  See the flag descriptions below, in the
949  * comment above ldlm_lock_match */
950 static struct ldlm_lock *search_queue(cfs_list_t *queue,
951                                       ldlm_mode_t *mode,
952                                       ldlm_policy_data_t *policy,
953                                       struct ldlm_lock *old_lock,
954                                       int flags, int unref)
955 {
956         struct ldlm_lock *lock;
957         cfs_list_t       *tmp;
958
959         cfs_list_for_each(tmp, queue) {
960                 ldlm_mode_t match;
961
962                 lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
963
964                 if (lock == old_lock)
965                         break;
966
967                 /* llite sometimes wants to match locks that will be
968                  * canceled when their users drop, but we allow it to match
969                  * if it passes in CBPENDING and the lock still has users.
970                  * this is generally only going to be used by children
971                  * whose parents already hold a lock so forward progress
972                  * can still happen. */
973                 if (lock->l_flags & LDLM_FL_CBPENDING &&
974                     !(flags & LDLM_FL_CBPENDING))
975                         continue;
976                 if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
977                     lock->l_readers == 0 && lock->l_writers == 0)
978                         continue;
979
980                 if (!(lock->l_req_mode & *mode))
981                         continue;
982                 match = lock->l_req_mode;
983
984                 if (lock->l_resource->lr_type == LDLM_EXTENT &&
985                     (lock->l_policy_data.l_extent.start >
986                      policy->l_extent.start ||
987                      lock->l_policy_data.l_extent.end < policy->l_extent.end))
988                         continue;
989
990                 if (unlikely(match == LCK_GROUP) &&
991                     lock->l_resource->lr_type == LDLM_EXTENT &&
992                     lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
993                         continue;
994
995                 /* We match if we have an existing lock with the same or a
996                    wider set of bits. */
997                 if (lock->l_resource->lr_type == LDLM_IBITS &&
998                      ((lock->l_policy_data.l_inodebits.bits &
999                       policy->l_inodebits.bits) !=
1000                       policy->l_inodebits.bits))
1001                         continue;
1002
1003                 if (!unref &&
1004                     (lock->l_destroyed || (lock->l_flags & LDLM_FL_FAILED)))
1005                         continue;
1006
1007                 if ((flags & LDLM_FL_LOCAL_ONLY) &&
1008                     !(lock->l_flags & LDLM_FL_LOCAL))
1009                         continue;
1010
1011                 if (flags & LDLM_FL_TEST_LOCK) {
1012                         LDLM_LOCK_GET(lock);
1013                         ldlm_lock_touch_in_lru(lock);
1014                 } else {
1015                         ldlm_lock_addref_internal_nolock(lock, match);
1016                 }
1017                 *mode = match;
1018                 return lock;
1019         }
1020
1021         return NULL;
1022 }
1023
1024 void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
1025 {
1026         lock->l_flags |= LDLM_FL_LVB_READY;
1027         cfs_waitq_signal(&lock->l_waitq);
1028 }
1029
1030 void ldlm_lock_allow_match(struct ldlm_lock *lock)
1031 {
1032         lock_res_and_lock(lock);
1033         ldlm_lock_allow_match_locked(lock);
1034         unlock_res_and_lock(lock);
1035 }
1036
1037 /* Can be called in two ways:
1038  *
1039  * If 'ns' is NULL, then lockh describes an existing lock that we want to look
1040  * for a duplicate of.
1041  *
1042  * Otherwise, all of the fields must be filled in, to match against.
1043  *
1044  * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
1045  *     server (i.e., connh is NULL)
1046  * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
1047  *     list will be considered
1048  * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
1049  *     to be canceled can still be matched as long as they still have reader
1050  *     or writer references
1051  * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
1052  *     just tell us if we would have matched.
1053  *
1054  * Returns 1 if it finds an already-existing lock that is compatible; in this
1055  * case, lockh is filled in with an addref()ed lock
1056  *
1057  * We also check the user's security context; if that check fails we simply
1058  * return 0 (to keep caller code unchanged), and the context failure will be
1059  * discovered by the caller some time later.
1060  */
1061 ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
1062                             const struct ldlm_res_id *res_id, ldlm_type_t type,
1063                             ldlm_policy_data_t *policy, ldlm_mode_t mode,
1064                             struct lustre_handle *lockh, int unref)
1065 {
1066         struct ldlm_resource *res;
1067         struct ldlm_lock *lock, *old_lock = NULL;
1068         int rc = 0;
1069         ENTRY;
1070
1071         if (ns == NULL) {
1072                 old_lock = ldlm_handle2lock(lockh);
1073                 LASSERT(old_lock);
1074
1075                 ns = ldlm_lock_to_ns(old_lock);
1076                 res_id = &old_lock->l_resource->lr_name;
1077                 type = old_lock->l_resource->lr_type;
1078                 mode = old_lock->l_req_mode;
1079         }
1080
1081         res = ldlm_resource_get(ns, NULL, res_id, type, 0);
1082         if (res == NULL) {
1083                 LASSERT(old_lock == NULL);
1084                 RETURN(0);
1085         }
1086
1087         LDLM_RESOURCE_ADDREF(res);
1088         lock_res(res);
1089
1090         lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
1091                             flags, unref);
1092         if (lock != NULL)
1093                 GOTO(out, rc = 1);
1094         if (flags & LDLM_FL_BLOCK_GRANTED)
1095                 GOTO(out, rc = 0);
1096         lock = search_queue(&res->lr_converting, &mode, policy, old_lock,
1097                             flags, unref);
1098         if (lock != NULL)
1099                 GOTO(out, rc = 1);
1100         lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
1101                             flags, unref);
1102         if (lock != NULL)
1103                 GOTO(out, rc = 1);
1104
1105         EXIT;
1106  out:
1107         unlock_res(res);
1108         LDLM_RESOURCE_DELREF(res);
1109         ldlm_resource_putref(res);
1110
1111         if (lock) {
1112                 ldlm_lock2handle(lock, lockh);
1113                 if ((flags & LDLM_FL_LVB_READY) &&
1114                     (!(lock->l_flags & LDLM_FL_LVB_READY))) {
1115                         struct l_wait_info lwi;
1116                         if (lock->l_completion_ast) {
1117                                 int err = lock->l_completion_ast(lock,
1118                                                           LDLM_FL_WAIT_NOREPROC,
1119                                                                  NULL);
1120                                 if (err) {
1121                                         if (flags & LDLM_FL_TEST_LOCK)
1122                                                 LDLM_LOCK_RELEASE(lock);
1123                                         else
1124                                                 ldlm_lock_decref_internal(lock,
1125                                                                           mode);
1126                                         rc = 0;
1127                                         goto out2;
1128                                 }
1129                         }
1130
1131                         lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
1132                                                NULL, LWI_ON_SIGNAL_NOOP, NULL);
1133
1134                         /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
1135                         l_wait_event(lock->l_waitq,
1136                                      (lock->l_flags & LDLM_FL_LVB_READY), &lwi);
1137                 }
1138         }
1139  out2:
1140         if (rc) {
1141                 LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")",
1142                            (type == LDLM_PLAIN || type == LDLM_IBITS) ?
1143                                 res_id->name[2] : policy->l_extent.start,
1144                            (type == LDLM_PLAIN || type == LDLM_IBITS) ?
1145                                 res_id->name[3] : policy->l_extent.end);
1146
1147                 /* check user's security context */
1148                 if (lock->l_conn_export &&
1149                     sptlrpc_import_check_ctx(
1150                                 class_exp2cliimp(lock->l_conn_export))) {
1151                         if (!(flags & LDLM_FL_TEST_LOCK))
1152                                 ldlm_lock_decref_internal(lock, mode);
1153                         rc = 0;
1154                 }
1155
1156                 if (flags & LDLM_FL_TEST_LOCK)
1157                         LDLM_LOCK_RELEASE(lock);
1158
1159         } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
1160                 LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
1161                                   LPU64"/"LPU64" ("LPU64" "LPU64")", ns,
1162                                   type, mode, res_id->name[0], res_id->name[1],
1163                                   (type == LDLM_PLAIN || type == LDLM_IBITS) ?
1164                                         res_id->name[2] :policy->l_extent.start,
1165                                   (type == LDLM_PLAIN || type == LDLM_IBITS) ?
1166                                         res_id->name[3] : policy->l_extent.end);
1167         }
1168         if (old_lock)
1169                 LDLM_LOCK_PUT(old_lock);
1170
1171         return rc ? mode : 0;
1172 }
1173
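/*
 * A minimal sketch of a cached-lock check with ldlm_lock_match(): look for
 * an already granted PR or PW inodebits lock covering the LOOKUP bit, and
 * drop the reference that a successful (non-TEST) match takes.  The resource
 * id argument and helper name are hypothetical.
 */
static inline int example_have_cached_lock(struct ldlm_namespace *ns,
                                           const struct ldlm_res_id *res_id)
{
        struct lustre_handle lockh;
        ldlm_policy_data_t policy;
        ldlm_mode_t mode;

        policy.l_inodebits.bits = MDS_INODELOCK_LOOKUP;

        mode = ldlm_lock_match(ns, LDLM_FL_BLOCK_GRANTED, res_id, LDLM_IBITS,
                               &policy, LCK_PR | LCK_PW, &lockh, 0);
        if (mode == 0)
                return 0;                    /* nothing usable is cached */

        ldlm_lock_decref(&lockh, mode);      /* drop the match reference */
        return 1;
}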
1174 /* Returns a referenced lock */
1175 struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
1176                                    const struct ldlm_res_id *res_id,
1177                                    ldlm_type_t type,
1178                                    ldlm_mode_t mode,
1179                                    const struct ldlm_callback_suite *cbs,
1180                                    void *data, __u32 lvb_len)
1181 {
1182         struct ldlm_lock *lock;
1183         struct ldlm_resource *res;
1184         ENTRY;
1185
1186         res = ldlm_resource_get(ns, NULL, res_id, type, 1);
1187         if (res == NULL)
1188                 RETURN(NULL);
1189
1190         lock = ldlm_lock_new(res);
1191
1192         if (lock == NULL)
1193                 RETURN(NULL);
1194
1195         lock->l_req_mode = mode;
1196         lock->l_ast_data = data;
1197         lock->l_pid = cfs_curproc_pid();
1198         lock->l_ns_srv = ns_is_server(ns);
1199         if (cbs) {
1200                 lock->l_blocking_ast = cbs->lcs_blocking;
1201                 lock->l_completion_ast = cbs->lcs_completion;
1202                 lock->l_glimpse_ast = cbs->lcs_glimpse;
1203                 lock->l_weigh_ast = cbs->lcs_weigh;
1204         }
1205
1206         lock->l_tree_node = NULL;
1207         /* if this is an extent lock, allocate the interval tree node */
1208         if (type == LDLM_EXTENT) {
1209                 if (ldlm_interval_alloc(lock) == NULL)
1210                         GOTO(out, 0);
1211         }
1212
1213         if (lvb_len) {
1214                 lock->l_lvb_len = lvb_len;
1215                 OBD_ALLOC(lock->l_lvb_data, lvb_len);
1216                 if (lock->l_lvb_data == NULL)
1217                         GOTO(out, 0);
1218         }
1219
1220         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
1221                 GOTO(out, 0);
1222
1223         RETURN(lock);
1224
1225 out:
1226         ldlm_lock_destroy(lock);
1227         LDLM_LOCK_RELEASE(lock);
1228         return NULL;
1229 }
1230
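/*
 * A minimal sketch of creating a lock with a callback suite; the callback
 * pointers and resource id are hypothetical placeholders for caller-supplied
 * ones, and the helper name is not part of the LDLM API.
 */
static inline struct ldlm_lock *
example_create_plain_lock(struct ldlm_namespace *ns,
                          const struct ldlm_res_id *res_id,
                          ldlm_blocking_callback blocking,
                          ldlm_completion_callback completion)
{
        struct ldlm_callback_suite cbs = {
                .lcs_blocking   = blocking,
                .lcs_completion = completion,
        };

        /* no AST data and no LVB for this plain lock */
        return ldlm_lock_create(ns, res_id, LDLM_PLAIN, LCK_PR, &cbs, NULL, 0);
}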
1231 ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
1232                                struct ldlm_lock **lockp,
1233                                void *cookie, int *flags)
1234 {
1235         struct ldlm_lock *lock = *lockp;
1236         struct ldlm_resource *res = lock->l_resource;
1237         int local = ns_is_client(ldlm_res_to_ns(res));
1238         ldlm_processing_policy policy;
1239         ldlm_error_t rc = ELDLM_OK;
1240         struct ldlm_interval *node = NULL;
1241         ENTRY;
1242
1243         lock->l_last_activity = cfs_time_current_sec();
1244         /* policies are not executed on the client or during replay */
1245         if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
1246             && !local && ns->ns_policy) {
1247                 rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
1248                                    NULL);
1249                 if (rc == ELDLM_LOCK_REPLACED) {
1250                         /* The lock that was returned has already been granted,
1251                          * and placed into lockp.  If it's not the same as the
1252                          * one we passed in, then destroy the old one and our
1253                          * work here is done. */
1254                         if (lock != *lockp) {
1255                                 ldlm_lock_destroy(lock);
1256                                 LDLM_LOCK_RELEASE(lock);
1257                         }
1258                         *flags |= LDLM_FL_LOCK_CHANGED;
1259                         RETURN(0);
1260                 } else if (rc != ELDLM_OK ||
1261                            (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
1262                         ldlm_lock_destroy(lock);
1263                         RETURN(rc);
1264                 }
1265         }
1266
1267         /* For a replaying lock, it might already be in the granted list, so
1268          * unlinking the lock will cause the interval node to be freed; we
1269          * have to allocate the interval node early, otherwise we can't regrant
1270          * this lock in the future. - jay */
1271         if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
1272                 OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
1273
1274         lock_res_and_lock(lock);
1275         if (local && lock->l_req_mode == lock->l_granted_mode) {
1276                 /* The server returned a blocked lock, but it was granted
1277                  * before we got a chance to actually enqueue it.  We don't
1278                  * need to do anything else. */
1279                 *flags &= ~(LDLM_FL_BLOCK_GRANTED |
1280                             LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
1281                 GOTO(out, ELDLM_OK);
1282         }
1283
1284         ldlm_resource_unlink_lock(lock);
1285         if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) {
1286                 if (node == NULL) {
1287                         ldlm_lock_destroy_nolock(lock);
1288                         GOTO(out, rc = -ENOMEM);
1289                 }
1290
1291                 CFS_INIT_LIST_HEAD(&node->li_group);
1292                 ldlm_interval_attach(node, lock);
1293                 node = NULL;
1294         }
1295
1296         /* Some flags from the enqueue want to make it into the AST, via the
1297          * lock's l_flags. */
1298         lock->l_flags |= *flags & LDLM_AST_DISCARD_DATA;
1299
1300         /* This distinction between local lock trees is very important; a client
1301          * namespace only has information about locks taken by that client, and
1302          * thus doesn't have enough information to decide for itself if it can
1303          * be granted (below).  In this case, we do exactly what the server
1304          * tells us to do, as dictated by the 'flags'.
1305          *
1306          * We do exactly the same thing during recovery, when the server is
1307          * more or less trusting the clients not to lie.
1308          *
1309          * FIXME (bug 268): Detect obvious lies by checking compatibility in
1310          * granted/converting queues. */
1311         if (local) {
1312                 if (*flags & LDLM_FL_BLOCK_CONV)
1313                         ldlm_resource_add_lock(res, &res->lr_converting, lock);
1314                 else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
1315                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
1316                 else
1317                         ldlm_grant_lock(lock, NULL);
1318                 GOTO(out, ELDLM_OK);
1319         } else if (*flags & LDLM_FL_REPLAY) {
1320                 if (*flags & LDLM_FL_BLOCK_CONV) {
1321                         ldlm_resource_add_lock(res, &res->lr_converting, lock);
1322                         GOTO(out, ELDLM_OK);
1323                 } else if (*flags & LDLM_FL_BLOCK_WAIT) {
1324                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
1325                         GOTO(out, ELDLM_OK);
1326                 } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
1327                         ldlm_grant_lock(lock, NULL);
1328                         GOTO(out, ELDLM_OK);
1329                 }
1330                 /* If no flags, fall through to normal enqueue path. */
1331         }
1332
1333         policy = ldlm_processing_policy_table[res->lr_type];
1334         policy(lock, flags, 1, &rc, NULL);
1335         GOTO(out, rc);
1336 out:
1337         unlock_res_and_lock(lock);
1338         if (node)
1339                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
1340         return rc;
1341 }
1342
1343 /* Must be called with the resource lock held: queue is waiting or converting. */
1344 int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
1345                          cfs_list_t *work_list)
1346 {
1347         cfs_list_t *tmp, *pos;
1348         ldlm_processing_policy policy;
1349         int flags;
1350         int rc = LDLM_ITER_CONTINUE;
1351         ldlm_error_t err;
1352         ENTRY;
1353
1354         check_res_locked(res);
1355
1356         policy = ldlm_processing_policy_table[res->lr_type];
1357         LASSERT(policy);
1358
1359         cfs_list_for_each_safe(tmp, pos, queue) {
1360                 struct ldlm_lock *pending;
1361                 pending = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
1362
1363                 CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
1364
1365                 flags = 0;
1366                 rc = policy(pending, &flags, 0, &err, work_list);
1367                 if (rc != LDLM_ITER_CONTINUE)
1368                         break;
1369         }
1370
1371         RETURN(rc);
1372 }
1373
1374 /* Helper function for ldlm_run_ast_work().
1375  *
1376  * Send an existing rpc set specified by @arg->set and then
1377  * destroy it. Create new one if @do_create flag is set. */
1378 static void
1379 ldlm_send_and_maybe_create_set(struct ldlm_cb_set_arg *arg, int do_create)
1380 {
1381         ENTRY;
1382
1383         ptlrpc_set_wait(arg->set);
1384         if (arg->type == LDLM_BL_CALLBACK)
1385                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2);
1386         ptlrpc_set_destroy(arg->set);
1387
1388         if (do_create)
1389                 arg->set = ptlrpc_prep_set();
1390
1391         EXIT;
1392 }
1393
1394 static int
1395 ldlm_work_bl_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
1396 {
1397         struct ldlm_lock_desc d;
1398         struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
1399                                                 l_bl_ast);
1400         ENTRY;
1401
1402         /* nobody should touch l_bl_ast */
1403         lock_res_and_lock(lock);
1404         cfs_list_del_init(&lock->l_bl_ast);
1405
1406         LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
1407         LASSERT(lock->l_bl_ast_run == 0);
1408         LASSERT(lock->l_blocking_lock);
1409         lock->l_bl_ast_run++;
1410         unlock_res_and_lock(lock);
1411
1412         ldlm_lock2desc(lock->l_blocking_lock, &d);
1413
1414         lock->l_blocking_ast(lock, &d, (void *)arg,
1415                              LDLM_CB_BLOCKING);
1416         LDLM_LOCK_RELEASE(lock->l_blocking_lock);
1417         lock->l_blocking_lock = NULL;
1418         LDLM_LOCK_RELEASE(lock);
1419
1420         RETURN(1);
1421 }
1422
1423 static int
1424 ldlm_work_cp_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
1425 {
1426         struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock, l_cp_ast);
1427         ldlm_completion_callback completion_callback;
1428         int rc = 0;
1429         ENTRY;
1430
1431         /* It's possible to receive a completion AST before we've set
1432          * the l_completion_ast pointer: either because the AST arrived
1433          * before the reply, or simply because there's a small race
1434          * window between receiving the reply and finishing the local
1435          * enqueue. (bug 842)
1436          *
1437          * This can't happen with the blocking_ast, however, because we
1438          * will never call the local blocking_ast until we drop our
1439          * reader/writer reference, which we won't do until we get the
1440          * reply and finish enqueueing. */
1441
1442         /* nobody should touch l_cp_ast */
1443         lock_res_and_lock(lock);
1444         cfs_list_del_init(&lock->l_cp_ast);
1445         LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
1446         /* save l_completion_ast since it can be changed by
1447          * mds_intent_policy(), see bug 14225 */
1448         completion_callback = lock->l_completion_ast;
1449         lock->l_flags &= ~LDLM_FL_CP_REQD;
1450         unlock_res_and_lock(lock);
1451
1452         if (completion_callback != NULL) {
1453                 completion_callback(lock, 0, (void *)arg);
1454                 rc = 1;
1455         }
1456         LDLM_LOCK_RELEASE(lock);
1457
1458         RETURN(rc);
1459 }
1460
1461 static int
1462 ldlm_work_revoke_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
1463 {
1464         struct ldlm_lock_desc desc;
1465         struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
1466                                                 l_rk_ast);
1467         ENTRY;
1468
1469         cfs_list_del_init(&lock->l_rk_ast);
1470
1471         /* the desc only pretends to be exclusive, forcing a full revoke */
1472         ldlm_lock2desc(lock, &desc);
1473         desc.l_req_mode = LCK_EX;
1474         desc.l_granted_mode = 0;
1475
1476         lock->l_blocking_ast(lock, &desc, (void*)arg, LDLM_CB_BLOCKING);
1477         LDLM_LOCK_RELEASE(lock);
1478
1479         RETURN(1);
1480 }
1481
1482 int ldlm_run_ast_work(cfs_list_t *rpc_list, ldlm_desc_ast_t ast_type)
1483 {
1484         struct ldlm_cb_set_arg arg;
1485         cfs_list_t *tmp, *pos;
1486         int (*work_ast_lock)(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg);
1487         int ast_count;
1488         ENTRY;
1489
1490         if (cfs_list_empty(rpc_list))
1491                 RETURN(0);
1492
1493         arg.set = ptlrpc_prep_set();
1494         if (arg.set == NULL)
1495                 RETURN(-ERESTART);
1496         cfs_atomic_set(&arg.restart, 0);
1497         switch (ast_type) {
1498         case LDLM_WORK_BL_AST:
1499                 arg.type = LDLM_BL_CALLBACK;
1500                 work_ast_lock = ldlm_work_bl_ast_lock;
1501                 break;
1502         case LDLM_WORK_CP_AST:
1503                 arg.type = LDLM_CP_CALLBACK;
1504                 work_ast_lock = ldlm_work_cp_ast_lock;
1505                 break;
1506         case LDLM_WORK_REVOKE_AST:
1507                 arg.type = LDLM_BL_CALLBACK;
1508                 work_ast_lock = ldlm_work_revoke_ast_lock;
1509                 break;
1510         default:
1511                 LBUG();
1512         }
1513
1514         ast_count = 0;
1515         cfs_list_for_each_safe(tmp, pos, rpc_list) {
1516                 ast_count += work_ast_lock(tmp, &arg);
1517
1518                 /* Send the request set once it reaches PARALLEL_AST_LIMIT,
1519                  * and create a new set for the requests remaining in
1520                  * @rpc_list */
1521                 if (unlikely(ast_count == PARALLEL_AST_LIMIT)) {
1522                         ldlm_send_and_maybe_create_set(&arg, 1);
1523                         ast_count = 0;
1524                 }
1525         }
1526
1527         if (ast_count > 0)
1528                 ldlm_send_and_maybe_create_set(&arg, 0);
1529         else
1530                 /* If the number of ASTs is an exact multiple of
1531                  * PARALLEL_AST_LIMIT, or no ASTs were issued at all,
1532                  * @arg.set must still be destroyed here, otherwise its
1533                  * memory is leaked. */
1534                 ptlrpc_set_destroy(arg.set);
1535
1536         RETURN(cfs_atomic_read(&arg.restart) ? -ERESTART : 0);
1537 }
1538
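/*
 * Illustrative sketch (editorial addition, not original code): completion
 * ASTs collected on an rpc_list are flushed in batches of
 * PARALLEL_AST_LIMIT; -ERESTART asks the caller to redo the reprocessing,
 * at which point the list is guaranteed to be empty again:
 *
 *      rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
 *      if (rc == -ERESTART)
 *              goto restart;    (see ldlm_reprocess_all() below)
 */
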
1539 static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
1540 {
1541         ldlm_reprocess_all(res);
1542         return LDLM_ITER_CONTINUE;
1543 }
1544
1545 static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd,
1546                               cfs_hlist_node_t *hnode, void *arg)
1547 {
1548         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1549         int    rc;
1550
1551         rc = reprocess_one_queue(res, arg);
1552
1553         return rc == LDLM_ITER_STOP;
1554 }
1555
1556 void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
1557 {
1558         ENTRY;
1559
1560         if (ns != NULL) {
1561                 cfs_hash_for_each_nolock(ns->ns_rs_hash,
1562                                          ldlm_reprocess_res, NULL);
1563         }
1564         EXIT;
1565 }
1566
1567 void ldlm_reprocess_all(struct ldlm_resource *res)
1568 {
1569         CFS_LIST_HEAD(rpc_list);
1570         int rc;
1571         ENTRY;
1572
1573         /* Local lock trees don't get reprocessed. */
1574         if (ns_is_client(ldlm_res_to_ns(res))) {
1575                 EXIT;
1576                 return;
1577         }
1578
1579  restart:
1580         lock_res(res);
1581         rc = ldlm_reprocess_queue(res, &res->lr_converting, &rpc_list);
1582         if (rc == LDLM_ITER_CONTINUE)
1583                 ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
1584         unlock_res(res);
1585
1586         rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
1587         if (rc == -ERESTART) {
1588                 LASSERT(cfs_list_empty(&rpc_list));
1589                 goto restart;
1590         }
1591         EXIT;
1592 }
1593
1594 void ldlm_cancel_callback(struct ldlm_lock *lock)
1595 {
1596         check_res_locked(lock->l_resource);
1597         if (!(lock->l_flags & LDLM_FL_CANCEL)) {
1598                 lock->l_flags |= LDLM_FL_CANCEL;
1599                 if (lock->l_blocking_ast) {
1600                         // l_check_no_ns_lock(ns);
1601                         unlock_res_and_lock(lock);
1602                         lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
1603                                              LDLM_CB_CANCELING);
1604                         lock_res_and_lock(lock);
1605                 } else {
1606                         LDLM_DEBUG(lock, "no blocking ast");
1607                 }
1608         }
1609         lock->l_flags |= LDLM_FL_BL_DONE;
1610 }
1611
1612 void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
1613 {
1614         if (req->l_resource->lr_type != LDLM_PLAIN &&
1615             req->l_resource->lr_type != LDLM_IBITS)
1616                 return;
1617
1618         cfs_list_del_init(&req->l_sl_policy);
1619         cfs_list_del_init(&req->l_sl_mode);
1620 }
1621
1622 void ldlm_lock_cancel(struct ldlm_lock *lock)
1623 {
1624         struct ldlm_resource *res;
1625         struct ldlm_namespace *ns;
1626         ENTRY;
1627
1628         lock_res_and_lock(lock);
1629
1630         res = lock->l_resource;
1631         ns  = ldlm_res_to_ns(res);
1632
1633         /* Please do not, no matter how tempting, remove this LBUG without
1634          * talking to me first. -phik */
1635         if (lock->l_readers || lock->l_writers) {
1636                 LDLM_ERROR(lock, "lock still has references");
1637                 LBUG();
1638         }
1639
1640         ldlm_del_waiting_lock(lock);
1641
1642         /* Run the cancel callback; it drops the res lock temporarily. */
1643         ldlm_cancel_callback(lock);
1644
1645         /* Yes, a second time: the lock may have been added again while we
1646            were running without the res lock in ldlm_cancel_callback() */
1647         ldlm_del_waiting_lock(lock);
1648         ldlm_resource_unlink_lock(lock);
1649         ldlm_lock_destroy_nolock(lock);
1650
1651         if (lock->l_granted_mode == lock->l_req_mode)
1652                 ldlm_pool_del(&ns->ns_pool, lock);
1653
1654         /* Make sure we will not be called again for the same lock, which
1655          * is possible unless lock->l_granted_mode is zeroed out */
1656         lock->l_granted_mode = LCK_MINMODE;
1657         unlock_res_and_lock(lock);
1658
1659         EXIT;
1660 }
1661
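/*
 * Illustrative sketch (editorial addition, not original code): server-side
 * callers usually cancel a lock and then reprocess its resource so that
 * blocked waiters can be granted -- this is exactly the pattern of
 * ldlm_cancel_locks_for_export_cb() below:
 *
 *      struct ldlm_resource *res = ldlm_resource_getref(lock->l_resource);
 *
 *      ldlm_lock_cancel(lock);
 *      ldlm_reprocess_all(res);
 *      ldlm_resource_putref(res);
 */
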
1662 int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
1663 {
1664         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
1665         ENTRY;
1666
1667         if (lock == NULL)
1668                 RETURN(-EINVAL);
1669
1670         lock->l_ast_data = data;
1671         LDLM_LOCK_PUT(lock);
1672         RETURN(0);
1673 }
1674
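/*
 * Illustrative sketch (editorial addition, not original code): l_ast_data
 * is opaque to the LDLM and is handed back to the blocking AST on cancel
 * (see ldlm_cancel_callback() above). A caller might stash its own object
 * behind the handle; "my_inode" here is a hypothetical example:
 *
 *      if (ldlm_lock_set_data(&lockh, my_inode) != 0)
 *              CERROR("stale lock handle\n");
 */
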
1675 int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
1676                                     cfs_hlist_node_t *hnode, void *data)
1677
1678 {
1679         struct obd_export    *exp  = data;
1680         struct ldlm_lock     *lock = cfs_hash_object(hs, hnode);
1681         struct ldlm_resource *res;
1682
1683         res = ldlm_resource_getref(lock->l_resource);
1684         LDLM_LOCK_GET(lock);
1685
1686         LDLM_DEBUG(lock, "export %p", exp);
1687         ldlm_res_lvbo_update(res, NULL, 1);
1688         ldlm_lock_cancel(lock);
1689         ldlm_reprocess_all(res);
1690         ldlm_resource_putref(res);
1691         LDLM_LOCK_RELEASE(lock);
1692         return 0;
1693 }
1694
1695 void ldlm_cancel_locks_for_export(struct obd_export *exp)
1696 {
1697         cfs_hash_for_each_empty(exp->exp_lock_hash,
1698                                 ldlm_cancel_locks_for_export_cb, exp);
1699 }
1700
1701 /**
1702  * Downgrade an exclusive lock.
1703  *
1704  * A fast variant of ldlm_lock_convert() for the conversion of exclusive
1705  * locks. The conversion always succeeds.
1706  *
1707  * \param lock the lock to downgrade
1708  * \param new_mode the new (lower) lock mode
1709  */
1710 void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
1711 {
1712         ENTRY;
1713
1714         LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX));
1715         LASSERT(new_mode == LCK_COS);
1716
1717         lock_res_and_lock(lock);
1718         ldlm_resource_unlink_lock(lock);
1719         /*
1720          * Remove the lock from pool as it will be added again in
1721          * ldlm_grant_lock() called below.
1722          */
1723         ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock);
1724
1725         lock->l_req_mode = new_mode;
1726         ldlm_grant_lock(lock, NULL);
1727         unlock_res_and_lock(lock);
1728         ldlm_reprocess_all(lock->l_resource);
1729
1730         EXIT;
1731 }
1732
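/*
 * Illustrative sketch (editorial addition, not original code): per the
 * asserts above, the only supported downgrade is PW/EX -> LCK_COS
 * (Commit-on-Sharing), which keeps the lock granted instead of
 * cancelling it:
 *
 *      if (lock->l_granted_mode & (LCK_PW | LCK_EX))
 *              ldlm_lock_downgrade(lock, LCK_COS);
 */
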
1733 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
1734                                         __u32 *flags)
1735 {
1736         CFS_LIST_HEAD(rpc_list);
1737         struct ldlm_resource *res;
1738         struct ldlm_namespace *ns;
1739         int granted = 0;
1740         int old_mode, rc;
1741         struct sl_insert_point prev;
1742         ldlm_error_t err;
1743         struct ldlm_interval *node;
1744         ENTRY;
1745
1746         if (new_mode == lock->l_granted_mode) { // No changes? Just return.
1747                 *flags |= LDLM_FL_BLOCK_GRANTED;
1748                 RETURN(lock->l_resource);
1749         }
1750
1751         /* We can't check the lock's type here because its bitlock is not
1752          * held yet, so do the allocation blindly. -jay */
1753         OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
1754         if (node == NULL)  /* Actually, this causes EDEADLOCK to be returned */
1755                 RETURN(NULL);
1756
1757         LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
1758                  "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
1759
1760         lock_res_and_lock(lock);
1761
1762         res = lock->l_resource;
1763         ns  = ldlm_res_to_ns(res);
1764
1765         old_mode = lock->l_req_mode;
1766         lock->l_req_mode = new_mode;
1767         if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
1768                 /* remember the position where the lock may be added back
1769                  * to the granted list later, and also its skiplist links
1770                  * so they can be restored if the convert fails. */
1771                 prev.res_link = lock->l_res_link.prev;
1772                 prev.mode_link = lock->l_sl_mode.prev;
1773                 prev.policy_link = lock->l_sl_policy.prev;
1774                 ldlm_resource_unlink_lock(lock);
1775         } else {
1776                 ldlm_resource_unlink_lock(lock);
1777                 if (res->lr_type == LDLM_EXTENT) {
1778                         /* FIXME: ugly code; we have to attach the lock to
1779                          * an interval node again since it may be granted
1780                          * soon */
1781                         CFS_INIT_LIST_HEAD(&node->li_group);
1782                         ldlm_interval_attach(node, lock);
1783                         node = NULL;
1784                 }
1785         }
1786
1787         /*
1788          * Remove old lock from the pool before adding the lock with new
1789          * mode below in ->policy()
1790          */
1791         ldlm_pool_del(&ns->ns_pool, lock);
1792
1793         /* If this is a local resource, put it on the appropriate list. */
1794         if (ns_is_client(ldlm_res_to_ns(res))) {
1795                 if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
1796                         ldlm_resource_add_lock(res, &res->lr_converting, lock);
1797                 } else {
1798                         /* This should never happen, because of the way the
1799                          * server handles conversions. */
1800                         LDLM_ERROR(lock, "Erroneous flags %d on local lock\n",
1801                                    *flags);
1802                         LBUG();
1803
1804                         ldlm_grant_lock(lock, &rpc_list);
1805                         granted = 1;
1806                         /* FIXME: completion handling should not run with lr_lock held! */
1807                         if (lock->l_completion_ast)
1808                                 lock->l_completion_ast(lock, 0, NULL);
1809                 }
1810         } else {
1811                 int pflags = 0;
1812                 ldlm_processing_policy policy;
1813                 policy = ldlm_processing_policy_table[res->lr_type];
1814                 rc = policy(lock, &pflags, 0, &err, &rpc_list);
1815                 if (rc == LDLM_ITER_STOP) {
1816                         lock->l_req_mode = old_mode;
1817                         if (res->lr_type == LDLM_EXTENT)
1818                                 ldlm_extent_add_lock(res, lock);
1819                         else
1820                                 ldlm_granted_list_add_lock(lock, &prev);
1821
1822                         res = NULL;
1823                 } else {
1824                         *flags |= LDLM_FL_BLOCK_GRANTED;
1825                         granted = 1;
1826                 }
1827         }
1828         unlock_res_and_lock(lock);
1829
1830         if (granted)
1831                 ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
1832         if (node)
1833                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
1834         RETURN(res);
1835 }
1836
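/*
 * Illustrative sketch (editorial addition, not original code): per the
 * LASSERTF above, only a PR -> PW conversion is expected here. A NULL
 * result means the interval-node allocation failed or the policy refused
 * and re-queued the lock in its old mode:
 *
 *      __u32 flags = 0;
 *      struct ldlm_resource *res;
 *
 *      res = ldlm_lock_convert(lock, LCK_PW, &flags);
 *      if (res == NULL)
 *              CERROR("conversion failed\n");
 */
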
1837 void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
1838 {
1839         struct obd_device *obd = NULL;
1840
1841         if (!((libcfs_debug | D_ERROR) & level))
1842                 return;
1843
1844         if (!lock) {
1845                 CDEBUG(level, "  NULL LDLM lock\n");
1846                 return;
1847         }
1848
1849         CDEBUG(level, " -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
1850                lock, lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
1851                pos, lock->l_pid);
1852         if (lock->l_conn_export != NULL)
1853                 obd = lock->l_conn_export->exp_obd;
1854         if (lock->l_export && lock->l_export->exp_connection) {
1855                 CDEBUG(level, "  Node: NID %s (rhandle: "LPX64")\n",
1856                      libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid),
1857                      lock->l_remote_handle.cookie);
1858         } else if (obd == NULL) {
1859                 CDEBUG(level, "  Node: local\n");
1860         } else {
1861                 struct obd_import *imp = obd->u.cli.cl_import;
1862                 CDEBUG(level, "  Node: NID %s (rhandle: "LPX64")\n",
1863                        libcfs_nid2str(imp->imp_connection->c_peer.nid),
1864                        lock->l_remote_handle.cookie);
1865         }
1866         CDEBUG(level, "  Resource: %p ("LPU64"/"LPU64"/"LPU64")\n",
1867                   lock->l_resource,
1868                   lock->l_resource->lr_name.name[0],
1869                   lock->l_resource->lr_name.name[1],
1870                   lock->l_resource->lr_name.name[2]);
1871         CDEBUG(level, "  Req mode: %s, grant mode: %s, rc: %u, read: %d, "
1872                "write: %d flags: "LPX64"\n", ldlm_lockname[lock->l_req_mode],
1873                ldlm_lockname[lock->l_granted_mode],
1874                cfs_atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
1875                lock->l_flags);
1876         if (lock->l_resource->lr_type == LDLM_EXTENT)
1877                 CDEBUG(level, "  Extent: "LPU64" -> "LPU64
1878                        " (req "LPU64"-"LPU64")\n",
1879                        lock->l_policy_data.l_extent.start,
1880                        lock->l_policy_data.l_extent.end,
1881                        lock->l_req_extent.start, lock->l_req_extent.end);
1882         else if (lock->l_resource->lr_type == LDLM_FLOCK)
1883                 CDEBUG(level, "  Pid: %d Extent: "LPU64" -> "LPU64"\n",
1884                        lock->l_policy_data.l_flock.pid,
1885                        lock->l_policy_data.l_flock.start,
1886                        lock->l_policy_data.l_flock.end);
1887         else if (lock->l_resource->lr_type == LDLM_IBITS)
1888                 CDEBUG(level, "  Bits: "LPX64"\n",
1889                        lock->l_policy_data.l_inodebits.bits);
1890 }
1891
1892 void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
1893 {
1894         struct ldlm_lock *lock;
1895
1896         if (!((libcfs_debug | D_ERROR) & level))
1897                 return;
1898
1899         lock = ldlm_handle2lock(lockh);
1900         if (lock == NULL)
1901                 return;
1902
1903         ldlm_lock_dump(D_OTHER, lock, 0);
1904
1905         LDLM_LOCK_PUT(lock);
1906 }
1907
1908 void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 level,
1909                       struct libcfs_debug_msg_data *data, const char *fmt,
1910                       ...)
1911 {
1912         va_list args;
1913         cfs_debug_limit_state_t *cdls = data->msg_cdls;
1914
1915         va_start(args, fmt);
1916
1917         if (lock->l_resource == NULL) {
1918                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level,data->msg_file,
1919                                    data->msg_fn, data->msg_line, fmt, args,
1920                        " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1921                        "res: \?\? rrc=\?\? type: \?\?\? flags: "LPX64" remote: "
1922                        LPX64" expref: %d pid: %u timeout: %lu\n", lock,
1923                        lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
1924                        lock->l_readers, lock->l_writers,
1925                        ldlm_lockname[lock->l_granted_mode],
1926                        ldlm_lockname[lock->l_req_mode],
1927                        lock->l_flags, lock->l_remote_handle.cookie,
1928                        lock->l_export ?
1929                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
1930                        lock->l_pid, lock->l_callback_timeout);
1931                 va_end(args);
1932                 return;
1933         }
1934
1935         switch (lock->l_resource->lr_type) {
1936         case LDLM_EXTENT:
1937                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level,data->msg_file,
1938                                    data->msg_fn, data->msg_line, fmt, args,
1939                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1940                        "res: "LPU64"/"LPU64" rrc: %d type: %s ["LPU64"->"LPU64
1941                        "] (req "LPU64"->"LPU64") flags: "LPX64" remote: "LPX64
1942                        " expref: %d pid: %u timeout %lu\n",
1943                        ldlm_lock_to_ns_name(lock), lock,
1944                        lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
1945                        lock->l_readers, lock->l_writers,
1946                        ldlm_lockname[lock->l_granted_mode],
1947                        ldlm_lockname[lock->l_req_mode],
1948                        lock->l_resource->lr_name.name[0],
1949                        lock->l_resource->lr_name.name[1],
1950                        cfs_atomic_read(&lock->l_resource->lr_refcount),
1951                        ldlm_typename[lock->l_resource->lr_type],
1952                        lock->l_policy_data.l_extent.start,
1953                        lock->l_policy_data.l_extent.end,
1954                        lock->l_req_extent.start, lock->l_req_extent.end,
1955                        lock->l_flags, lock->l_remote_handle.cookie,
1956                        lock->l_export ?
1957                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
1958                        lock->l_pid, lock->l_callback_timeout);
1959                 break;
1960
1961         case LDLM_FLOCK:
1962                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level,data->msg_file,
1963                                    data->msg_fn, data->msg_line, fmt, args,
1964                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1965                        "res: "LPU64"/"LPU64" rrc: %d type: %s pid: %d "
1966                        "["LPU64"->"LPU64"] flags: "LPX64" remote: "LPX64
1967                        " expref: %d pid: %u timeout: %lu\n",
1968                        ldlm_lock_to_ns_name(lock), lock,
1969                        lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
1970                        lock->l_readers, lock->l_writers,
1971                        ldlm_lockname[lock->l_granted_mode],
1972                        ldlm_lockname[lock->l_req_mode],
1973                        lock->l_resource->lr_name.name[0],
1974                        lock->l_resource->lr_name.name[1],
1975                        cfs_atomic_read(&lock->l_resource->lr_refcount),
1976                        ldlm_typename[lock->l_resource->lr_type],
1977                        lock->l_policy_data.l_flock.pid,
1978                        lock->l_policy_data.l_flock.start,
1979                        lock->l_policy_data.l_flock.end,
1980                        lock->l_flags, lock->l_remote_handle.cookie,
1981                        lock->l_export ?
1982                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
1983                        lock->l_pid, lock->l_callback_timeout);
1984                 break;
1985
1986         case LDLM_IBITS:
1987                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level,data->msg_file,
1988                                    data->msg_fn, data->msg_line, fmt, args,
1989                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1990                        "res: "LPU64"/"LPU64" bits "LPX64" rrc: %d type: %s "
1991                        "flags: "LPX64" remote: "LPX64" expref: %d "
1992                        "pid: %u timeout: %lu\n",
1993                        ldlm_lock_to_ns_name(lock),
1994                        lock, lock->l_handle.h_cookie,
1995                        cfs_atomic_read(&lock->l_refc),
1996                        lock->l_readers, lock->l_writers,
1997                        ldlm_lockname[lock->l_granted_mode],
1998                        ldlm_lockname[lock->l_req_mode],
1999                        lock->l_resource->lr_name.name[0],
2000                        lock->l_resource->lr_name.name[1],
2001                        lock->l_policy_data.l_inodebits.bits,
2002                        cfs_atomic_read(&lock->l_resource->lr_refcount),
2003                        ldlm_typename[lock->l_resource->lr_type],
2004                        lock->l_flags, lock->l_remote_handle.cookie,
2005                        lock->l_export ?
2006                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
2007                        lock->l_pid, lock->l_callback_timeout);
2008                 break;
2009
2010         default:
2011                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level,data->msg_file,
2012                                    data->msg_fn, data->msg_line, fmt, args,
2013                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
2014                        "res: "LPU64"/"LPU64" rrc: %d type: %s flags: "LPX64" "
2015                        "remote: "LPX64" expref: %d pid: %u timeout %lu\n",
2016                        ldlm_lock_to_ns_name(lock),
2017                        lock, lock->l_handle.h_cookie,
2018                        cfs_atomic_read(&lock->l_refc),
2019                        lock->l_readers, lock->l_writers,
2020                        ldlm_lockname[lock->l_granted_mode],
2021                        ldlm_lockname[lock->l_req_mode],
2022                        lock->l_resource->lr_name.name[0],
2023                        lock->l_resource->lr_name.name[1],
2024                        cfs_atomic_read(&lock->l_resource->lr_refcount),
2025                        ldlm_typename[lock->l_resource->lr_type],
2026                        lock->l_flags, lock->l_remote_handle.cookie,
2027                        lock->l_export ?
2028                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
2029                        lock->l_pid, lock->l_callback_timeout);
2030                 break;
2031         }
2032         va_end(args);
2033 }
2034 EXPORT_SYMBOL(_ldlm_lock_debug);
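
/*
 * Illustrative sketch (editorial addition, not original code):
 * _ldlm_lock_debug() is normally reached through the LDLM_DEBUG() and
 * LDLM_ERROR() macros rather than called directly, e.g. as used above:
 *
 *      LDLM_DEBUG(lock, "no blocking ast");
 *      LDLM_ERROR(lock, "lock still has references");
 */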