/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lock.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
# ifndef HAVE_VFS_INTENT_PATCHES
# include <linux/lustre_intent.h>
# endif
#else
# include <liblustre.h>
#endif

#include <obd_class.h>
#include "ldlm_internal.h"

/* lock types */
char *ldlm_lockname[] = {
        [0] "--",
        [LCK_EX] "EX",
        [LCK_PW] "PW",
        [LCK_PR] "PR",
        [LCK_CW] "CW",
        [LCK_CR] "CR",
        [LCK_NL] "NL",
        [LCK_GROUP] "GROUP",
        [LCK_COS] "COS"
};

char *ldlm_typename[] = {
        [LDLM_PLAIN] "PLN",
        [LDLM_EXTENT] "EXT",
        [LDLM_FLOCK] "FLK",
        [LDLM_IBITS] "IBT",
};

char *ldlm_it2str(int it)
{
        switch (it) {
        case IT_OPEN:
                return "open";
        case IT_CREAT:
                return "creat";
        case (IT_OPEN | IT_CREAT):
                return "open|creat";
        case IT_READDIR:
                return "readdir";
        case IT_GETATTR:
                return "getattr";
        case IT_LOOKUP:
                return "lookup";
        case IT_UNLINK:
                return "unlink";
        case IT_GETXATTR:
                return "getxattr";
        default:
                CERROR("Unknown intent %d\n", it);
                return "UNKNOWN";
        }
}

extern cfs_mem_cache_t *ldlm_lock_slab;

static ldlm_processing_policy ldlm_processing_policy_table[] = {
        [LDLM_PLAIN] ldlm_process_plain_lock,
        [LDLM_EXTENT] ldlm_process_extent_lock,
#ifdef __KERNEL__
        [LDLM_FLOCK] ldlm_process_flock_lock,
#endif
        [LDLM_IBITS] ldlm_process_inodebits_lock,
};

ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
{
        return ldlm_processing_policy_table[res->lr_type];
}

void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
{
        ns->ns_policy = arg;
}

/*
 * REFCOUNTED LOCK OBJECTS
 */


/*
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        cfs_atomic_inc(&lock->l_refc);
        return lock;
}

static void ldlm_lock_free(struct ldlm_lock *lock, size_t size)
{
        LASSERT(size == sizeof(*lock));
        OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
}

void ldlm_lock_put(struct ldlm_lock *lock)
{
        ENTRY;

        LASSERT(lock->l_resource != LP_POISON);
        LASSERT(cfs_atomic_read(&lock->l_refc) > 0);
        if (cfs_atomic_dec_and_test(&lock->l_refc)) {
                struct ldlm_resource *res;

                LDLM_DEBUG(lock,
                           "final lock_put on destroyed lock, freeing it.");

                res = lock->l_resource;
                LASSERT(lock->l_destroyed);
                LASSERT(cfs_list_empty(&lock->l_res_link));
                LASSERT(cfs_list_empty(&lock->l_pending_chain));

                lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
                                     LDLM_NSS_LOCKS);
                lu_ref_del(&res->lr_reference, "lock", lock);
                ldlm_resource_putref(res);
                lock->l_resource = NULL;
                if (lock->l_export) {
                        class_export_lock_put(lock->l_export, lock);
                        lock->l_export = NULL;
                }

                if (lock->l_lvb_data != NULL)
                        OBD_FREE(lock->l_lvb_data, lock->l_lvb_len);

                ldlm_interval_free(ldlm_interval_detach(lock));
                lu_ref_fini(&lock->l_reference);
                OBD_FREE_RCU_CB(lock, sizeof(*lock), &lock->l_handle,
                                ldlm_lock_free);
        }

        EXIT;
}
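
/*
 * Illustrative get/put pairing (a sketch, not lifted from any one caller):
 * code that stashes a lock pointer must hold its own reference for as long
 * as it keeps the pointer:
 *
 *      struct ldlm_lock *mine = LDLM_LOCK_GET(lock);
 *      ... use mine, even after the original reference is dropped ...
 *      LDLM_LOCK_PUT(mine);
 */
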
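/* Remove @lock from the namespace LRU if it is there; return 1 if it was
 * removed, 0 otherwise.  The caller must hold ns_lock. */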
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
        int rc = 0;
        if (!cfs_list_empty(&lock->l_lru)) {
                struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
                cfs_list_del_init(&lock->l_lru);
                if (lock->l_flags & LDLM_FL_SKIPPED)
                        lock->l_flags &= ~LDLM_FL_SKIPPED;
                LASSERT(ns->ns_nr_unused > 0);
                ns->ns_nr_unused--;
                rc = 1;
        }
        return rc;
}

int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
        int rc;

        ENTRY;
        if (lock->l_ns_srv) {
                LASSERT(cfs_list_empty(&lock->l_lru));
                RETURN(0);
        }

        cfs_spin_lock(&ns->ns_lock);
        rc = ldlm_lock_remove_from_lru_nolock(lock);
        cfs_spin_unlock(&ns->ns_lock);
        EXIT;
        return rc;
}

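/* Add @lock to the tail (most recently used end) of the namespace LRU.
 * The caller must hold ns_lock. */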
void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        lock->l_last_used = cfs_time_current();
        LASSERT(cfs_list_empty(&lock->l_lru));
        LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
        cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
        LASSERT(ns->ns_nr_unused >= 0);
        ns->ns_nr_unused++;
}

void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        ENTRY;
        cfs_spin_lock(&ns->ns_lock);
        ldlm_lock_add_to_lru_nolock(lock);
        cfs_spin_unlock(&ns->ns_lock);
        EXIT;
}

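/* If @lock is on the namespace LRU, move it to the most recently used end
 * of the list; server-side locks are never in the LRU. */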
void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        ENTRY;
        if (lock->l_ns_srv) {
                LASSERT(cfs_list_empty(&lock->l_lru));
                EXIT;
                return;
        }

        cfs_spin_lock(&ns->ns_lock);
        if (!cfs_list_empty(&lock->l_lru)) {
                ldlm_lock_remove_from_lru_nolock(lock);
                ldlm_lock_add_to_lru_nolock(lock);
        }
        cfs_spin_unlock(&ns->ns_lock);
        EXIT;
}

/* This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock.
 * Because it's not in the hash table anymore.  -phil */
int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
{
        ENTRY;

        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (!cfs_list_empty(&lock->l_res_link)) {
                LDLM_ERROR(lock, "lock still on resource");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (lock->l_destroyed) {
                LASSERT(cfs_list_empty(&lock->l_lru));
                EXIT;
                return 0;
        }
        lock->l_destroyed = 1;

        if (lock->l_export && lock->l_export->exp_lock_hash &&
            !cfs_hlist_unhashed(&lock->l_exp_hash))
                cfs_hash_del(lock->l_export->exp_lock_hash,
                             &lock->l_remote_handle, &lock->l_exp_hash);

        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

#if 0
        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        if (lock->l_export)
                class_export_put(lock->l_export);
        lock->l_export = NULL;
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0);
#endif
        EXIT;
        return 1;
}

void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        int first;
        ENTRY;
        lock_res_and_lock(lock);
        first = ldlm_lock_destroy_internal(lock);
        unlock_res_and_lock(lock);

        /* drop reference from hashtable only for first destroy */
        if (first) {
                lu_ref_del(&lock->l_reference, "hash", lock);
                LDLM_LOCK_RELEASE(lock);
        }
        EXIT;
}

void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
{
        int first;
        ENTRY;
        first = ldlm_lock_destroy_internal(lock);
        /* drop reference from hashtable only for first destroy */
        if (first) {
                lu_ref_del(&lock->l_reference, "hash", lock);
                LDLM_LOCK_RELEASE(lock);
        }
        EXIT;
}

/* This is called by class_handle2object() with the handle lock taken. */
static void lock_handle_addref(void *lock)
{
        LDLM_LOCK_GET((struct ldlm_lock *)lock);
}

/*
 * usage: pass in a resource on which you have done ldlm_resource_get();
 *        the new lock will take over the refcount.
 * returns: lock with refcount 2 - one for the current caller and one for
 *          the remote side
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;
        ENTRY;

        if (resource == NULL)
                LBUG();

        OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, CFS_ALLOC_IO);
        if (lock == NULL)
                RETURN(NULL);

        cfs_spin_lock_init(&lock->l_lock);
        lock->l_resource = resource;
        lu_ref_add(&resource->lr_reference, "lock", lock);

        cfs_atomic_set(&lock->l_refc, 2);
        CFS_INIT_LIST_HEAD(&lock->l_res_link);
        CFS_INIT_LIST_HEAD(&lock->l_lru);
        CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
        CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
        CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
        CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
        cfs_waitq_init(&lock->l_waitq);
        lock->l_blocking_lock = NULL;
        CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
        CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
        CFS_INIT_HLIST_NODE(&lock->l_exp_hash);

        lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
                             LDLM_NSS_LOCKS);
        CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, lock_handle_addref);

        lu_ref_init(&lock->l_reference);
        lu_ref_add(&lock->l_reference, "hash", lock);
        lock->l_callback_timeout = 0;

#if LUSTRE_TRACKS_LOCK_EXP_REFS
        CFS_INIT_LIST_HEAD(&lock->l_exp_refs_link);
        lock->l_exp_refs_nr = 0;
        lock->l_exp_refs_target = NULL;
#endif

        RETURN(lock);
}

int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              const struct ldlm_res_id *new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;
        struct ldlm_resource *newres;
        int type;
        ENTRY;

        LASSERT(ns_is_client(ns));

        lock_res_and_lock(lock);
        if (memcmp(new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                unlock_res_and_lock(lock);
                RETURN(0);
        }

        LASSERT(new_resid->name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(cfs_list_empty(&lock->l_res_link));

        type = oldres->lr_type;
        unlock_res_and_lock(lock);

        newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
        if (newres == NULL)
                RETURN(-ENOMEM);

        lu_ref_add(&newres->lr_reference, "lock", lock);
        /*
         * To flip the lock from the old to the new resource, lock, oldres and
         * newres have to be locked. Resource spin-locks are nested within
         * lock->l_lock, and are taken in the memory address order to avoid
         * dead-locks.
         */
        cfs_spin_lock(&lock->l_lock);
        oldres = lock->l_resource;
        if (oldres < newres) {
                lock_res(oldres);
                lock_res_nested(newres, LRT_NEW);
        } else {
                lock_res(newres);
                lock_res_nested(oldres, LRT_NEW);
        }
        LASSERT(memcmp(new_resid, &oldres->lr_name,
                       sizeof oldres->lr_name) != 0);
        lock->l_resource = newres;
        unlock_res(oldres);
        unlock_res_and_lock(lock);

        /* ...and the flowers are still standing! */
        lu_ref_del(&oldres->lr_reference, "lock", lock);
        ldlm_resource_putref(oldres);

        RETURN(0);
}

/*
 *  HANDLES
 */

void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->cookie = lock->l_handle.h_cookie;
}
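
/*
 * Handle round-trip, as a sketch: a lustre_handle is an opaque cookie that
 * can safely cross the wire and later be mapped back to a referenced lock:
 *
 *      struct lustre_handle lockh;
 *      struct ldlm_lock *lock;
 *
 *      ldlm_lock2handle(lock, &lockh);
 *      ...
 *      lock = ldlm_handle2lock(&lockh);
 *      if (lock != NULL) {
 *              ... use the lock ...
 *              LDLM_LOCK_PUT(lock);
 *      }
 */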

/* If flags are given: atomically get the lock and set the flags.
 * Return NULL if any of the flags is already set.
 */

struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
                                     int flags)
{
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock, *retval = NULL;
        ENTRY;

        LASSERT(handle);

        lock = class_handle2object(handle->cookie);
        if (lock == NULL)
                RETURN(NULL);

        LASSERT(lock->l_resource != NULL);
        ns = ldlm_lock_to_ns(lock);
        LASSERT(ns != NULL);

        lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current());
        lock_res_and_lock(lock);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if (lock->l_destroyed) {
                unlock_res_and_lock(lock);
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags && (lock->l_flags & flags)) {
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags)
                lock->l_flags |= flags;

        unlock_res_and_lock(lock);
        retval = lock;
        EXIT;
 out:
        return retval;
}

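/* Fill @desc with the fields of @lock, converting an inodebits lock to a
 * plain lock descriptor if the peer does not support OBD_CONNECT_IBITS. */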
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        struct obd_export *exp = lock->l_export ?: lock->l_conn_export;
        /* INODEBITS_INTEROP: If the other side does not support
         * inodebits, reply with a plain lock descriptor.
         */
        if ((lock->l_resource->lr_type == LDLM_IBITS) &&
            (exp && !(exp->exp_connect_flags & OBD_CONNECT_IBITS))) {
                /* Make sure all the right bits are set in this lock we
                   are going to pass to client */
                LASSERTF(lock->l_policy_data.l_inodebits.bits ==
                         (MDS_INODELOCK_LOOKUP|MDS_INODELOCK_UPDATE),
                         "Inappropriate inode lock bits during "
                         "conversion " LPU64 "\n",
                         lock->l_policy_data.l_inodebits.bits);

                ldlm_res2desc(lock->l_resource, &desc->l_resource);
                desc->l_resource.lr_type = LDLM_PLAIN;

                /* Convert "new" lock mode to something old client can
                   understand */
                if ((lock->l_req_mode == LCK_CR) ||
                    (lock->l_req_mode == LCK_CW))
                        desc->l_req_mode = LCK_PR;
                else
                        desc->l_req_mode = lock->l_req_mode;
                if ((lock->l_granted_mode == LCK_CR) ||
                    (lock->l_granted_mode == LCK_CW)) {
                        desc->l_granted_mode = LCK_PR;
                } else {
                        /* We never grant PW/EX locks to clients */
                        LASSERT((lock->l_granted_mode != LCK_PW) &&
                                (lock->l_granted_mode != LCK_EX));
                        desc->l_granted_mode = lock->l_granted_mode;
                }

                /* We do not copy policy here, because there is no
                   policy for plain locks */
        } else {
                ldlm_res2desc(lock->l_resource, &desc->l_resource);
                desc->l_req_mode = lock->l_req_mode;
                desc->l_granted_mode = lock->l_granted_mode;
                desc->l_policy_data = lock->l_policy_data;
        }
}

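/* Queue a blocking AST for @lock onto @work_list, unless one was already
 * sent; @new is the incompatible lock that triggered the AST.  Must be
 * called with the resource lock held. */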
void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                           cfs_list_t *work_list)
{
        if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
                lock->l_flags |= LDLM_FL_AST_SENT;
                /* If the enqueuing client said so, tell the AST recipient to
                 * discard dirty data, rather than writing back. */
                if (new->l_flags & LDLM_AST_DISCARD_DATA)
                        lock->l_flags |= LDLM_FL_DISCARD_DATA;
                LASSERT(cfs_list_empty(&lock->l_bl_ast));
                cfs_list_add(&lock->l_bl_ast, work_list);
                LDLM_LOCK_GET(lock);
                LASSERT(lock->l_blocking_lock == NULL);
                lock->l_blocking_lock = LDLM_LOCK_GET(new);
        }
}

void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
{
        if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
                lock->l_flags |= LDLM_FL_CP_REQD;
                LDLM_DEBUG(lock, "lock granted; sending completion AST.");
                LASSERT(cfs_list_empty(&lock->l_cp_ast));
                cfs_list_add(&lock->l_cp_ast, work_list);
                LDLM_LOCK_GET(lock);
        }
}

/* must be called with lr_lock held */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            cfs_list_t *work_list)
{
        ENTRY;
        check_res_locked(lock->l_resource);
        if (new)
                ldlm_add_bl_work_item(lock, new, work_list);
        else
                ldlm_add_cp_work_item(lock, work_list);
        EXIT;
}

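/* Add a reader or writer reference (according to @mode) to the lock
 * identified by @lockh. */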
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        LASSERT(lock != NULL);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        ldlm_lock_remove_from_lru(lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                lock->l_readers++;
                lu_ref_add_atomic(&lock->l_reference, "reader", lock);
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
                lock->l_writers++;
                lu_ref_add_atomic(&lock->l_reference, "writer", lock);
        }
        LDLM_LOCK_GET(lock);
        lu_ref_add_atomic(&lock->l_reference, "user", lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}

/**
 * Attempts to addref a lock, and fails if lock is already LDLM_FL_CBPENDING
 * or destroyed.
 *
 * \retval 0 success, lock was addref-ed
 *
 * \retval -EAGAIN lock is being canceled.
 */
int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;
        int               result;

        result = -EAGAIN;
        lock = ldlm_handle2lock(lockh);
        if (lock != NULL) {
                lock_res_and_lock(lock);
                if (lock->l_readers != 0 || lock->l_writers != 0 ||
                    !(lock->l_flags & LDLM_FL_CBPENDING)) {
                        ldlm_lock_addref_internal_nolock(lock, mode);
                        result = 0;
                }
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
        }
        return result;
}
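
/*
 * An illustrative caller of the try-variant (a sketch; the mode and error
 * handling are caller-specific):
 *
 *      if (ldlm_lock_addref_try(&lockh, LCK_PR) == 0) {
 *              ... the lock is pinned and cannot be cancelled under us ...
 *              ldlm_lock_decref(&lockh, LCK_PR);
 *      } else {
 *              ... -EAGAIN: the lock is being cancelled, take a slow path ...
 *      }
 */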

/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        lock_res_and_lock(lock);
        ldlm_lock_addref_internal_nolock(lock, mode);
        unlock_res_and_lock(lock);
}

/* Only called in ldlm_flock_destroy and for local locks.
 * For LDLM_FLOCK type locks l_blocking_ast is NULL and
 * ldlm_lock_remove_from_lru() does nothing, so it is safe
 * for ldlm_flock_destroy to use this path and drop that code. */
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                LASSERT(lock->l_readers > 0);
                lu_ref_del(&lock->l_reference, "reader", lock);
                lock->l_readers--;
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
                LASSERT(lock->l_writers > 0);
                lu_ref_del(&lock->l_reference, "writer", lock);
                lock->l_writers--;
        }

        lu_ref_del(&lock->l_reference, "user", lock);
        LDLM_LOCK_RELEASE(lock);    /* matches the LDLM_LOCK_GET() in addref */
}

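/* Drop a reader/writer reference in @mode and, if this was the last user
 * reference, either hand a CBPENDING lock to the blocking-AST path or
 * park an unused client lock on the namespace LRU. */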
void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
        struct ldlm_namespace *ns;
        ENTRY;

        lock_res_and_lock(lock);

        ns = ldlm_lock_to_ns(lock);

        ldlm_lock_decref_internal_nolock(lock, mode);

        if (lock->l_flags & LDLM_FL_LOCAL &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                lock->l_flags |= LDLM_FL_CBPENDING;
        }

        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                /* If we received a blocking AST and this was the last
                 * reference, run the callback. */
                if (lock->l_ns_srv && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                unlock_res_and_lock(lock);

                if (lock->l_flags & LDLM_FL_FAIL_LOC)
                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

                if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
                    ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
                        ldlm_handle_bl_callback(ns, NULL, lock);
        } else if (ns_is_client(ns) &&
                   !lock->l_readers && !lock->l_writers &&
                   !(lock->l_flags & LDLM_FL_BL_AST)) {
                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                ldlm_lock_add_to_lru(lock);
                unlock_res_and_lock(lock);

                if (lock->l_flags & LDLM_FL_FAIL_LOC)
                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

                /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
                 * are not supported by the server, otherwise, it is done on
                 * enqueue. */
                if (!exp_connect_cancelset(lock->l_conn_export) &&
                    !ns_connect_lru_resize(ns))
                        ldlm_cancel_lru(ns, 0, LDLM_ASYNC, 0);
        } else {
                unlock_res_and_lock(lock);
        }

        EXIT;
}

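/* Release a @mode reference on @lockh obtained earlier via
 * ldlm_lock_addref() or a successful ldlm_lock_match(). */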
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

/* This will drop a lock reference and mark it for destruction, but will not
 * necessarily cancel the lock before returning. */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        ENTRY;

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_CBPENDING;
        unlock_res_and_lock(lock);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

struct sl_insert_point {
        cfs_list_t *res_link;
        cfs_list_t *mode_link;
        cfs_list_t *policy_link;
};

/*
 * search_granted_lock
 *
 * Description:
 *      Finds a position at which to insert the new lock.
 * Parameters:
 *      queue [input]:  the granted list the search acts on;
 *      req [input]:    the lock whose position is to be located;
 *      prev [output]:  positions within the 3 lists at which to insert @req
 * Return Value:
 *      filled @prev
 * NOTE: called by
 *  - ldlm_grant_lock_with_skiplist
 */
static void search_granted_lock(cfs_list_t *queue,
                                struct ldlm_lock *req,
                                struct sl_insert_point *prev)
{
        cfs_list_t *tmp;
        struct ldlm_lock *lock, *mode_end, *policy_end;
        ENTRY;

        cfs_list_for_each(tmp, queue) {
                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);

                mode_end = cfs_list_entry(lock->l_sl_mode.prev,
                                          struct ldlm_lock, l_sl_mode);

                if (lock->l_req_mode != req->l_req_mode) {
                        /* jump to last lock of mode group */
                        tmp = &mode_end->l_res_link;
                        continue;
                }

                /* suitable mode group is found */
                if (lock->l_resource->lr_type == LDLM_PLAIN) {
                        /* insert point is last lock of the mode group */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else if (lock->l_resource->lr_type == LDLM_IBITS) {
                        for (;;) {
                                policy_end =
                                        cfs_list_entry(lock->l_sl_policy.prev,
                                                       struct ldlm_lock,
                                                       l_sl_policy);

                                if (lock->l_policy_data.l_inodebits.bits ==
                                    req->l_policy_data.l_inodebits.bits) {
                                        /* insert point is last lock of
                                         * the policy group */
                                        prev->res_link =
                                                &policy_end->l_res_link;
                                        prev->mode_link =
                                                &policy_end->l_sl_mode;
                                        prev->policy_link =
                                                &policy_end->l_sl_policy;
                                        EXIT;
                                        return;
                                }

                                if (policy_end == mode_end)
                                        /* done with mode group */
                                        break;

                                /* go to next policy group within mode group */
                                tmp = policy_end->l_res_link.next;
                                lock = cfs_list_entry(tmp, struct ldlm_lock,
                                                      l_res_link);
                        }  /* loop over policy groups within the mode group */

                        /* insert point is last lock of the mode group,
                         * new policy group is started */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else {
                        LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock");
                        LBUG();
                }
        }

        /* insert point is last lock on the queue,
         * new mode group and new policy group are started */
        prev->res_link = queue->prev;
        prev->mode_link = &req->l_sl_mode;
        prev->policy_link = &req->l_sl_policy;
        EXIT;
        return;
}
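
/*
 * The granted list layout walked above, sketched: locks sit on l_res_link
 * grouped first by mode and then, for inodebits locks, by policy bits;
 * l_sl_mode and l_sl_policy link the first and last lock of each group so
 * whole groups can be skipped in one step, e.g.:
 *
 *      PR[lookup] .. PR[lookup] | PR[update] .. PR[update] | EX .. EX
 *      '-----policy group-----'   '-----policy group-----'
 *      '-------------------mode group-------------------'   '-mode-'
 */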

static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
                                       struct sl_insert_point *prev)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        ldlm_resource_dump(D_INFO, res);
        CDEBUG(D_OTHER, "About to add this lock:\n");
        ldlm_lock_dump(D_OTHER, lock, 0);

        if (lock->l_destroyed) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                return;
        }

        LASSERT(cfs_list_empty(&lock->l_res_link));
        LASSERT(cfs_list_empty(&lock->l_sl_mode));
        LASSERT(cfs_list_empty(&lock->l_sl_policy));

        cfs_list_add(&lock->l_res_link, prev->res_link);
        cfs_list_add(&lock->l_sl_mode, prev->mode_link);
        cfs_list_add(&lock->l_sl_policy, prev->policy_link);

        EXIT;
}

static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
{
        struct sl_insert_point prev;
        ENTRY;

        LASSERT(lock->l_req_mode == lock->l_granted_mode);

        search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
        ldlm_granted_list_add_lock(lock, &prev);
        EXIT;
}

/* NOTE: called by
 *  - ldlm_lock_enqueue
 *  - ldlm_reprocess_queue
 *  - ldlm_lock_convert
 *
 * must be called with lr_lock held
 */
void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        lock->l_granted_mode = lock->l_req_mode;
        if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
                ldlm_grant_lock_with_skiplist(lock);
        else if (res->lr_type == LDLM_EXTENT)
                ldlm_extent_add_lock(res, lock);
        else
                ldlm_resource_add_lock(res, &res->lr_granted, lock);

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (work_list && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, work_list);

        ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
        EXIT;
}

/* returns a referenced lock or NULL.  See the flag descriptions below, in the
 * comment above ldlm_lock_match */
static struct ldlm_lock *search_queue(cfs_list_t *queue,
                                      ldlm_mode_t *mode,
                                      ldlm_policy_data_t *policy,
                                      struct ldlm_lock *old_lock,
                                      int flags, int unref)
{
        struct ldlm_lock *lock;
        cfs_list_t       *tmp;

        cfs_list_for_each(tmp, queue) {
                ldlm_mode_t match;

                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock == old_lock)
                        break;

                /* llite sometimes wants to match locks that will be
                 * canceled when their users drop, but we allow it to match
                 * if it passes in CBPENDING and the lock still has users.
                 * This is generally only going to be used by children
                 * whose parents already hold a lock, so forward progress
                 * can still happen. */
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    !(flags & LDLM_FL_CBPENDING))
                        continue;
                if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
                    lock->l_readers == 0 && lock->l_writers == 0)
                        continue;

                if (!(lock->l_req_mode & *mode))
                        continue;
                match = lock->l_req_mode;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_policy_data.l_extent.start >
                     policy->l_extent.start ||
                     lock->l_policy_data.l_extent.end < policy->l_extent.end))
                        continue;

                if (unlikely(match == LCK_GROUP) &&
                    lock->l_resource->lr_type == LDLM_EXTENT &&
                    lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
                        continue;

                /* We match if we have an existing lock with the same or a
                   wider set of bits. */
                if (lock->l_resource->lr_type == LDLM_IBITS &&
                     ((lock->l_policy_data.l_inodebits.bits &
                      policy->l_inodebits.bits) !=
                      policy->l_inodebits.bits))
                        continue;

                if (!unref &&
                    (lock->l_destroyed || (lock->l_flags & LDLM_FL_FAILED)))
                        continue;

                if ((flags & LDLM_FL_LOCAL_ONLY) &&
                    !(lock->l_flags & LDLM_FL_LOCAL))
                        continue;

                if (flags & LDLM_FL_TEST_LOCK) {
                        LDLM_LOCK_GET(lock);
                        ldlm_lock_touch_in_lru(lock);
                } else {
                        ldlm_lock_addref_internal_nolock(lock, match);
                }
                *mode = match;
                return lock;
        }

        return NULL;
}

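/* Mark the lock's LVB as valid and wake up anyone waiting for it in
 * ldlm_lock_match().  The caller must hold the resource lock. */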
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
        lock->l_flags |= LDLM_FL_LVB_READY;
        cfs_waitq_signal(&lock->l_waitq);
}

void ldlm_lock_allow_match(struct ldlm_lock *lock)
{
        lock_res_and_lock(lock);
        ldlm_lock_allow_match_locked(lock);
        unlock_res_and_lock(lock);
}

/* Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (i.e., connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 *     to be canceled can still be matched as long as they still have reader
 *     or writer references
 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
 *     just tell us if we would have matched.
 *
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock.
 *
 * We also check the security context; if that check fails we simply return 0
 * (to keep caller code unchanged) and the context failure will be discovered
 * by the caller sometime later.
 */
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                            const struct ldlm_res_id *res_id, ldlm_type_t type,
                            ldlm_policy_data_t *policy, ldlm_mode_t mode,
                            struct lustre_handle *lockh, int unref)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock, *old_lock = NULL;
        int rc = 0;
        ENTRY;

        if (ns == NULL) {
                old_lock = ldlm_handle2lock(lockh);
                LASSERT(old_lock);

                ns = ldlm_lock_to_ns(old_lock);
                res_id = &old_lock->l_resource->lr_name;
                type = old_lock->l_resource->lr_type;
                mode = old_lock->l_req_mode;
        }

        res = ldlm_resource_get(ns, NULL, res_id, type, 0);
        if (res == NULL) {
                LASSERT(old_lock == NULL);
                RETURN(0);
        }

        LDLM_RESOURCE_ADDREF(res);
        lock_res(res);

        lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
                            flags, unref);
        if (lock != NULL)
                GOTO(out, rc = 1);
        if (flags & LDLM_FL_BLOCK_GRANTED)
                GOTO(out, rc = 0);
        lock = search_queue(&res->lr_converting, &mode, policy, old_lock,
                            flags, unref);
        if (lock != NULL)
                GOTO(out, rc = 1);
        lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
                            flags, unref);
        if (lock != NULL)
                GOTO(out, rc = 1);

        EXIT;
 out:
        unlock_res(res);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                if ((flags & LDLM_FL_LVB_READY) &&
                    (!(lock->l_flags & LDLM_FL_LVB_READY))) {
                        struct l_wait_info lwi;
                        if (lock->l_completion_ast) {
                                int err = lock->l_completion_ast(lock,
                                                          LDLM_FL_WAIT_NOREPROC,
                                                                 NULL);
                                if (err) {
                                        if (flags & LDLM_FL_TEST_LOCK)
                                                LDLM_LOCK_RELEASE(lock);
                                        else
                                                ldlm_lock_decref_internal(lock,
                                                                          mode);
                                        rc = 0;
                                        goto out2;
                                }
                        }

                        lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
                                               NULL, LWI_ON_SIGNAL_NOOP, NULL);

                        /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
                        l_wait_event(lock->l_waitq,
                                     (lock->l_flags & LDLM_FL_LVB_READY), &lwi);
                }
        }
 out2:
        if (rc) {
                LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")",
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[2] : policy->l_extent.start,
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[3] : policy->l_extent.end);

                /* check user's security context */
                if (lock->l_conn_export &&
                    sptlrpc_import_check_ctx(
                                class_exp2cliimp(lock->l_conn_export))) {
                        if (!(flags & LDLM_FL_TEST_LOCK))
                                ldlm_lock_decref_internal(lock, mode);
                        rc = 0;
                }

                if (flags & LDLM_FL_TEST_LOCK)
                        LDLM_LOCK_RELEASE(lock);

        } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
                LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
                                  LPU64"/"LPU64" ("LPU64" "LPU64")", ns,
                                  type, mode, res_id->name[0], res_id->name[1],
                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[2] : policy->l_extent.start,
                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[3] : policy->l_extent.end);
        }
        if (old_lock)
                LDLM_LOCK_PUT(old_lock);

        return rc ? mode : 0;
}
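
/*
 * A typical client-side match, as an illustrative sketch (the flags, type
 * and mode set are caller-specific):
 *
 *      struct lustre_handle lockh;
 *      ldlm_mode_t mode;
 *
 *      mode = ldlm_lock_match(ns, LDLM_FL_LVB_READY, &res_id, LDLM_EXTENT,
 *                             &policy, LCK_PR | LCK_PW, &lockh, 0);
 *      if (mode != 0) {
 *              ... a compatible lock was addref()ed in @mode ...
 *              ldlm_lock_decref(&lockh, mode);
 *      }
 */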

/* Returns a referenced lock */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   const struct ldlm_res_id *res_id,
                                   ldlm_type_t type,
                                   ldlm_mode_t mode,
                                   const struct ldlm_callback_suite *cbs,
                                   void *data, __u32 lvb_len)
{
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        ENTRY;

        res = ldlm_resource_get(ns, NULL, res_id, type, 1);
        if (res == NULL)
                RETURN(NULL);

        lock = ldlm_lock_new(res);

        if (lock == NULL)
                RETURN(NULL);

        lock->l_req_mode = mode;
        lock->l_ast_data = data;
        lock->l_pid = cfs_curproc_pid();
        lock->l_ns_srv = ns_is_server(ns);
        if (cbs) {
                lock->l_blocking_ast = cbs->lcs_blocking;
                lock->l_completion_ast = cbs->lcs_completion;
                lock->l_glimpse_ast = cbs->lcs_glimpse;
                lock->l_weigh_ast = cbs->lcs_weigh;
        }

        lock->l_tree_node = NULL;
        /* if this is the extent lock, allocate the interval tree node */
        if (type == LDLM_EXTENT) {
                if (ldlm_interval_alloc(lock) == NULL)
                        GOTO(out, 0);
        }

        if (lvb_len) {
                lock->l_lvb_len = lvb_len;
                OBD_ALLOC(lock->l_lvb_data, lvb_len);
                if (lock->l_lvb_data == NULL)
                        GOTO(out, 0);
        }

        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
                GOTO(out, 0);

        RETURN(lock);

out:
        ldlm_lock_destroy(lock);
        LDLM_LOCK_RELEASE(lock);
        return NULL;
}

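/* Enqueue (or re-enqueue during replay) the lock in *@lockp on its resource:
 * run the namespace intent policy on the server side, then grant the lock,
 * queue it on a waiting/converting list, or run the per-type processing
 * policy, as directed by *@flags. */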
ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                               struct ldlm_lock **lockp,
                               void *cookie, int *flags)
{
        struct ldlm_lock *lock = *lockp;
        struct ldlm_resource *res = lock->l_resource;
        int local = ns_is_client(ldlm_res_to_ns(res));
        ldlm_processing_policy policy;
        ldlm_error_t rc = ELDLM_OK;
        struct ldlm_interval *node = NULL;
        ENTRY;

        lock->l_last_activity = cfs_time_current_sec();
        /* policies are not executed on the client or during replay */
        if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
            && !local && ns->ns_policy) {
                rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
                                   NULL);
                if (rc == ELDLM_LOCK_REPLACED) {
                        /* The lock that was returned has already been granted,
                         * and placed into lockp.  If it's not the same as the
                         * one we passed in, then destroy the old one and our
                         * work here is done. */
                        if (lock != *lockp) {
                                ldlm_lock_destroy(lock);
                                LDLM_LOCK_RELEASE(lock);
                        }
                        *flags |= LDLM_FL_LOCK_CHANGED;
                        RETURN(0);
                } else if (rc != ELDLM_OK ||
                           (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
                        ldlm_lock_destroy(lock);
                        RETURN(rc);
                }
        }

        /* For a replaying lock, it might already be in the granted list, so
         * unlinking the lock will cause the interval node to be freed.  We
         * have to allocate the interval node early, otherwise we can't regrant
         * this lock in the future. - jay */
        if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
                OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);

        lock_res_and_lock(lock);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted
                 * before we got a chance to actually enqueue it.  We don't
                 * need to do anything else. */
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                            LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
                GOTO(out, ELDLM_OK);
        }

        ldlm_resource_unlink_lock(lock);
        if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) {
                if (node == NULL) {
                        ldlm_lock_destroy_nolock(lock);
                        GOTO(out, rc = -ENOMEM);
                }

                CFS_INIT_LIST_HEAD(&node->li_group);
                ldlm_interval_attach(node, lock);
                node = NULL;
        }

        /* Some flags from the enqueue want to make it into the AST, via the
         * lock's l_flags. */
        lock->l_flags |= *flags & LDLM_AST_DISCARD_DATA;

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below).  In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags'.
         *
         * We do exactly the same thing during recovery, when the server is
         * more or less trusting the clients not to lie.
         *
         * FIXME (bug 268): Detect obvious lies by checking compatibility in
         * granted/converting queues. */
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                else
                        ldlm_grant_lock(lock, NULL);
                GOTO(out, ELDLM_OK);
        } else if (*flags & LDLM_FL_REPLAY) {
                if (*flags & LDLM_FL_BLOCK_CONV) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_WAIT) {
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
                        ldlm_grant_lock(lock, NULL);
                        GOTO(out, ELDLM_OK);
                }
                /* If no flags, fall through to normal enqueue path. */
        }

        policy = ldlm_processing_policy_table[res->lr_type];
        policy(lock, flags, 1, &rc, NULL);
        GOTO(out, rc);
out:
        unlock_res_and_lock(lock);
        if (node)
                OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
        return rc;
}

/* Must be called with namespace taken: queue is waiting or converting. */
int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
                         cfs_list_t *work_list)
{
        cfs_list_t *tmp, *pos;
        ldlm_processing_policy policy;
        int flags;
        int rc = LDLM_ITER_CONTINUE;
        ldlm_error_t err;
        ENTRY;

        check_res_locked(res);

        policy = ldlm_processing_policy_table[res->lr_type];
        LASSERT(policy);

        cfs_list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;
                pending = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                flags = 0;
                rc = policy(pending, &flags, 0, &err, work_list);
                if (rc != LDLM_ITER_CONTINUE)
                        break;
        }

        RETURN(rc);
}

/* Helper function for ldlm_run_ast_work().
 *
 * Send an existing RPC set specified by @arg->set and then
 * destroy it. Create a new one if the @do_create flag is set. */
static void
ldlm_send_and_maybe_create_set(struct ldlm_cb_set_arg *arg, int do_create)
{
        ENTRY;

        ptlrpc_set_wait(arg->set);
        if (arg->type == LDLM_BL_CALLBACK)
                OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2);
        ptlrpc_set_destroy(arg->set);

        if (do_create)
                arg->set = ptlrpc_prep_set();

        EXIT;
}

static int
ldlm_work_bl_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
{
        struct ldlm_lock_desc d;
        struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
                                                l_bl_ast);
        ENTRY;

        /* nobody should touch l_bl_ast */
        lock_res_and_lock(lock);
        cfs_list_del_init(&lock->l_bl_ast);

        LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
        LASSERT(lock->l_bl_ast_run == 0);
        LASSERT(lock->l_blocking_lock);
        lock->l_bl_ast_run++;
        unlock_res_and_lock(lock);

        ldlm_lock2desc(lock->l_blocking_lock, &d);

        lock->l_blocking_ast(lock, &d, (void *)arg,
                             LDLM_CB_BLOCKING);
        LDLM_LOCK_RELEASE(lock->l_blocking_lock);
        lock->l_blocking_lock = NULL;
        LDLM_LOCK_RELEASE(lock);

        RETURN(1);
}

static int
ldlm_work_cp_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
{
        struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock, l_cp_ast);
        ldlm_completion_callback completion_callback;
        int rc = 0;
        ENTRY;

        /* It's possible to receive a completion AST before we've set
         * the l_completion_ast pointer: either because the AST arrived
         * before the reply, or simply because there's a small race
         * window between receiving the reply and finishing the local
         * enqueue. (bug 842)
         *
         * This can't happen with the blocking_ast, however, because we
         * will never call the local blocking_ast until we drop our
         * reader/writer reference, which we won't do until we get the
         * reply and finish enqueueing. */

        /* nobody should touch l_cp_ast */
        lock_res_and_lock(lock);
        cfs_list_del_init(&lock->l_cp_ast);
        LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
        /* save l_completion_ast since it can be changed by
         * mds_intent_policy(), see bug 14225 */
        completion_callback = lock->l_completion_ast;
        lock->l_flags &= ~LDLM_FL_CP_REQD;
        unlock_res_and_lock(lock);

        if (completion_callback != NULL) {
                completion_callback(lock, 0, (void *)arg);
                rc = 1;
        }
        LDLM_LOCK_RELEASE(lock);

        RETURN(rc);
}

static int
ldlm_work_revoke_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
{
        struct ldlm_lock_desc desc;
        struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
                                                l_rk_ast);
        ENTRY;

        cfs_list_del_init(&lock->l_rk_ast);

        /* the desc just pretends to be exclusive */
        ldlm_lock2desc(lock, &desc);
        desc.l_req_mode = LCK_EX;
        desc.l_granted_mode = 0;

        lock->l_blocking_ast(lock, &desc, (void*)arg, LDLM_CB_BLOCKING);
        LDLM_LOCK_RELEASE(lock);

        RETURN(1);
}

int ldlm_run_ast_work(cfs_list_t *rpc_list, ldlm_desc_ast_t ast_type)
{
        struct ldlm_cb_set_arg arg;
        cfs_list_t *tmp, *pos;
        int (*work_ast_lock)(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg);
        int ast_count;
        ENTRY;

        if (cfs_list_empty(rpc_list))
                RETURN(0);

        arg.set = ptlrpc_prep_set();
        if (arg.set == NULL)
                RETURN(-ERESTART);
        cfs_atomic_set(&arg.restart, 0);
        switch (ast_type) {
        case LDLM_WORK_BL_AST:
                arg.type = LDLM_BL_CALLBACK;
                work_ast_lock = ldlm_work_bl_ast_lock;
                break;
        case LDLM_WORK_CP_AST:
                arg.type = LDLM_CP_CALLBACK;
                work_ast_lock = ldlm_work_cp_ast_lock;
                break;
        case LDLM_WORK_REVOKE_AST:
                arg.type = LDLM_BL_CALLBACK;
                work_ast_lock = ldlm_work_revoke_ast_lock;
                break;
        default:
                LBUG();
        }

        ast_count = 0;
        cfs_list_for_each_safe(tmp, pos, rpc_list) {
                ast_count += work_ast_lock(tmp, &arg);

                /* Send the request set once it reaches PARALLEL_AST_LIMIT,
                 * and create a new set for the requests remaining in
                 * @rpc_list */
                if (unlikely(ast_count == PARALLEL_AST_LIMIT)) {
                        ldlm_send_and_maybe_create_set(&arg, 1);
                        ast_count = 0;
                }
        }

        if (ast_count > 0)
                ldlm_send_and_maybe_create_set(&arg, 0);
        else
                /* If the number of ASTs is an exact multiple of
                 * PARALLEL_AST_LIMIT, or no handler actually issued an
                 * RPC, @arg.set must still be destroyed here, or the
                 * request set would leak. */
                ptlrpc_set_destroy(arg.set);

        RETURN(cfs_atomic_read(&arg.restart) ? -ERESTART : 0);
}
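
/*
 * A caller sketch (illustration only): gather completion ASTs on a private
 * list and fire them in parallel, retrying on -ERESTART exactly as
 * ldlm_reprocess_all() does below.
 *
 *      CFS_LIST_HEAD(rpc_list);
 *
 *      lock_res(res);
 *      ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
 *      unlock_res(res);
 *      if (ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST) == -ERESTART)
 *              ... re-take the res lock and reprocess again ...
 */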

static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
{
        ldlm_reprocess_all(res);
        return LDLM_ITER_CONTINUE;
}

static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                              cfs_hlist_node_t *hnode, void *arg)
{
        struct ldlm_resource *res = cfs_hash_object(hs, hnode);
        int    rc;

        rc = reprocess_one_queue(res, arg);

        return rc == LDLM_ITER_STOP;
}

void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
{
        ENTRY;

        if (ns != NULL) {
                cfs_hash_for_each_nolock(ns->ns_rs_hash,
                                         ldlm_reprocess_res, NULL);
        }
        EXIT;
}

void ldlm_reprocess_all(struct ldlm_resource *res)
{
        CFS_LIST_HEAD(rpc_list);
        int rc;
        ENTRY;

        /* Local lock trees don't get reprocessed. */
        if (ns_is_client(ldlm_res_to_ns(res))) {
                EXIT;
                return;
        }

 restart:
        lock_res(res);
        rc = ldlm_reprocess_queue(res, &res->lr_converting, &rpc_list);
        if (rc == LDLM_ITER_CONTINUE)
                ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
        unlock_res(res);

        rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
        if (rc == -ERESTART) {
                LASSERT(cfs_list_empty(&rpc_list));
                goto restart;
        }
        EXIT;
}

void ldlm_cancel_callback(struct ldlm_lock *lock)
{
        check_res_locked(lock->l_resource);
        if (!(lock->l_flags & LDLM_FL_CANCEL)) {
                lock->l_flags |= LDLM_FL_CANCEL;
                if (lock->l_blocking_ast) {
                        // l_check_no_ns_lock(ns);
                        unlock_res_and_lock(lock);
                        lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
                                             LDLM_CB_CANCELING);
                        lock_res_and_lock(lock);
                } else {
                        LDLM_DEBUG(lock, "no blocking ast");
                }
        }
        lock->l_flags |= LDLM_FL_BL_DONE;
}

void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
{
        if (req->l_resource->lr_type != LDLM_PLAIN &&
            req->l_resource->lr_type != LDLM_IBITS)
                return;

        cfs_list_del_init(&req->l_sl_policy);
        cfs_list_del_init(&req->l_sl_mode);
}

void ldlm_lock_cancel(struct ldlm_lock *lock)
{
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        ENTRY;

        lock_res_and_lock(lock);

        res = lock->l_resource;
        ns  = ldlm_res_to_ns(res);

        /* Please do not, no matter how tempting, remove this LBUG without
         * talking to me first. -phik */
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                LBUG();
        }

        ldlm_del_waiting_lock(lock);

        /* Run the cancellation callback: the blocking AST is invoked in
         * LDLM_CB_CANCELING mode, and the res lock is dropped around the
         * call. */
        ldlm_cancel_callback(lock);

        /* Yes, a second time: the lock may have been added back while
         * ldlm_cancel_callback() ran without the res lock held. */
        ldlm_del_waiting_lock(lock);
        ldlm_resource_unlink_lock(lock);
        ldlm_lock_destroy_nolock(lock);

        if (lock->l_granted_mode == lock->l_req_mode)
                ldlm_pool_del(&ns->ns_pool, lock);

        /* Make sure we will not be called again for the same lock, which
         * is possible if lock->l_granted_mode is not zeroed out. */
        lock->l_granted_mode = LCK_MINMODE;
        unlock_res_and_lock(lock);

        EXIT;
}

int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
{
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);
        ENTRY;

        if (lock == NULL)
                RETURN(-EINVAL);

        lock->l_ast_data = data;
        LDLM_LOCK_PUT(lock);
        RETURN(0);
}
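
/*
 * Usage sketch (hypothetical caller; the inode pointer is only an example
 * of AST data): attach private data to a lock found by its handle.
 *
 *      struct lustre_handle lockh;
 *      ...
 *      if (ldlm_lock_set_data(&lockh, inode) != 0)
 *              ... the handle no longer maps to a live lock ...
 */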

int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                                    cfs_hlist_node_t *hnode, void *data)
{
        struct obd_export    *exp  = data;
        struct ldlm_lock     *lock = cfs_hash_object(hs, hnode);
        struct ldlm_resource *res;

        res = ldlm_resource_getref(lock->l_resource);
        LDLM_LOCK_GET(lock);

        LDLM_DEBUG(lock, "export %p", exp);
        ldlm_res_lvbo_update(res, NULL, 1);
        ldlm_lock_cancel(lock);
        ldlm_reprocess_all(res);
        ldlm_resource_putref(res);
        LDLM_LOCK_RELEASE(lock);
        return 0;
}

void ldlm_cancel_locks_for_export(struct obd_export *exp)
{
        cfs_hash_for_each_empty(exp->exp_lock_hash,
                                ldlm_cancel_locks_for_export_cb, exp);
}

/**
 * Downgrade an exclusive lock.
 *
 * A fast variant of ldlm_lock_convert for conversion of exclusive
 * locks.  The conversion always succeeds.
 *
 * \param lock A lock to convert
 * \param new_mode new lock mode
 */
void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
{
        ENTRY;

        LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX));
        LASSERT(new_mode == LCK_COS);

        lock_res_and_lock(lock);
        ldlm_resource_unlink_lock(lock);
        /*
         * Remove the lock from pool as it will be added again in
         * ldlm_grant_lock() called below.
         */
        ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock);

        lock->l_req_mode = new_mode;
        ldlm_grant_lock(lock, NULL);
        unlock_res_and_lock(lock);
        ldlm_reprocess_all(lock->l_resource);

        EXIT;
}
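
/*
 * Usage sketch (illustration only): a server thread holding a PW or EX
 * lock may drop it to Commit-on-Share mode instead of cancelling:
 *
 *      ldlm_lock_downgrade(lock, LCK_COS);
 *
 * Any mode other than LCK_COS trips the LASSERT above.
 */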

struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        __u32 *flags)
{
        CFS_LIST_HEAD(rpc_list);
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        int granted = 0;
        int old_mode, rc;
        struct sl_insert_point prev;
        ldlm_error_t err;
        struct ldlm_interval *node;
        ENTRY;

        if (new_mode == lock->l_granted_mode) { // No changes? Just return.
                *flags |= LDLM_FL_BLOCK_GRANTED;
                RETURN(lock->l_resource);
        }

        /* I can't check the type of the lock here because its bitlock
         * is not held, so do the allocation blindly. -jay */
        OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
        if (node == NULL)  /* Actually, this causes EDEADLOCK to be returned */
                RETURN(NULL);

        LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
                 "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);

        lock_res_and_lock(lock);

        res = lock->l_resource;
        ns  = ldlm_res_to_ns(res);

        old_mode = lock->l_req_mode;
        lock->l_req_mode = new_mode;
        if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
                /* remember the lock position where the lock might be
                 * added back to the granted list later and also
                 * remember the join mode for skiplist fixing. */
                prev.res_link = lock->l_res_link.prev;
                prev.mode_link = lock->l_sl_mode.prev;
                prev.policy_link = lock->l_sl_policy.prev;
                ldlm_resource_unlink_lock(lock);
        } else {
                ldlm_resource_unlink_lock(lock);
                if (res->lr_type == LDLM_EXTENT) {
                        /* FIXME: ugly code, I have to attach the lock to an
                         * interval node again since it may be granted
                         * soon */
                        CFS_INIT_LIST_HEAD(&node->li_group);
                        ldlm_interval_attach(node, lock);
                        node = NULL;
                }
        }

        /*
         * Remove the old lock from the pool before adding the lock with
         * its new mode below in ->policy()
         */
        ldlm_pool_del(&ns->ns_pool, lock);

        /* If this is a local resource, put it on the appropriate list. */
        if (ns_is_client(ldlm_res_to_ns(res))) {
                if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                } else {
                        /* This should never happen, because of the way the
                         * server handles conversions. */
                        LDLM_ERROR(lock, "Erroneous flags %d on local lock\n",
                                   *flags);
                        LBUG();

                        ldlm_grant_lock(lock, &rpc_list);
                        granted = 1;
                        /* FIXME: completion handling should not be done
                         * with lr_lock held! */
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0, NULL);
                }
        } else {
                int pflags = 0;
                ldlm_processing_policy policy;
                policy = ldlm_processing_policy_table[res->lr_type];
                rc = policy(lock, &pflags, 0, &err, &rpc_list);
                if (rc == LDLM_ITER_STOP) {
                        lock->l_req_mode = old_mode;
                        if (res->lr_type == LDLM_EXTENT)
                                ldlm_extent_add_lock(res, lock);
                        else
                                ldlm_granted_list_add_lock(lock, &prev);

                        res = NULL;
                } else {
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        granted = 1;
                }
        }
        unlock_res_and_lock(lock);

        if (granted)
                ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
        if (node)
                OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
        RETURN(res);
}
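
/*
 * Caller sketch (illustration only; error handling elided): per the
 * LASSERTF above, the only conversion this path accepts is PR -> PW.
 *
 *      __u32 flags = 0;
 *      struct ldlm_resource *res;
 *
 *      res = ldlm_lock_convert(lock, LCK_PW, &flags);
 *      if (res == NULL)
 *              ... the interval node allocation failed, or the policy
 *              refused the conversion and the lock was re-granted at
 *              its old mode ...
 */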

void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
{
        struct obd_device *obd = NULL;

        if (!((libcfs_debug | D_ERROR) & level))
                return;

        if (!lock) {
                CDEBUG(level, "  NULL LDLM lock\n");
                return;
        }

        CDEBUG(level, " -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
               lock, lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
               pos, lock->l_pid);
        if (lock->l_conn_export != NULL)
                obd = lock->l_conn_export->exp_obd;
        if (lock->l_export && lock->l_export->exp_connection) {
                CDEBUG(level, "  Node: NID %s (rhandle: "LPX64")\n",
                     libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid),
                     lock->l_remote_handle.cookie);
        } else if (obd == NULL) {
                CDEBUG(level, "  Node: local\n");
        } else {
                struct obd_import *imp = obd->u.cli.cl_import;
                CDEBUG(level, "  Node: NID %s (rhandle: "LPX64")\n",
                       libcfs_nid2str(imp->imp_connection->c_peer.nid),
                       lock->l_remote_handle.cookie);
        }
        CDEBUG(level, "  Resource: %p ("LPU64"/"LPU64"/"LPU64")\n",
                  lock->l_resource,
                  lock->l_resource->lr_name.name[0],
                  lock->l_resource->lr_name.name[1],
                  lock->l_resource->lr_name.name[2]);
        CDEBUG(level, "  Req mode: %s, grant mode: %s, rc: %u, read: %d, "
               "write: %d flags: "LPX64"\n", ldlm_lockname[lock->l_req_mode],
               ldlm_lockname[lock->l_granted_mode],
               cfs_atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
               lock->l_flags);
        if (lock->l_resource->lr_type == LDLM_EXTENT)
                CDEBUG(level, "  Extent: "LPU64" -> "LPU64
                       " (req "LPU64"-"LPU64")\n",
                       lock->l_policy_data.l_extent.start,
                       lock->l_policy_data.l_extent.end,
                       lock->l_req_extent.start, lock->l_req_extent.end);
        else if (lock->l_resource->lr_type == LDLM_FLOCK)
                CDEBUG(level, "  Pid: %d Extent: "LPU64" -> "LPU64"\n",
                       lock->l_policy_data.l_flock.pid,
                       lock->l_policy_data.l_flock.start,
                       lock->l_policy_data.l_flock.end);
        else if (lock->l_resource->lr_type == LDLM_IBITS)
                CDEBUG(level, "  Bits: "LPX64"\n",
                       lock->l_policy_data.l_inodebits.bits);
}

void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;

        if (!((libcfs_debug | D_ERROR) & level))
                return;

        lock = ldlm_handle2lock(lockh);
        if (lock == NULL)
                return;

        ldlm_lock_dump(level, lock, 0);

        LDLM_LOCK_PUT(lock);
}

void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 level,
                      struct libcfs_debug_msg_data *data, const char *fmt,
                      ...)
{
        va_list args;
        cfs_debug_limit_state_t *cdls = data->msg_cdls;

        va_start(args, fmt);

        if (lock->l_resource == NULL) {
                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
                                   data->msg_fn, data->msg_line, fmt, args,
                       " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                       "res: \?\? rrc=\?\? type: \?\?\? flags: "LPX64" remote: "
                       LPX64" expref: %d pid: %u timeout: %lu\n", lock,
                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
                       lock->l_readers, lock->l_writers,
                       ldlm_lockname[lock->l_granted_mode],
                       ldlm_lockname[lock->l_req_mode],
                       lock->l_flags, lock->l_remote_handle.cookie,
                       lock->l_export ?
                       cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                       lock->l_pid, lock->l_callback_timeout);
                va_end(args);
                return;
        }

        switch (lock->l_resource->lr_type) {
        case LDLM_EXTENT:
                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
                                   data->msg_fn, data->msg_line, fmt, args,
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                       "res: "LPU64"/"LPU64" rrc: %d type: %s ["LPU64"->"LPU64
                       "] (req "LPU64"->"LPU64") flags: "LPX64" remote: "LPX64
                       " expref: %d pid: %u timeout %lu\n",
                       ldlm_lock_to_ns_name(lock), lock,
                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
                       lock->l_readers, lock->l_writers,
                       ldlm_lockname[lock->l_granted_mode],
                       ldlm_lockname[lock->l_req_mode],
                       lock->l_resource->lr_name.name[0],
                       lock->l_resource->lr_name.name[1],
                       cfs_atomic_read(&lock->l_resource->lr_refcount),
                       ldlm_typename[lock->l_resource->lr_type],
                       lock->l_policy_data.l_extent.start,
                       lock->l_policy_data.l_extent.end,
                       lock->l_req_extent.start, lock->l_req_extent.end,
                       lock->l_flags, lock->l_remote_handle.cookie,
                       lock->l_export ?
                       cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                       lock->l_pid, lock->l_callback_timeout);
                break;

        case LDLM_FLOCK:
                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
                                   data->msg_fn, data->msg_line, fmt, args,
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                       "res: "LPU64"/"LPU64" rrc: %d type: %s pid: %d "
                       "["LPU64"->"LPU64"] flags: "LPX64" remote: "LPX64
                       " expref: %d pid: %u timeout: %lu\n",
                       ldlm_lock_to_ns_name(lock), lock,
                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
                       lock->l_readers, lock->l_writers,
                       ldlm_lockname[lock->l_granted_mode],
                       ldlm_lockname[lock->l_req_mode],
                       lock->l_resource->lr_name.name[0],
                       lock->l_resource->lr_name.name[1],
                       cfs_atomic_read(&lock->l_resource->lr_refcount),
                       ldlm_typename[lock->l_resource->lr_type],
                       lock->l_policy_data.l_flock.pid,
                       lock->l_policy_data.l_flock.start,
                       lock->l_policy_data.l_flock.end,
                       lock->l_flags, lock->l_remote_handle.cookie,
                       lock->l_export ?
                       cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                       lock->l_pid, lock->l_callback_timeout);
                break;

        case LDLM_IBITS:
                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
                                   data->msg_fn, data->msg_line, fmt, args,
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                       "res: "LPU64"/"LPU64" bits "LPX64" rrc: %d type: %s "
                       "flags: "LPX64" remote: "LPX64" expref: %d "
                       "pid: %u timeout: %lu\n",
                       ldlm_lock_to_ns_name(lock),
                       lock, lock->l_handle.h_cookie,
                       cfs_atomic_read(&lock->l_refc),
                       lock->l_readers, lock->l_writers,
                       ldlm_lockname[lock->l_granted_mode],
                       ldlm_lockname[lock->l_req_mode],
                       lock->l_resource->lr_name.name[0],
                       lock->l_resource->lr_name.name[1],
                       lock->l_policy_data.l_inodebits.bits,
                       cfs_atomic_read(&lock->l_resource->lr_refcount),
                       ldlm_typename[lock->l_resource->lr_type],
                       lock->l_flags, lock->l_remote_handle.cookie,
                       lock->l_export ?
                       cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                       lock->l_pid, lock->l_callback_timeout);
                break;

        default:
                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
                                   data->msg_fn, data->msg_line, fmt, args,
                       " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                       "res: "LPU64"/"LPU64" rrc: %d type: %s flags: "LPX64" "
                       "remote: "LPX64" expref: %d pid: %u timeout %lu\n",
                       ldlm_lock_to_ns_name(lock),
                       lock, lock->l_handle.h_cookie,
                       cfs_atomic_read(&lock->l_refc),
                       lock->l_readers, lock->l_writers,
                       ldlm_lockname[lock->l_granted_mode],
                       ldlm_lockname[lock->l_req_mode],
                       lock->l_resource->lr_name.name[0],
                       lock->l_resource->lr_name.name[1],
                       cfs_atomic_read(&lock->l_resource->lr_refcount),
                       ldlm_typename[lock->l_resource->lr_type],
                       lock->l_flags, lock->l_remote_handle.cookie,
                       lock->l_export ?
                       cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                       lock->l_pid, lock->l_callback_timeout);
                break;
        }
        va_end(args);
}
EXPORT_SYMBOL(_ldlm_lock_debug);