/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lock.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
# include <linux/lustre_intent.h>
#else
# include <liblustre.h>
# include <libcfs/kp30.h>
#endif

#include <obd_class.h>
#include "ldlm_internal.h"

//struct lustre_lock ldlm_everything_lock;

/* lock types */
char *ldlm_lockname[] = {
        [0] "--",
        [LCK_EX] "EX",
        [LCK_PW] "PW",
        [LCK_PR] "PR",
        [LCK_CW] "CW",
        [LCK_CR] "CR",
        [LCK_NL] "NL",
        [LCK_GROUP] "GROUP"
};

char *ldlm_typename[] = {
        [LDLM_PLAIN] "PLN",
        [LDLM_EXTENT] "EXT",
        [LDLM_FLOCK] "FLK",
        [LDLM_IBITS] "IBT",
};

char *ldlm_it2str(int it)
{
        switch (it) {
        case IT_OPEN:
                return "open";
        case IT_CREAT:
                return "creat";
        case (IT_OPEN | IT_CREAT):
                return "open|creat";
        case IT_READDIR:
                return "readdir";
        case IT_GETATTR:
                return "getattr";
        case IT_LOOKUP:
                return "lookup";
        case IT_UNLINK:
                return "unlink";
        case IT_GETXATTR:
                return "getxattr";
        default:
                CERROR("Unknown intent %d\n", it);
                return "UNKNOWN";
        }
}

extern cfs_mem_cache_t *ldlm_lock_slab;

static ldlm_processing_policy ldlm_processing_policy_table[] = {
        [LDLM_PLAIN] ldlm_process_plain_lock,
        [LDLM_EXTENT] ldlm_process_extent_lock,
#ifdef __KERNEL__
        [LDLM_FLOCK] ldlm_process_flock_lock,
#endif
        [LDLM_IBITS] ldlm_process_inodebits_lock,
};

ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
{
        return ldlm_processing_policy_table[res->lr_type];
}

void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
{
        ns->ns_policy = arg;
}

/*
 * REFCOUNTED LOCK OBJECTS
 */


/*
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        atomic_inc(&lock->l_refc);
        return lock;
}

static void ldlm_lock_free(struct ldlm_lock *lock, size_t size)
{
        LASSERT(size == sizeof(*lock));
        OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
}

void ldlm_lock_put(struct ldlm_lock *lock)
{
        ENTRY;

        LASSERT(lock->l_resource != LP_POISON);
        LASSERT(atomic_read(&lock->l_refc) > 0);
        if (atomic_dec_and_test(&lock->l_refc)) {
                struct ldlm_resource *res;

                LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing it.");

                res = lock->l_resource;
                LASSERT(lock->l_destroyed);
                LASSERT(list_empty(&lock->l_res_link));
                LASSERT(list_empty(&lock->l_pending_chain));

                atomic_dec(&res->lr_namespace->ns_locks);
                ldlm_resource_putref(res);
                lock->l_resource = NULL;
                if (lock->l_export) {
                        class_export_put(lock->l_export);
                        lock->l_export = NULL;
                }

                if (lock->l_lvb_data != NULL)
                        OBD_FREE(lock->l_lvb_data, lock->l_lvb_len);

                ldlm_interval_free(ldlm_interval_detach(lock));
                OBD_FREE_RCU_CB(lock, sizeof(*lock), &lock->l_handle,
                                ldlm_lock_free);
        }

        EXIT;
}
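
/* A minimal reference-counting sketch (illustrative only; the caller and
 * error handling shown here are assumptions, not code from this file):
 *
 *      struct ldlm_lock *lock = ldlm_handle2lock(lockh);  // takes a ref
 *      if (lock != NULL) {
 *              // ... inspect or use the lock ...
 *              LDLM_LOCK_PUT(lock);                       // drops the ref
 *      }
 *
 * The final LDLM_LOCK_PUT() on a destroyed lock frees it, so a lock must
 * never be touched after the reference pinning it has been dropped. */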

int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
        int rc = 0;
        if (!list_empty(&lock->l_lru)) {
                struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
                list_del_init(&lock->l_lru);
                ns->ns_nr_unused--;
                LASSERT(ns->ns_nr_unused >= 0);
                rc = 1;
        }
        return rc;
}

int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        int rc;
        ENTRY;
        spin_lock(&ns->ns_unused_lock);
        rc = ldlm_lock_remove_from_lru_nolock(lock);
        spin_unlock(&ns->ns_unused_lock);
        EXIT;
        return rc;
}

void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        lock->l_last_used = cfs_time_current();
        LASSERT(list_empty(&lock->l_lru));
        LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
        list_add_tail(&lock->l_lru, &ns->ns_unused_list);
        LASSERT(ns->ns_nr_unused >= 0);
        ns->ns_nr_unused++;
}

void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        ENTRY;
        spin_lock(&ns->ns_unused_lock);
        ldlm_lock_add_to_lru_nolock(lock);
        spin_unlock(&ns->ns_unused_lock);
        EXIT;
}

void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        ENTRY;
        spin_lock(&ns->ns_unused_lock);
        if (!list_empty(&lock->l_lru)) {
                ldlm_lock_remove_from_lru_nolock(lock);
                ldlm_lock_add_to_lru_nolock(lock);
        }
        spin_unlock(&ns->ns_unused_lock);
        EXIT;
}

/* This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock.
 * Because it's not in the hash table anymore.  -phil */
int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
{
        ENTRY;

        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (!list_empty(&lock->l_res_link)) {
                LDLM_ERROR(lock, "lock still on resource");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (lock->l_destroyed) {
                LASSERT(list_empty(&lock->l_lru));
                EXIT;
                return 0;
        }
        lock->l_destroyed = 1;

        if (lock->l_export && lock->l_export->exp_lock_hash)
                lustre_hash_del(lock->l_export->exp_lock_hash,
                                &lock->l_remote_handle, &lock->l_exp_hash);

        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

#if 0
        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        if (lock->l_export)
                class_export_put(lock->l_export);
        lock->l_export = NULL;
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0);
#endif
        EXIT;
        return 1;
}

void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        int first;
        ENTRY;
        lock_res_and_lock(lock);
        first = ldlm_lock_destroy_internal(lock);
        unlock_res_and_lock(lock);

        /* drop reference from hashtable only for first destroy */
        if (first)
                LDLM_LOCK_PUT(lock);
        EXIT;
}

void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
{
        int first;
        ENTRY;
        first = ldlm_lock_destroy_internal(lock);
        /* drop reference from hashtable only for first destroy */
        if (first)
                LDLM_LOCK_PUT(lock);
        EXIT;
}

/* this is called by class_handle2object with the handle lock taken */
static void lock_handle_addref(void *lock)
{
        LDLM_LOCK_GET((struct ldlm_lock *)lock);
}

/*
 * usage: pass in a resource on which you have done ldlm_resource_get
 *        pass in a parent lock on which you have done a ldlm_lock_get
 *        after return, ldlm_*_put the resource and parent
 * returns: lock with refcount 2 - one for current caller and one for remote
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;
        ENTRY;

        if (resource == NULL)
                LBUG();

        OBD_SLAB_ALLOC(lock, ldlm_lock_slab, CFS_ALLOC_IO, sizeof(*lock));
        if (lock == NULL)
                RETURN(NULL);

        spin_lock_init(&lock->l_lock);
        lock->l_resource = ldlm_resource_getref(resource);

        atomic_set(&lock->l_refc, 2);
        CFS_INIT_LIST_HEAD(&lock->l_res_link);
        CFS_INIT_LIST_HEAD(&lock->l_lru);
        CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
        CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
        CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
        cfs_waitq_init(&lock->l_waitq);
        lock->l_blocking_lock = NULL;
        CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
        CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
        CFS_INIT_HLIST_NODE(&lock->l_exp_hash);

        atomic_inc(&resource->lr_namespace->ns_locks);
        CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, lock_handle_addref);

        CFS_INIT_LIST_HEAD(&lock->l_extents_list);
        spin_lock_init(&lock->l_extents_list_lock);
        CFS_INIT_LIST_HEAD(&lock->l_cache_locks_list);
        lock->l_callback_timeout = 0;

        RETURN(lock);
}
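
/* Illustrative caller pattern for ldlm_lock_new(), per the usage comment
 * above (a sketch; the surrounding error handling is elided):
 *
 *      res = ldlm_resource_get(ns, NULL, res_id, type, 1);
 *      lock = ldlm_lock_new(res);   // lock takes its own resource ref
 *      ldlm_resource_putref(res);   // drop the caller's resource ref
 *      // on success, lock has refcount 2: one for the caller, one for
 *      // the handle installed by class_handle_hash()
 *
 * ldlm_lock_create() below follows exactly this pattern. */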

int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              struct ldlm_res_id new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;
        struct ldlm_resource *newres;
        int type;
        ENTRY;

        LASSERT(ns_is_client(ns));

        lock_res_and_lock(lock);
        if (memcmp(&new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                unlock_res_and_lock(lock);
                RETURN(0);
        }

        LASSERT(new_resid.name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(list_empty(&lock->l_res_link));

        type = oldres->lr_type;
        unlock_res_and_lock(lock);

        newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
        if (newres == NULL)
                RETURN(-ENOMEM);

        lock_res_and_lock(lock);
        LASSERT(memcmp(&new_resid, &lock->l_resource->lr_name,
                       sizeof(lock->l_resource->lr_name)) != 0);
        lock_res(newres);
        lock->l_resource = newres;
        unlock_res(oldres);
        unlock_res_and_lock(lock);

        /* ...and the flowers are still standing! */
        ldlm_resource_putref(oldres);

        RETURN(0);
}

/*
 *  HANDLES
 */

void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->cookie = lock->l_handle.h_cookie;
}

/* if flags: atomically get the lock and set the flags.
 *           Return NULL if any of the flags is already set
 */

struct ldlm_lock *__ldlm_handle2lock(struct lustre_handle *handle, int flags)
{
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock = NULL, *retval = NULL;
        ENTRY;

        LASSERT(handle);

        lock = class_handle2object(handle->cookie);
        if (lock == NULL)
                RETURN(NULL);

        LASSERT(lock->l_resource != NULL);
        ns = lock->l_resource->lr_namespace;
        LASSERT(ns != NULL);

        lock_res_and_lock(lock);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if (lock->l_destroyed) {
                unlock_res_and_lock(lock);
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags && (lock->l_flags & flags)) {
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags)
                lock->l_flags |= flags;

        unlock_res_and_lock(lock);
        retval = lock;
        EXIT;
 out:
        return retval;
}
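
/* Sketch of the atomic get-and-set-flags pattern this function provides
 * (the flag chosen here is only an example of a plausible caller):
 *
 *      lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
 *      if (lock == NULL) {
 *              // no such lock, it was destroyed, or LDLM_FL_CANCELING
 *              // was already set: someone else owns the cancel
 *      } else {
 *              // we set LDLM_FL_CANCELING and hold a reference
 *      }
 */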

struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *ns,
                                      struct lustre_handle *handle)
{
        struct ldlm_lock *retval = NULL;
        retval = __ldlm_handle2lock(handle, 0);
        return retval;
}

void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        struct obd_export *exp = lock->l_export?:lock->l_conn_export;
        /* INODEBITS_INTEROP: If the other side does not support
         * inodebits, reply with a plain lock descriptor.
         */
        if ((lock->l_resource->lr_type == LDLM_IBITS) &&
            (exp && !(exp->exp_connect_flags & OBD_CONNECT_IBITS))) {
                struct ldlm_resource res = *lock->l_resource;

                /* Make sure all the right bits are set in this lock we
                   are going to pass to client */
                LASSERTF(lock->l_policy_data.l_inodebits.bits ==
                         (MDS_INODELOCK_LOOKUP|MDS_INODELOCK_UPDATE),
                         "Inappropriate inode lock bits during "
                         "conversion " LPU64 "\n",
                         lock->l_policy_data.l_inodebits.bits);
                res.lr_type = LDLM_PLAIN;
                ldlm_res2desc(&res, &desc->l_resource);
                /* Convert "new" lock mode to something old client can
                   understand */
                if ((lock->l_req_mode == LCK_CR) ||
                    (lock->l_req_mode == LCK_CW))
                        desc->l_req_mode = LCK_PR;
                else
                        desc->l_req_mode = lock->l_req_mode;
                if ((lock->l_granted_mode == LCK_CR) ||
                    (lock->l_granted_mode == LCK_CW)) {
                        desc->l_granted_mode = LCK_PR;
                } else {
                        /* We never grant PW/EX locks to clients */
                        LASSERT((lock->l_granted_mode != LCK_PW) &&
                                (lock->l_granted_mode != LCK_EX));
                        desc->l_granted_mode = lock->l_granted_mode;
                }

                /* We do not copy policy here, because there is no
                   policy for plain locks */
        } else {
                ldlm_res2desc(lock->l_resource, &desc->l_resource);
                desc->l_req_mode = lock->l_req_mode;
                desc->l_granted_mode = lock->l_granted_mode;
                desc->l_policy_data = lock->l_policy_data;
        }
}

void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                           struct list_head *work_list)
{
        if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
                lock->l_flags |= LDLM_FL_AST_SENT;
                /* If the enqueuing client said so, tell the AST recipient to
                 * discard dirty data, rather than writing back. */
                if (new->l_flags & LDLM_AST_DISCARD_DATA)
                        lock->l_flags |= LDLM_FL_DISCARD_DATA;
                LASSERT(list_empty(&lock->l_bl_ast));
                list_add(&lock->l_bl_ast, work_list);
                LDLM_LOCK_GET(lock);
                LASSERT(lock->l_blocking_lock == NULL);
                lock->l_blocking_lock = LDLM_LOCK_GET(new);
        }
}

void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
{
        if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
                lock->l_flags |= LDLM_FL_CP_REQD;
                LDLM_DEBUG(lock, "lock granted; sending completion AST.");
                LASSERT(list_empty(&lock->l_cp_ast));
                list_add(&lock->l_cp_ast, work_list);
                LDLM_LOCK_GET(lock);
        }
}

/* must be called with lr_lock held */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                                struct list_head *work_list)
{
        ENTRY;
        check_res_locked(lock->l_resource);
        if (new)
                ldlm_add_bl_work_item(lock, new, work_list);
        else
                ldlm_add_cp_work_item(lock, work_list);
        EXIT;
}

void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        LASSERT(lock != NULL);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        ldlm_lock_remove_from_lru(lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR))
                lock->l_readers++;
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP))
                lock->l_writers++;
        LDLM_LOCK_GET(lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}

/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        lock_res_and_lock(lock);
        ldlm_lock_addref_internal_nolock(lock, mode);
        unlock_res_and_lock(lock);
}

/* Only called in ldlm_flock_destroy and for local locks.
 * For LDLM_FLOCK type locks, l_blocking_ast is null and
 * ldlm_lock_remove_from_lru() does nothing, so it is safe
 * for ldlm_flock_destroy to use this and skip that code. */
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                LASSERT(lock->l_readers > 0);
                lock->l_readers--;
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP)) {
                LASSERT(lock->l_writers > 0);
                lock->l_writers--;
        }

        LDLM_LOCK_PUT(lock);    /* matches the ldlm_lock_get in addref */
}

void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
        struct ldlm_namespace *ns;
        ENTRY;

        lock_res_and_lock(lock);

        ns = lock->l_resource->lr_namespace;

        ldlm_lock_decref_internal_nolock(lock, mode);

        if (lock->l_flags & LDLM_FL_LOCAL &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                lock->l_flags |= LDLM_FL_CBPENDING;
        }

        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                /* If we received a blocking AST and this was the last
                 * reference, run the callback. */
                if (ns_is_server(ns) && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                unlock_res_and_lock(lock);

                if (lock->l_flags & LDLM_FL_FAIL_LOC)
                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

                if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
                    ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
                        ldlm_handle_bl_callback(ns, NULL, lock);
        } else if (ns_is_client(ns) &&
                   !lock->l_readers && !lock->l_writers &&
                   !(lock->l_flags & LDLM_FL_NO_LRU) &&
                   !(lock->l_flags & LDLM_FL_BL_AST)) {
                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                ldlm_lock_add_to_lru(lock);
                unlock_res_and_lock(lock);
                /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
                 * are not supported by the server, otherwise, it is done on
                 * enqueue. */
                if (!exp_connect_cancelset(lock->l_conn_export) &&
                    !ns_connect_lru_resize(ns))
                        ldlm_cancel_lru(ns, 0, LDLM_ASYNC, 0);
        } else {
                unlock_res_and_lock(lock);
        }

        EXIT;
}

void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
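
/* Reader/writer reference sketch: every addref must be balanced by a
 * decref of the same mode (the caller here is illustrative):
 *
 *      ldlm_lock_addref(lockh, LCK_PR);   // take a reader reference
 *      // ... access data covered by the lock ...
 *      ldlm_lock_decref(lockh, LCK_PR);   // drop it; with no refs left
 *                                         // the lock may go to the LRU
 *                                         // or run its blocking callback
 */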

/* This will drop a lock reference and mark it for destruction, but will not
 * necessarily cancel the lock before returning. */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        ENTRY;

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_CBPENDING;
        unlock_res_and_lock(lock);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

struct sl_insert_point {
        struct list_head *res_link;
        struct list_head *mode_link;
        struct list_head *policy_link;
};

/*
 * search_granted_lock
 *
 * Description:
 *      Finds a position to insert the new lock.
 * Parameters:
 *      queue [input]:  the granted list where search acts on;
 *      req [input]:    the lock whose position to be located;
 *      prev [output]:  positions within 3 lists to insert @req to
 * Return Value:
 *      filled @prev
 * NOTE: called by
 *  - ldlm_grant_lock_with_skiplist
 */
static void search_granted_lock(struct list_head *queue,
                                struct ldlm_lock *req,
                                struct sl_insert_point *prev)
{
        struct list_head *tmp;
        struct ldlm_lock *lock, *mode_end, *policy_end;
        ENTRY;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                mode_end = list_entry(lock->l_sl_mode.prev, struct ldlm_lock,
                                      l_sl_mode);

                if (lock->l_req_mode != req->l_req_mode) {
                        /* jump to last lock of mode group */
                        tmp = &mode_end->l_res_link;
                        continue;
                }

                /* suitable mode group is found */
                if (lock->l_resource->lr_type == LDLM_PLAIN) {
                        /* insert point is last lock of the mode group */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else if (lock->l_resource->lr_type == LDLM_IBITS) {
                        for (;;) {
                                policy_end = list_entry(lock->l_sl_policy.prev,
                                                        struct ldlm_lock,
                                                        l_sl_policy);

                                if (lock->l_policy_data.l_inodebits.bits ==
                                    req->l_policy_data.l_inodebits.bits) {
                                        /* insert point is last lock of
                                         * the policy group */
                                        prev->res_link =
                                                &policy_end->l_res_link;
                                        prev->mode_link =
                                                &policy_end->l_sl_mode;
                                        prev->policy_link =
                                                &policy_end->l_sl_policy;
                                        EXIT;
                                        return;
                                }

                                if (policy_end == mode_end)
                                        /* done with mode group */
                                        break;

                                /* jump to next policy group within the mode group */
                                tmp = policy_end->l_res_link.next;
                                lock = list_entry(tmp, struct ldlm_lock,
                                                  l_res_link);
                        }  /* loop over policy groups within the mode group */

                        /* insert point is last lock of the mode group,
                         * new policy group is started */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else {
                        LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock");
                        LBUG();
                }
        }

        /* insert point is last lock on the queue,
         * new mode group and new policy group are started */
        prev->res_link = queue->prev;
        prev->mode_link = &req->l_sl_mode;
        prev->policy_link = &req->l_sl_policy;
        EXIT;
        return;
}
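
/* Informal picture of the granted-list skiplist that search_granted_lock()
 * walks and ldlm_granted_list_add_lock() maintains (locks L1..L6 are
 * hypothetical):
 *
 *      l_res_link chain:   L1 - L2 - L3 - L4 - L5 - L6
 *      mode groups:        [L1 L2 L3]     [L4 L5 L6]      (l_sl_mode)
 *      policy groups:      [L1 L2] [L3]   [L4] [L5 L6]    (l_sl_policy)
 *
 * Locks with equal l_req_mode form a mode group; within an IBITS mode
 * group, locks with equal inodebits form a policy group.  The l_sl_*
 * links let the search skip a whole group at a time instead of visiting
 * every lock on the resource. */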

static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
                                       struct sl_insert_point *prev)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        ldlm_resource_dump(D_INFO, res);
        CDEBUG(D_OTHER, "About to add this lock:\n");
        ldlm_lock_dump(D_OTHER, lock, 0);

        if (lock->l_destroyed) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                return;
        }

        LASSERT(list_empty(&lock->l_res_link));
        LASSERT(list_empty(&lock->l_sl_mode));
        LASSERT(list_empty(&lock->l_sl_policy));

        list_add(&lock->l_res_link, prev->res_link);
        list_add(&lock->l_sl_mode, prev->mode_link);
        list_add(&lock->l_sl_policy, prev->policy_link);

        EXIT;
}

static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
{
        struct sl_insert_point prev;
        ENTRY;

        LASSERT(lock->l_req_mode == lock->l_granted_mode);

        search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
        ldlm_granted_list_add_lock(lock, &prev);
        EXIT;
}

/* NOTE: called by
 *  - ldlm_lock_enqueue
 *  - ldlm_reprocess_queue
 *  - ldlm_lock_convert
 *
 * must be called with lr_lock held
 */
void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        lock->l_granted_mode = lock->l_req_mode;
        if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
                ldlm_grant_lock_with_skiplist(lock);
        else if (res->lr_type == LDLM_EXTENT)
                ldlm_extent_add_lock(res, lock);
        else
                ldlm_resource_add_lock(res, &res->lr_granted, lock);

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (work_list && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, work_list);

        ldlm_pool_add(&res->lr_namespace->ns_pool, lock);
        EXIT;
}

/* returns a referenced lock or NULL.  See the flag descriptions below, in the
 * comment above ldlm_lock_match */
static struct ldlm_lock *search_queue(struct list_head *queue,
                                      ldlm_mode_t *mode,
                                      ldlm_policy_data_t *policy,
                                      struct ldlm_lock *old_lock, int flags)
{
        struct ldlm_lock *lock;
        struct list_head *tmp;

        list_for_each(tmp, queue) {
                ldlm_mode_t match;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock == old_lock)
                        break;

                /* llite sometimes wants to match locks that will be
                 * canceled when their users drop, but we allow it to match
                 * if it passes in CBPENDING and the lock still has users.
                 * this is generally only going to be used by children
                 * whose parents already hold a lock so forward progress
                 * can still happen. */
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    !(flags & LDLM_FL_CBPENDING))
                        continue;
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    lock->l_readers == 0 && lock->l_writers == 0)
                        continue;

                if (!(lock->l_req_mode & *mode))
                        continue;

                match = lock->l_req_mode;
                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_policy_data.l_extent.start >
                     policy->l_extent.start ||
                     lock->l_policy_data.l_extent.end < policy->l_extent.end))
                        continue;

                if (unlikely(match == LCK_GROUP) &&
                    lock->l_resource->lr_type == LDLM_EXTENT &&
                    lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
                        continue;

                /* We match if we have existing lock with same or wider set
                   of bits. */
                if (lock->l_resource->lr_type == LDLM_IBITS &&
                     ((lock->l_policy_data.l_inodebits.bits &
                      policy->l_inodebits.bits) !=
                      policy->l_inodebits.bits))
                        continue;

                if (lock->l_destroyed || (lock->l_flags & LDLM_FL_FAILED))
                        continue;

                if ((flags & LDLM_FL_LOCAL_ONLY) &&
                    !(lock->l_flags & LDLM_FL_LOCAL))
                        continue;

                if (flags & LDLM_FL_TEST_LOCK) {
                        LDLM_LOCK_GET(lock);
                        ldlm_lock_touch_in_lru(lock);
                } else {
                        ldlm_lock_addref_internal_nolock(lock, match);
                }
                *mode = match;
                return lock;
        }

        return NULL;
}

void ldlm_lock_allow_match(struct ldlm_lock *lock)
{
        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_LVB_READY;
        cfs_waitq_signal(&lock->l_waitq);
        unlock_res_and_lock(lock);
}

int ldlm_lock_fast_match(struct ldlm_lock *lock, int rw,
                         obd_off start, obd_off end,
                         void **cookie)
{
        LASSERT(rw == OBD_BRW_READ || rw == OBD_BRW_WRITE);

        if (!lock)
                return 0;

        lock_res_and_lock(lock);
        /* check if granted mode is compatible */
        if (rw == OBD_BRW_WRITE &&
            !(lock->l_granted_mode & (LCK_PW|LCK_GROUP)))
                goto no_match;

        /* does the lock cover the region we would like to access? */
        if ((lock->l_policy_data.l_extent.start > start) ||
            (lock->l_policy_data.l_extent.end < end))
                goto no_match;

        /* if we received a blocking callback and the lock is no longer
         * referenced, don't use it */
        if ((lock->l_flags & LDLM_FL_CBPENDING) &&
            !lock->l_writers && !lock->l_readers)
                goto no_match;

        ldlm_lock_addref_internal_nolock(lock, rw == OBD_BRW_WRITE ? LCK_PW : LCK_PR);
        unlock_res_and_lock(lock);
        *cookie = (void *)lock;
        return 1; /* avoid using rc for stack relief */

no_match:
        unlock_res_and_lock(lock);
        return 0;
}

void ldlm_lock_fast_release(void *cookie, int rw)
{
        struct ldlm_lock *lock = (struct ldlm_lock *)cookie;

        LASSERT(lock != NULL);
        LASSERT(rw == OBD_BRW_READ || rw == OBD_BRW_WRITE);
        LASSERT(rw == OBD_BRW_READ || (lock->l_granted_mode & (LCK_PW | LCK_GROUP)));
        ldlm_lock_decref_internal(lock, rw == OBD_BRW_WRITE ? LCK_PW : LCK_PR);
}
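
/* Fast-path matching sketch: on success ldlm_lock_fast_match() takes a
 * PR or PW reference and the cookie must be returned through
 * ldlm_lock_fast_release() with the same rw mode (illustrative caller):
 *
 *      void *cookie;
 *      if (ldlm_lock_fast_match(lock, OBD_BRW_READ, start, end, &cookie)) {
 *              // ... perform the read covered by [start, end] ...
 *              ldlm_lock_fast_release(cookie, OBD_BRW_READ);
 *      }
 */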

/* Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (ie, connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 *     to be canceled can still be matched as long as they still have reader
 *     or writer references
 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
 *     just tell us if we would have matched.
 *
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock
 */
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                            struct ldlm_res_id *res_id, ldlm_type_t type,
                            ldlm_policy_data_t *policy, ldlm_mode_t mode,
                            struct lustre_handle *lockh)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock, *old_lock = NULL;
        int rc = 0;
        ENTRY;

        if (ns == NULL) {
                old_lock = ldlm_handle2lock(lockh);
                LASSERT(old_lock);

                ns = old_lock->l_resource->lr_namespace;
                res_id = &old_lock->l_resource->lr_name;
                type = old_lock->l_resource->lr_type;
                mode = old_lock->l_req_mode;
        }

        res = ldlm_resource_get(ns, NULL, *res_id, type, 0);
        if (res == NULL) {
                LASSERT(old_lock == NULL);
                RETURN(0);
        }

        lock_res(res);

        lock = search_queue(&res->lr_granted, &mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);
        if (flags & LDLM_FL_BLOCK_GRANTED)
                GOTO(out, rc = 0);
        lock = search_queue(&res->lr_converting, &mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);
        lock = search_queue(&res->lr_waiting, &mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);

        EXIT;
 out:
        unlock_res(res);
        ldlm_resource_putref(res);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                if ((flags & LDLM_FL_LVB_READY) && (!(lock->l_flags & LDLM_FL_LVB_READY))) {
                        struct l_wait_info lwi;
                        if (lock->l_completion_ast) {
                                int err = lock->l_completion_ast(lock,
                                                          LDLM_FL_WAIT_NOREPROC,
                                                                 NULL);
                                if (err) {
                                        if (flags & LDLM_FL_TEST_LOCK)
                                                LDLM_LOCK_PUT(lock);
                                        else
                                                ldlm_lock_decref_internal(lock, mode);
                                        rc = 0;
                                        goto out2;
                                }
                        }

                        lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout), NULL,
                                               LWI_ON_SIGNAL_NOOP, NULL);

                        /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
                        l_wait_event(lock->l_waitq,
                                     (lock->l_flags & LDLM_FL_LVB_READY), &lwi);
                }
        }
 out2:
        if (rc) {
                LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")",
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[2] : policy->l_extent.start,
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[3] : policy->l_extent.end);
        } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
                LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
                                  LPU64"/"LPU64" ("LPU64" "LPU64")", ns,
                                  type, mode, res_id->name[0], res_id->name[1],
                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[2] :policy->l_extent.start,
                                (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[3] : policy->l_extent.end);
        }
        if (old_lock)
                LDLM_LOCK_PUT(old_lock);
        if (flags & LDLM_FL_TEST_LOCK && rc)
                LDLM_LOCK_PUT(lock);

        return rc ? mode : 0;
}
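
/* Typical client-side match sketch (the flags, extent and modes here are
 * illustrative, not taken from this file):
 *
 *      ldlm_policy_data_t policy = { .l_extent = { start, end } };
 *      ldlm_mode_t mode;
 *
 *      mode = ldlm_lock_match(ns, LDLM_FL_LVB_READY, &res_id, LDLM_EXTENT,
 *                             &policy, LCK_PR | LCK_PW, &lockh);
 *      if (mode != 0) {
 *              // lockh references a lock of mode 'mode'; balance with
 *              // ldlm_lock_decref(&lockh, mode) when done
 *      }
 */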

/* Returns a referenced lock */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   struct ldlm_res_id res_id, ldlm_type_t type,
                                   ldlm_mode_t mode,
                                   ldlm_blocking_callback blocking,
                                   ldlm_completion_callback completion,
                                   ldlm_glimpse_callback glimpse,
                                   void *data, __u32 lvb_len)
{
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        ENTRY;

        res = ldlm_resource_get(ns, NULL, res_id, type, 1);
        if (res == NULL)
                RETURN(NULL);

        lock = ldlm_lock_new(res);
        ldlm_resource_putref(res);

        if (lock == NULL)
                RETURN(NULL);

        lock->l_req_mode = mode;
        lock->l_ast_data = data;
        lock->l_blocking_ast = blocking;
        lock->l_completion_ast = completion;
        lock->l_glimpse_ast = glimpse;
        lock->l_pid = cfs_curproc_pid();

        lock->l_tree_node = NULL;
        /* if this is the extent lock, allocate the interval tree node */
        if (type == LDLM_EXTENT) {
                if (ldlm_interval_alloc(lock) == NULL)
                        GOTO(out, 0);
        }

        if (lvb_len) {
                lock->l_lvb_len = lvb_len;
                OBD_ALLOC(lock->l_lvb_data, lvb_len);
                if (lock->l_lvb_data == NULL)
                        GOTO(out, 0);
        }

        RETURN(lock);

out:
        if (lock->l_lvb_data)
                OBD_FREE(lock->l_lvb_data, lvb_len);
        ldlm_interval_free(ldlm_interval_detach(lock));
        OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
        return NULL;
}

ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                               struct ldlm_lock **lockp,
                               void *cookie, int *flags)
{
        struct ldlm_lock *lock = *lockp;
        struct ldlm_resource *res = lock->l_resource;
        int local = ns_is_client(res->lr_namespace);
        ldlm_processing_policy policy;
        ldlm_error_t rc = ELDLM_OK;
        struct ldlm_interval *node = NULL;
        ENTRY;

        do_gettimeofday(&lock->l_enqueued_time);
        /* policies are not executed on the client or during replay */
        if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
            && !local && ns->ns_policy) {
                rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
                                   NULL);
                if (rc == ELDLM_LOCK_REPLACED) {
                        /* The lock that was returned has already been granted,
                         * and placed into lockp.  If it's not the same as the
                         * one we passed in, then destroy the old one and our
                         * work here is done. */
                        if (lock != *lockp) {
                                ldlm_lock_destroy(lock);
                                LDLM_LOCK_PUT(lock);
                        }
                        *flags |= LDLM_FL_LOCK_CHANGED;
                        RETURN(0);
                } else if (rc != ELDLM_OK ||
                           (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
                        ldlm_lock_destroy(lock);
                        RETURN(rc);
                }
        }

        /* For a replaying lock, it might already be in the granted list, so
         * unlinking the lock will cause the interval node to be freed.  We
         * have to allocate the interval node early, otherwise we can't regrant
         * this lock in the future. - jay */
        if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
                OBD_SLAB_ALLOC(node, ldlm_interval_slab, CFS_ALLOC_IO,
                               sizeof(*node));

        lock_res_and_lock(lock);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted
                 * before we got a chance to actually enqueue it.  We don't
                 * need to do anything else. */
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                            LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
                GOTO(out, ELDLM_OK);
        }

        ldlm_resource_unlink_lock(lock);
        if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) {
                LASSERT(!local && (*flags & LDLM_FL_REPLAY));
                if (node == NULL) {
                        ldlm_lock_destroy_nolock(lock);
                        GOTO(out, rc = -ENOMEM);
                }

                CFS_INIT_LIST_HEAD(&node->li_group);
                ldlm_interval_attach(node, lock);
                node = NULL;
        }

        /* Some flags from the enqueue want to make it into the AST, via the
         * lock's l_flags. */
        lock->l_flags |= *flags & LDLM_AST_DISCARD_DATA;

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below).  In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags'.
         *
         * We do exactly the same thing during recovery, when the server is
         * more or less trusting the clients not to lie.
         *
         * FIXME (bug 268): Detect obvious lies by checking compatibility in
         * granted/converting queues. */
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                else
                        ldlm_grant_lock(lock, NULL);
                GOTO(out, ELDLM_OK);
        } else if (*flags & LDLM_FL_REPLAY) {
                if (*flags & LDLM_FL_BLOCK_CONV) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_WAIT) {
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
                        ldlm_grant_lock(lock, NULL);
                        GOTO(out, ELDLM_OK);
                }
                /* If no flags, fall through to normal enqueue path. */
        }

        policy = ldlm_processing_policy_table[res->lr_type];
        policy(lock, flags, 1, &rc, NULL);
        GOTO(out, rc);
out:
        unlock_res_and_lock(lock);
        if (node)
                OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
        return rc;
}

/* Must be called with namespace taken: queue is waiting or converting. */
int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
                         struct list_head *work_list)
{
        struct list_head *tmp, *pos;
        ldlm_processing_policy policy;
        int flags;
        int rc = LDLM_ITER_CONTINUE;
        ldlm_error_t err;
        ENTRY;

        check_res_locked(res);

        policy = ldlm_processing_policy_table[res->lr_type];
        LASSERT(policy);

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;
                pending = list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                flags = 0;
                rc = policy(pending, &flags, 0, &err, work_list);
                if (rc != LDLM_ITER_CONTINUE)
                        break;
        }

        RETURN(rc);
}

/* Helper function for pair ldlm_run_{bl,cp}_ast_work().
 *
 * Send an existing rpc set specified by @arg->set and then
 * destroy it. Create new one if @do_create flag is set. */
static void
ldlm_send_and_maybe_create_set(struct ldlm_cb_set_arg *arg, int do_create)
{
        int rc;

        rc = ptlrpc_set_wait(arg->set);
        if (arg->type == LDLM_BL_CALLBACK)
                OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2);
        ptlrpc_set_destroy(arg->set);

        if (do_create)
                arg->set = ptlrpc_prep_set();
}

int ldlm_run_bl_ast_work(struct list_head *rpc_list)
{
        struct ldlm_cb_set_arg arg;
        struct list_head *tmp, *pos;
        struct ldlm_lock_desc d;
        int ast_count;
        int rc = 0;
        ENTRY;

        arg.set = ptlrpc_prep_set();
        atomic_set(&arg.restart, 0);
        arg.type = LDLM_BL_CALLBACK;

        ast_count = 0;
        list_for_each_safe(tmp, pos, rpc_list) {
                struct ldlm_lock *lock =
                        list_entry(tmp, struct ldlm_lock, l_bl_ast);

                /* nobody should touch l_bl_ast */
                lock_res_and_lock(lock);
                list_del_init(&lock->l_bl_ast);

                LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
                LASSERT(lock->l_bl_ast_run == 0);
                LASSERT(lock->l_blocking_lock);
                lock->l_bl_ast_run++;
                unlock_res_and_lock(lock);

                ldlm_lock2desc(lock->l_blocking_lock, &d);

                LDLM_LOCK_PUT(lock->l_blocking_lock);
                lock->l_blocking_lock = NULL;
                rc = lock->l_blocking_ast(lock, &d, (void *)&arg,
                                          LDLM_CB_BLOCKING);
                LDLM_LOCK_PUT(lock);
                ast_count++;

                /* Send the request set if it exceeds the PARALLEL_AST_LIMIT,
                 * and create a new set for requests that remained in
                 * @rpc_list */
                if (unlikely(ast_count == PARALLEL_AST_LIMIT)) {
                        ldlm_send_and_maybe_create_set(&arg, 1);
                        ast_count = 0;
                }
        }

        if (ast_count > 0)
                ldlm_send_and_maybe_create_set(&arg, 0);
        else
                /* In case the number of ASTs is a multiple of
                 * PARALLEL_AST_LIMIT, or @rpc_list was initially empty,
                 * @arg.set must be destroyed here, otherwise we would
                 * leak its memory. */
1379                 ptlrpc_set_destroy(arg.set);
1380
1381         RETURN(atomic_read(&arg.restart) ? -ERESTART : 0);
1382 }
1383
1384 int ldlm_run_cp_ast_work(struct list_head *rpc_list)
1385 {
1386         struct ldlm_cb_set_arg arg;
1387         struct list_head *tmp, *pos;
1388         int ast_count;
1389         int rc = 0;
1390         ENTRY;
1391
1392         arg.set = ptlrpc_prep_set();
1393         atomic_set(&arg.restart, 0);
1394         arg.type = LDLM_CP_CALLBACK;
1395
1396         /* It's possible to receive a completion AST before we've set
1397          * the l_completion_ast pointer: either because the AST arrived
1398          * before the reply, or simply because there's a small race
1399          * window between receiving the reply and finishing the local
1400          * enqueue. (bug 842)
1401          *
1402          * This can't happen with the blocking_ast, however, because we
1403          * will never call the local blocking_ast until we drop our
1404          * reader/writer reference, which we won't do until we get the
1405          * reply and finish enqueueing. */
1406
1407         ast_count = 0;
1408         list_for_each_safe(tmp, pos, rpc_list) {
1409                 struct ldlm_lock *lock =
1410                         list_entry(tmp, struct ldlm_lock, l_cp_ast);
1411                 ldlm_completion_callback completion_callback;
1412
1413                 /* nobody should touch l_cp_ast */
1414                 lock_res_and_lock(lock);
1415                 list_del_init(&lock->l_cp_ast);
1416                 LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
1417                 /* save l_completion_ast since it can be changed by
1418                  * mds_intent_policy(), see bug 14225 */
1419                 completion_callback = lock->l_completion_ast;
1420                 lock->l_flags &= ~LDLM_FL_CP_REQD;
1421                 unlock_res_and_lock(lock);
1422
1423                 if (completion_callback != NULL) {
1424                         rc = completion_callback(lock, 0, (void *)&arg);
1425                         ast_count++;
1426                 }
1427                 LDLM_LOCK_PUT(lock);
1428
1429                 /* Send the request set once it reaches PARALLEL_AST_LIMIT,
1430                  * and create a new set for the requests remaining in
1431                  * @rpc_list */
1432                 if (unlikely(ast_count == PARALLEL_AST_LIMIT)) {
1433                         ldlm_send_and_maybe_create_set(&arg, 1);
1434                         ast_count = 0;
1435                 }
1436         }
1437
1438         if (ast_count > 0)
1439                 ldlm_send_and_maybe_create_set(&arg, 0);
1440         else
1441                 /* If the number of ASTs is a multiple of
1442                  * PARALLEL_AST_LIMIT, or @rpc_list was initially empty,
1443                  * @arg.set must be destroyed here, otherwise we would
1444                  * leak memory. */
1445                 ptlrpc_set_destroy(arg.set);
1446
1447         RETURN(atomic_read(&arg.restart) ? -ERESTART : 0);
1448 }
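
/*
 * Note: the rc collected from each completion callback above is not
 * propagated; as with the blocking-AST work function above, the return
 * value only reflects whether arg.restart was raised while the request
 * set was in flight.
 */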
1449
1450 static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
1451 {
1452         ldlm_reprocess_all(res);
1453         return LDLM_ITER_CONTINUE;
1454 }
1455
1456 void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
1457 {
1458         struct list_head *tmp;
1459         int i, rc;
1460
1461         ENTRY;
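        /* Walk every hash chain in the namespace.  A reference is taken
         * on each resource before ns_hash_lock is dropped, so the
         * resource cannot disappear while it is reprocessed without the
         * lock held. */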
1462         spin_lock(&ns->ns_hash_lock);
1463         for (i = 0; i < RES_HASH_SIZE; i++) {
1464                 tmp = ns->ns_hash[i].next;
1465                 while (tmp != &(ns->ns_hash[i])) {
1466                         struct ldlm_resource *res =
1467                                 list_entry(tmp, struct ldlm_resource, lr_hash);
1468
1469                         ldlm_resource_getref(res);
1470                         spin_unlock(&ns->ns_hash_lock);
1471
1472                         rc = reprocess_one_queue(res, NULL);
1473
1474                         spin_lock(&ns->ns_hash_lock);
1475                         tmp = tmp->next;
1476                         ldlm_resource_putref_locked(res);
1477
1478                         if (rc == LDLM_ITER_STOP)
1479                                 GOTO(out, rc);
1480                 }
1481         }
1482  out:
1483         spin_unlock(&ns->ns_hash_lock);
1484         EXIT;
1485 }
1486
1487 void ldlm_reprocess_all(struct ldlm_resource *res)
1488 {
1489         CFS_LIST_HEAD(rpc_list);
1490         int rc;
1491         ENTRY;
1492
1493         /* Local lock trees don't get reprocessed. */
1494         if (ns_is_client(res->lr_namespace)) {
1495                 EXIT;
1496                 return;
1497         }
1498
1499  restart:
1500         lock_res(res);
1501         rc = ldlm_reprocess_queue(res, &res->lr_converting, &rpc_list);
1502         if (rc == LDLM_ITER_CONTINUE)
1503                 ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
1504         unlock_res(res);
1505
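        /* ldlm_run_cp_ast_work() returns -ERESTART when arg.restart was
         * raised while the ASTs were being sent; in that case the
         * converting and waiting queues are reprocessed from scratch. */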
1506         rc = ldlm_run_cp_ast_work(&rpc_list);
1507         if (rc == -ERESTART) {
1508                 LASSERT(list_empty(&rpc_list));
1509                 goto restart;
1510         }
1511         EXIT;
1512 }
1513
1514 void ldlm_cancel_callback(struct ldlm_lock *lock)
1515 {
1516         check_res_locked(lock->l_resource);
1517         if (!(lock->l_flags & LDLM_FL_CANCEL)) {
1518                 lock->l_flags |= LDLM_FL_CANCEL;
1519                 if (lock->l_blocking_ast) {
1520                         // l_check_no_ns_lock(ns);
1521                         unlock_res_and_lock(lock);
1522                         lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
1523                                              LDLM_CB_CANCELING);
1524                         lock_res_and_lock(lock);
1525                 } else {
1526                         LDLM_DEBUG(lock, "no blocking ast");
1527                 }
1528         }
1529         lock->l_flags |= LDLM_FL_BL_DONE;
1530 }
1531
1532 void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
1533 {
1534         if (req->l_resource->lr_type != LDLM_PLAIN &&
1535             req->l_resource->lr_type != LDLM_IBITS)
1536                 return;
1537
1538         list_del_init(&req->l_sl_policy);
1539         list_del_init(&req->l_sl_mode);
1540 }
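
/*
 * Only LDLM_PLAIN and LDLM_IBITS locks sit on the l_sl_mode/l_sl_policy
 * skiplists, hence the early return above for other resource types.
 */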
1541
1542 void ldlm_lock_cancel(struct ldlm_lock *lock)
1543 {
1544         struct ldlm_resource *res;
1545         struct ldlm_namespace *ns;
1546         ENTRY;
1547
1548         lock_res_and_lock(lock);
1549
1550         res = lock->l_resource;
1551         ns = res->lr_namespace;
1552
1553         /* Please do not, no matter how tempting, remove this LBUG without
1554          * talking to me first. -phik */
1555         if (lock->l_readers || lock->l_writers) {
1556                 LDLM_ERROR(lock, "lock still has references");
1557                 LBUG();
1558         }
1559
1560         ldlm_del_waiting_lock(lock);
1561
1562         /* May drop and re-take the res lock around the blocking AST */
1563         ldlm_cancel_callback(lock);
1564
1565         /* Yes, a second time: the lock may have been re-added to the
1566            waiting list while ldlm_cancel_callback ran without the res lock */
1567         ldlm_del_waiting_lock(lock);
1568         ldlm_resource_unlink_lock(lock);
1569         ldlm_lock_destroy_nolock(lock);
1570
1571         if (lock->l_granted_mode == lock->l_req_mode)
1572                 ldlm_pool_del(&ns->ns_pool, lock);
1573
1574         /* Make sure we will not be called again for the same lock, which is
1575          * possible unless lock->l_granted_mode is zeroed out */
1576         lock->l_granted_mode = 0;
1577         unlock_res_and_lock(lock);
1578
1579         EXIT;
1580 }
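
/*
 * Callers normally follow ldlm_lock_cancel() with ldlm_reprocess_all()
 * on the resource so that blocked waiters can be granted; see
 * ldlm_cancel_locks_for_export_cb() below for an in-tree example.
 */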
1581
1582 int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
1583 {
1584         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
1585         ENTRY;
1586
1587         if (lock == NULL)
1588                 RETURN(-EINVAL);
1589
1590         lock->l_ast_data = data;
1591         LDLM_LOCK_PUT(lock);
1592         RETURN(0);
1593 }
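
/*
 * Minimal usage sketch for ldlm_lock_set_data(), illustrative only and
 * not part of the original source; the caller, its handle "lockh" and
 * payload "my_data" are hypothetical:
 */
#if 0
static void example_attach_ast_data(struct lustre_handle *lockh,
                                    void *my_data)
{
        /* -EINVAL means the handle no longer maps to a live lock */
        if (ldlm_lock_set_data(lockh, my_data) == -EINVAL)
                CERROR("stale lock handle\n");
}
#endif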
1594
1595 void ldlm_cancel_locks_for_export_cb(void *obj, void *data)
1596 {
1597         struct obd_export     *exp = data;
1598         struct ldlm_lock      *lock = obj;
1599         struct ldlm_resource  *res;
1600
1601         res = ldlm_resource_getref(lock->l_resource);
1602         LDLM_LOCK_GET(lock);
1603
1604         LDLM_DEBUG(lock, "export %p", exp);
1605         ldlm_res_lvbo_update(res, NULL, 0, 1);
1606
1607         ldlm_lock_cancel(lock);
1608         ldlm_reprocess_all(res);
1609
1610         ldlm_resource_putref(res);
1611         LDLM_LOCK_PUT(lock);
1612 }
1613
1614 void ldlm_cancel_locks_for_export(struct obd_export *exp)
1615 {
1616         lustre_hash_for_each_empty(exp->exp_lock_hash,
1617                                    ldlm_cancel_locks_for_export_cb, exp);
1618 }
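
/*
 * Note: lustre_hash_for_each_empty(), as the name suggests, keeps
 * iterating until exp_lock_hash is empty; each callback invocation
 * above cancels its lock, which removes it from the export's hash.
 * (Inferred from the usage here; the helper is defined elsewhere.)
 */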
1619
1620 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
1621                                         __u32 *flags)
1622 {
1623         CFS_LIST_HEAD(rpc_list);
1624         struct ldlm_resource *res;
1625         struct ldlm_namespace *ns;
1626         int granted = 0;
1627         int old_mode, rc;
1628         struct sl_insert_point prev;
1629         ldlm_error_t err;
1630         struct ldlm_interval *node;
1631         ENTRY;
1632
1633         if (new_mode == lock->l_granted_mode) { // No changes? Just return.
1634                 *flags |= LDLM_FL_BLOCK_GRANTED;
1635                 RETURN(lock->l_resource);
1636         }
1637
1638         /* The lock type can't be checked here because the lock's bitlock
1639          * is not held, so do the allocation blindly. -jay */
1640         OBD_SLAB_ALLOC(node, ldlm_interval_slab, CFS_ALLOC_IO, sizeof(*node));
1641         if (node == NULL)  /* Actually, this causes EDEADLOCK to be returned */
1642                 RETURN(NULL);
1643
1644         LASSERTF(new_mode == LCK_PW && lock->l_granted_mode == LCK_PR,
1645                  "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
1646
1647         lock_res_and_lock(lock);
1648
1649         res = lock->l_resource;
1650         ns = res->lr_namespace;
1651
1652         old_mode = lock->l_req_mode;
1653         lock->l_req_mode = new_mode;
1654         if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
1655                 /* remember the position where the lock may later be added
1656                  * back to the granted list, and also remember the join
1657                  * mode for skiplist fixing. */
1658                 prev.res_link = lock->l_res_link.prev;
1659                 prev.mode_link = lock->l_sl_mode.prev;
1660                 prev.policy_link = lock->l_sl_policy.prev;
1661                 ldlm_resource_unlink_lock(lock);
1662         } else {
1663                 ldlm_resource_unlink_lock(lock);
1664                 if (res->lr_type == LDLM_EXTENT) {
1665                         /* FIXME: ugly code; the lock has to be attached to
1666                          * an interval node again since it may be granted
1667                          * soon */
1668                         CFS_INIT_LIST_HEAD(&node->li_group);
1669                         ldlm_interval_attach(node, lock);
1670                         node = NULL;
1671                 }
1672         }
1673
1674         /* If this is a local resource, put it on the appropriate list. */
1675         if (ns_is_client(res->lr_namespace)) {
1676                 if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
1677                         ldlm_resource_add_lock(res, &res->lr_converting, lock);
1678                 } else {
1679                         /* This should never happen, because of the way the
1680                          * server handles conversions. */
1681                         LDLM_ERROR(lock, "Erroneous flags %x on local lock\n",
1682                                    *flags);
1683                         LBUG();
1684
1685                         ldlm_grant_lock(lock, &rpc_list);
1686                         granted = 1;
1687                         /* FIXME: completion handling should not run with the ns_lock held! */
1688                         if (lock->l_completion_ast)
1689                                 lock->l_completion_ast(lock, 0, NULL);
1690                 }
1691         } else {
1692                 int pflags = 0;
1693                 ldlm_processing_policy policy;
1694                 policy = ldlm_processing_policy_table[res->lr_type];
1695                 rc = policy(lock, &pflags, 0, &err, &rpc_list);
1696                 if (rc == LDLM_ITER_STOP) {
1697                         lock->l_req_mode = old_mode;
1698                         if (res->lr_type == LDLM_EXTENT)
1699                                 ldlm_extent_add_lock(res, lock);
1700                         else
1701                                 ldlm_granted_list_add_lock(lock, &prev);
1702
1703                         res = NULL;
1704                 } else {
1705                         *flags |= LDLM_FL_BLOCK_GRANTED;
1706                         granted = 1;
1707                 }
1708         }
1709         unlock_res_and_lock(lock);
1710
1711         if (granted)
1712                 ldlm_run_cp_ast_work(&rpc_list);
1713         if (node)
1714                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
1715         RETURN(res);
1716 }
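
/*
 * Conversion usage sketch, illustrative only and not part of the
 * original source.  Per the LASSERTF above, this path only supports
 * converting a granted LCK_PR lock to LCK_PW.
 */
#if 0
static void example_convert_pr_to_pw(struct ldlm_lock *lock)
{
        __u32 flags = 0;

        if (ldlm_lock_convert(lock, LCK_PW, &flags) == NULL) {
                /* interval node allocation failed; callers treat this
                 * as a deadlock (see the comment near OBD_SLAB_ALLOC) */
                return;
        }
        if (flags & LDLM_FL_BLOCK_GRANTED) {
                /* wait for the completion AST before relying on the
                 * converted mode */
        }
}
#endif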
1717
1718 void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
1719 {
1720         struct obd_device *obd = NULL;
1721
1722         if (!((libcfs_debug | D_ERROR) & level))
1723                 return;
1724
1725         if (!lock) {
1726                 CDEBUG(level, "  NULL LDLM lock\n");
1727                 return;
1728         }
1729
1730         CDEBUG(level, " -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
1731                lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
1732                pos, lock->l_pid);
1733         if (lock->l_conn_export != NULL)
1734                 obd = lock->l_conn_export->exp_obd;
1735         if (lock->l_export && lock->l_export->exp_connection) {
1736                 CDEBUG(level, "  Node: NID %s (rhandle: "LPX64")\n",
1737                      libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid),
1738                      lock->l_remote_handle.cookie);
1739         } else if (obd == NULL) {
1740                 CDEBUG(level, "  Node: local\n");
1741         } else {
1742                 struct obd_import *imp = obd->u.cli.cl_import;
1743                 CDEBUG(level, "  Node: NID %s (rhandle: "LPX64")\n",
1744                        libcfs_nid2str(imp->imp_connection->c_peer.nid),
1745                        lock->l_remote_handle.cookie);
1746         }
1747         CDEBUG(level, "  Resource: %p ("LPU64"/"LPU64")\n", lock->l_resource,
1748                lock->l_resource->lr_name.name[0],
1749                lock->l_resource->lr_name.name[1]);
1750         CDEBUG(level, "  Req mode: %s, grant mode: %s, rc: %u, read: %d, "
1751                "write: %d flags: "LPX64"\n", ldlm_lockname[lock->l_req_mode],
1752                ldlm_lockname[lock->l_granted_mode],
1753                atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
1754                lock->l_flags);
1755         if (lock->l_resource->lr_type == LDLM_EXTENT)
1756                 CDEBUG(level, "  Extent: "LPU64" -> "LPU64
1757                        " (req "LPU64"-"LPU64")\n",
1758                        lock->l_policy_data.l_extent.start,
1759                        lock->l_policy_data.l_extent.end,
1760                        lock->l_req_extent.start, lock->l_req_extent.end);
1761         else if (lock->l_resource->lr_type == LDLM_FLOCK)
1762                 CDEBUG(level, "  Pid: %d Extent: "LPU64" -> "LPU64"\n",
1763                        lock->l_policy_data.l_flock.pid,
1764                        lock->l_policy_data.l_flock.start,
1765                        lock->l_policy_data.l_flock.end);
1766         else if (lock->l_resource->lr_type == LDLM_IBITS)
1767                 CDEBUG(level, "  Bits: "LPX64"\n",
1768                        lock->l_policy_data.l_inodebits.bits);
1769 }
1770
1771 void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
1772 {
1773         struct ldlm_lock *lock;
1774
1775         if (!((libcfs_debug | D_ERROR) & level))
1776                 return;
1777
1778         lock = ldlm_handle2lock(lockh);
1779         if (lock == NULL)
1780                 return;
1781
1782         ldlm_lock_dump(D_OTHER, lock, 0);
1783
1784         LDLM_LOCK_PUT(lock);
1785 }
1786
1787 void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 level,
1788                       struct libcfs_debug_msg_data *data, const char *fmt,
1789                       ...)
1790 {
1791         va_list args;
1792         cfs_debug_limit_state_t *cdls = data->msg_cdls;
1793
1794         va_start(args, fmt);
1795         if (lock->l_resource == NULL) {
1796                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
1797                                    data->msg_fn, data->msg_line, fmt, args,
1798                                    " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1799                                    "res: \?\? rrc=\?\? type: \?\?\? flags: "LPX64" remote: "
1800                                    LPX64" expref: %d pid: %u timeout: %lu\n", lock,
1801                                    lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
1802                                    lock->l_readers, lock->l_writers,
1803                                    ldlm_lockname[lock->l_granted_mode],
1804                                    ldlm_lockname[lock->l_req_mode],
1805                                    lock->l_flags, lock->l_remote_handle.cookie,
1806                                    lock->l_export ?
1807                                         atomic_read(&lock->l_export->exp_refcount) : -99,
1808                                    lock->l_pid, lock->l_callback_timeout);
1809                 va_end(args);
1810                 return;
1811         }
1812
1813         switch (lock->l_resource->lr_type) {
1814         case LDLM_EXTENT:
1815                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
1816                                    data->msg_fn, data->msg_line, fmt, args,
1817                                    " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1818                                    "res: "LPU64"/"LPU64" rrc: %d type: %s ["LPU64"->"LPU64
1819                                    "] (req "LPU64"->"LPU64") flags: "LPX64" remote: "LPX64
1820                                     " expref: %d pid: %u timeout: %lu\n",
1821                                     lock->l_resource->lr_namespace->ns_name, lock,
1822                                     lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
1823                                     lock->l_readers, lock->l_writers,
1824                                     ldlm_lockname[lock->l_granted_mode],
1825                                     ldlm_lockname[lock->l_req_mode],
1826                                     lock->l_resource->lr_name.name[0],
1827                                     lock->l_resource->lr_name.name[1],
1828                                     atomic_read(&lock->l_resource->lr_refcount),
1829                                     ldlm_typename[lock->l_resource->lr_type],
1830                                     lock->l_policy_data.l_extent.start,
1831                                     lock->l_policy_data.l_extent.end,
1832                                     lock->l_req_extent.start, lock->l_req_extent.end,
1833                                     lock->l_flags, lock->l_remote_handle.cookie,
1834                                     lock->l_export ?
1835                                         atomic_read(&lock->l_export->exp_refcount) : -99,
1836                                     lock->l_pid, lock->l_callback_timeout);
1837                 break;
1838         case LDLM_FLOCK:
1839                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
1840                                    data->msg_fn, data->msg_line, fmt, args,
1841                                    " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1842                                    "res: "LPU64"/"LPU64" rrc: %d type: %s pid: %d "
1843                                    "["LPU64"->"LPU64"] flags: "LPX64" remote: "LPX64
1844                                    " expref: %d pid: %u timeout: %lu\n",
1845                                    lock->l_resource->lr_namespace->ns_name, lock,
1846                                    lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
1847                                    lock->l_readers, lock->l_writers,
1848                                    ldlm_lockname[lock->l_granted_mode],
1849                                    ldlm_lockname[lock->l_req_mode],
1850                                    lock->l_resource->lr_name.name[0],
1851                                    lock->l_resource->lr_name.name[1],
1852                                    atomic_read(&lock->l_resource->lr_refcount),
1853                                    ldlm_typename[lock->l_resource->lr_type],
1854                                    lock->l_policy_data.l_flock.pid,
1855                                    lock->l_policy_data.l_flock.start,
1856                                    lock->l_policy_data.l_flock.end,
1857                                    lock->l_flags, lock->l_remote_handle.cookie,
1858                                    lock->l_export ?
1859                                         atomic_read(&lock->l_export->exp_refcount) : -99,
1860                                    lock->l_pid, lock->l_callback_timeout);
1861                 break;
1862         case LDLM_IBITS:
1863                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
1864                                    data->msg_fn, data->msg_line, fmt, args,
1865                                    " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1866                                    "res: "LPU64"/"LPU64" bits "LPX64" rrc: %d type: %s "
1867                                    "flags: "LPX64" remote: "LPX64" expref: %d "
1868                                    "pid: %u timeout: %lu\n",
1869                                    lock->l_resource->lr_namespace->ns_name,
1870                                    lock, lock->l_handle.h_cookie,
1871                                    atomic_read(&lock->l_refc),
1872                                    lock->l_readers, lock->l_writers,
1873                                    ldlm_lockname[lock->l_granted_mode],
1874                                    ldlm_lockname[lock->l_req_mode],
1875                                    lock->l_resource->lr_name.name[0],
1876                                    lock->l_resource->lr_name.name[1],
1877                                    lock->l_policy_data.l_inodebits.bits,
1878                                    atomic_read(&lock->l_resource->lr_refcount),
1879                                    ldlm_typename[lock->l_resource->lr_type],
1880                                    lock->l_flags, lock->l_remote_handle.cookie,
1881                                    lock->l_export ?
1882                                         atomic_read(&lock->l_export->exp_refcount) : -99,
1883                                    lock->l_pid, lock->l_callback_timeout);
1884                 break;
1885         default:
1886                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
1887                                    data->msg_fn, data->msg_line, fmt, args,
1888                                    " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1889                                    "res: "LPU64"/"LPU64" rrc: %d type: %s flags: "LPX64" "
1890                                    "remote: "LPX64" expref: %d pid: %u timeout: %lu\n",
1891                                    lock->l_resource->lr_namespace->ns_name,
1892                                    lock, lock->l_handle.h_cookie,
1893                                    atomic_read(&lock->l_refc),
1894                                    lock->l_readers, lock->l_writers,
1895                                    ldlm_lockname[lock->l_granted_mode],
1896                                    ldlm_lockname[lock->l_req_mode],
1897                                    lock->l_resource->lr_name.name[0],
1898                                    lock->l_resource->lr_name.name[1],
1899                                    atomic_read(&lock->l_resource->lr_refcount),
1900                                    ldlm_typename[lock->l_resource->lr_type],
1901                                    lock->l_flags, lock->l_remote_handle.cookie,
1902                                    lock->l_export ?
1903                                          atomic_read(&lock->l_export->exp_refcount) : -99,
1904                                    lock->l_pid, lock->l_callback_timeout);
1905                 break;
1906         }
1907         va_end(args);
1908 }
1909 EXPORT_SYMBOL(_ldlm_lock_debug);
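
/*
 * _ldlm_lock_debug() is normally invoked through the LDLM_DEBUG() and
 * LDLM_ERROR() macros used throughout this file, which supply the
 * libcfs_debug_msg_data describing the call site.
 */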