Don't dump bogus locks; we can dereference NULL pointers therein.
[fs/lustre-release.git] / lustre / ldlm / ldlm_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2002 Cluster File Systems, Inc.
5  *   Author: Peter Braam <braam@clusterfs.com>
6  *   Author: Phil Schwan <phil@clusterfs.com>
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or
11  *   modify it under the terms of version 2 of the GNU General Public
12  *   License as published by the Free Software Foundation.
13  *
14  *   Lustre is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *   GNU General Public License for more details.
18  *
19  *   You should have received a copy of the GNU General Public License
20  *   along with Lustre; if not, write to the Free Software
21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23
24 #define DEBUG_SUBSYSTEM S_LDLM
25
26 #include <linux/slab.h>
27 #include <linux/module.h>
28 #include <linux/random.h>
29 #include <linux/lustre_dlm.h>
30 #include <linux/lustre_mds.h>
31 #include <linux/obd_class.h>
32
33 //struct lustre_lock ldlm_everything_lock;
34
35 /* lock types */
36 char *ldlm_lockname[] = {
37         [0] "--",
38         [LCK_EX] "EX",
39         [LCK_PW] "PW",
40         [LCK_PR] "PR",
41         [LCK_CW] "CW",
42         [LCK_CR] "CR",
43         [LCK_NL] "NL"
44 };
45 char *ldlm_typename[] = {
46         [LDLM_PLAIN] "PLN",
47         [LDLM_EXTENT] "EXT",
48 };
49
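/* Map an intent opcode to a human-readable name for debug logging. */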
50 char *ldlm_it2str(int it)
51 {
52         switch (it) {
53         case IT_OPEN:
54                 return "open";
55         case IT_CREAT:
56                 return "creat";
57         case (IT_OPEN | IT_CREAT):
58                 return "open|creat";
59         case IT_MKDIR:
60                 return "mkdir";
61         case IT_LINK:
62                 return "link";
63         case IT_LINK2:
64                 return "link2";
65         case IT_SYMLINK:
66                 return "symlink";
67         case IT_UNLINK:
68                 return "unlink";
69         case IT_RMDIR:
70                 return "rmdir";
71         case IT_RENAME:
72                 return "rename";
73         case IT_RENAME2:
74                 return "rename2";
75         case IT_READDIR:
76                 return "readdir";
77         case IT_GETATTR:
78                 return "getattr";
79         case IT_SETATTR:
80                 return "setattr";
81         case IT_READLINK:
82                 return "readlink";
83         case IT_MKNOD:
84                 return "mknod";
85         case IT_LOOKUP:
86                 return "lookup";
87         default:
88                 CERROR("Unknown intent %d\n", it);
89                 return "UNKNOWN";
90         }
91 }
92
93 extern kmem_cache_t *ldlm_lock_slab;
94
95 static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b);
96
97 ldlm_res_compat ldlm_res_compat_table[] = {
98         [LDLM_PLAIN] ldlm_plain_compat,
99         [LDLM_EXTENT] ldlm_extent_compat,
100 };
101
102 static ldlm_res_policy ldlm_intent_policy_func;
103
104 static int ldlm_plain_policy(struct ldlm_lock *lock, void *req_cookie,
105                              ldlm_mode_t mode, int flags, void *data)
106 {
107         if ((flags & LDLM_FL_HAS_INTENT) && ldlm_intent_policy_func) {
108                 return ldlm_intent_policy_func(lock, req_cookie, mode, flags, 
109                                                data);
110         }
111
112         return ELDLM_OK;
113 }
114
115 ldlm_res_policy ldlm_res_policy_table[] = {
116         [LDLM_PLAIN] ldlm_plain_policy,
117         [LDLM_EXTENT] ldlm_extent_policy,
118 };
119
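/* Register/unregister the intent policy that ldlm_plain_policy invokes when
 * an enqueue carries LDLM_FL_HAS_INTENT. */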
120 void ldlm_register_intent(ldlm_res_policy arg)
121 {
122         ldlm_intent_policy_func = arg;
123 }
124
125 void ldlm_unregister_intent(void)
126 {
127         ldlm_intent_policy_func = NULL;
128 }
129
130 /*
131  * REFCOUNTED LOCK OBJECTS
132  */
133
134
135 /*
136  * Lock refcounts, during creation:
137  *   - one special one for allocation, dec'd only once in destroy
138  *   - one for being a lock that's in-use
139  *   - one for the addref associated with a new lock
140  */
141 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
142 {
143         l_lock(&lock->l_resource->lr_namespace->ns_lock);
144         lock->l_refc++;
145         ldlm_resource_getref(lock->l_resource);
146         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
147         return lock;
148 }
149
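/* Drop a lock reference (and one resource reference); the final put on a
 * destroyed lock frees the lock and releases its export connection. */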
150 void ldlm_lock_put(struct ldlm_lock *lock)
151 {
152         struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
153         ENTRY;
154
155         l_lock(&ns->ns_lock);
156         lock->l_refc--;
157         //LDLM_DEBUG(lock, "after refc--");
158         if (lock->l_refc < 0)
159                 LBUG();
160
161         if (ldlm_resource_put(lock->l_resource)) {
162                 LASSERT(lock->l_refc == 0);
163                 lock->l_resource = NULL;
164         }
165         if (lock->l_parent)
166                 LDLM_LOCK_PUT(lock->l_parent);
167
168         if (lock->l_refc == 0 && (lock->l_flags & LDLM_FL_DESTROYED)) {
169                 l_unlock(&ns->ns_lock);
170                 LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing");
171
172                 //spin_lock(&ldlm_handle_lock);
173                 spin_lock(&ns->ns_counter_lock);
174                 ns->ns_locks--;
175                 spin_unlock(&ns->ns_counter_lock);
176
177                 lock->l_resource = NULL;
178                 lock->l_random = DEAD_HANDLE_MAGIC;
179                 if (lock->l_export && lock->l_export->exp_connection)
180                         ptlrpc_put_connection(lock->l_export->exp_connection);
181                 kmem_cache_free(ldlm_lock_slab, lock);
182                 //spin_unlock(&ldlm_handle_lock);
183                 CDEBUG(D_MALLOC, "kfreed 'lock': %d at %p (tot 0).\n",
184                        sizeof(*lock), lock);
185         } else
186                 l_unlock(&ns->ns_lock);
187
188         EXIT;
189 }
190
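/* Mark a lock destroyed: take it off the LRU and export lists, run the
 * completion AST to wake waiters, and drop the reference taken at creation. */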
191 void ldlm_lock_destroy(struct ldlm_lock *lock)
192 {
193         ENTRY;
194         l_lock(&lock->l_resource->lr_namespace->ns_lock);
195
196         if (!list_empty(&lock->l_children)) {
197                 LDLM_DEBUG(lock, "still has children (%p)!",
198                            lock->l_children.next);
199                 ldlm_lock_dump(lock);
200                 LBUG();
201         }
202         if (lock->l_readers || lock->l_writers) {
203                 LDLM_DEBUG(lock, "lock still has references");
204                 ldlm_lock_dump(lock);
205         }
206
207         if (!list_empty(&lock->l_res_link)) {
208                 ldlm_lock_dump(lock);
209                 LBUG();
210         }
211
212         if (lock->l_flags & LDLM_FL_DESTROYED) {
213                 LASSERT(list_empty(&lock->l_lru));
214                 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
215                 EXIT;
216                 return;
217         }
218
219         list_del_init(&lock->l_lru);
220         list_del(&lock->l_export_chain);
221         lock->l_export = NULL;
222         lock->l_flags |= LDLM_FL_DESTROYED;
223
224         /* Wake anyone waiting for this lock */
225         /* FIXME: I should probably add yet another flag, instead of using
226          * l_export to only call this on clients */
227         if (lock->l_export && lock->l_completion_ast)
228                 lock->l_completion_ast(lock, 0);
229
230         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
231         LDLM_LOCK_PUT(lock);
232         EXIT;
233 }
234
235 /*
236    usage: pass in a resource on which you have done get
237           pass in a parent lock on which you have done a get
238           do not put the resource or the parent
239    returns: lock with refcount 1
240 */
241 static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
242                                        struct ldlm_resource *resource)
243 {
244         struct ldlm_lock *lock;
245         ENTRY;
246
247         if (resource == NULL)
248                 LBUG();
249
250         lock = kmem_cache_alloc(ldlm_lock_slab, SLAB_KERNEL);
251         if (lock == NULL)
252                 RETURN(NULL);
253
254         memset(lock, 0, sizeof(*lock));
255         get_random_bytes(&lock->l_random, sizeof(__u64));
256
257         lock->l_resource = resource;
258         /* this refcount matches the one on the resource passed
259            in, which is not being put away */
260         lock->l_refc = 1;
261         INIT_LIST_HEAD(&lock->l_children);
262         INIT_LIST_HEAD(&lock->l_res_link);
263         INIT_LIST_HEAD(&lock->l_lru);
264         INIT_LIST_HEAD(&lock->l_export_chain);
265         INIT_LIST_HEAD(&lock->l_pending_chain);
266         init_waitqueue_head(&lock->l_waitq);
267
268         spin_lock(&resource->lr_namespace->ns_counter_lock);
269         resource->lr_namespace->ns_locks++;
270         spin_unlock(&resource->lr_namespace->ns_counter_lock);
271
272         if (parent != NULL) {
273                 l_lock(&parent->l_resource->lr_namespace->ns_lock);
274                 lock->l_parent = parent;
275                 list_add(&lock->l_childof, &parent->l_children);
276                 l_unlock(&parent->l_resource->lr_namespace->ns_lock);
277         }
278
279         CDEBUG(D_MALLOC, "kmalloced 'lock': %d at "
280                "%p (tot %d).\n", sizeof(*lock), lock, 1);
281         /* this is the extra refcount, to prevent the lock from evaporating */
282         LDLM_LOCK_GET(lock);
283         RETURN(lock);
284 }
285
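/* Move a lock to the resource named by new_resid, transferring all of the
 * lock's references from the old resource to the new one. */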
286 int ldlm_lock_change_resource(struct ldlm_lock *lock, __u64 new_resid[3])
287 {
288         struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
289         struct ldlm_resource *oldres = lock->l_resource;
290         int i;
291         ENTRY;
292
293         l_lock(&ns->ns_lock);
294         if (memcmp(new_resid, lock->l_resource->lr_name,
295                    sizeof(lock->l_resource->lr_name)) == 0) {
296                 /* Nothing to do */
297                 l_unlock(&ns->ns_lock);
298                 RETURN(0);
299         }
300
301         LASSERT(new_resid[0] != 0);
302
303         /* This function assumes that the lock isn't on any lists */
304         LASSERT(list_empty(&lock->l_res_link));
305
306         lock->l_resource = ldlm_resource_get(ns, NULL, new_resid,
307                                              lock->l_resource->lr_type, 1);
308         if (lock->l_resource == NULL) {
309                 LBUG();
310                 RETURN(-ENOMEM);
311         }
312
313         /* move references over */
314         for (i = 0; i < lock->l_refc; i++) {
315                 int rc;
316                 ldlm_resource_getref(lock->l_resource);
317                 rc = ldlm_resource_put(oldres);
318                 if (rc == 1 && i != lock->l_refc - 1)
319                         LBUG();
320         }
321         /* compensate for the initial get above.. */
322         ldlm_resource_put(lock->l_resource);
323
324         l_unlock(&ns->ns_lock);
325         RETURN(0);
326 }
327
328 /*
329  *  HANDLES
330  */
331
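/* Fill in a lustre_handle for a lock: its address plus the random cookie
 * that __ldlm_handle2lock() later uses to validate the handle. */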
332 void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
333 {
334         lockh->addr = (__u64) (unsigned long)lock;
335         lockh->cookie = lock->l_random;
336 }
337
338 struct ldlm_lock *__ldlm_handle2lock(struct lustre_handle *handle,
339                                      int strict)
340 {
341         struct ldlm_lock *lock = NULL, *retval = NULL;
342         ENTRY;
343
344         if (!handle || !handle->addr)
345                 RETURN(NULL);
346
347         //spin_lock(&ldlm_handle_lock);
348         lock = (struct ldlm_lock *)(unsigned long)(handle->addr);
349         if (!kmem_cache_validate(ldlm_lock_slab, (void *)lock)) {
350                 //CERROR("bogus lock %p\n", lock);
351                 GOTO(out2, retval);
352         }
353
354         if (lock->l_random != handle->cookie) {
355                 CERROR("bogus cookie: lock %p has "LPX64" vs. handle "LPX64"\n",
356                        lock, lock->l_random, handle->cookie);
357                 GOTO(out2, NULL);
358         }
359         if (!lock->l_resource) {
360                 CERROR("trying to lock bogus resource: lock %p\n", lock);
361                 //LDLM_DEBUG(lock, "ldlm_handle2lock(%p)", lock);
362                 GOTO(out2, retval);
363         }
364         if (!lock->l_resource->lr_namespace) {
365                 CERROR("trying to lock bogus namespace: lock %p\n", lock);
366                 //LDLM_DEBUG(lock, "ldlm_handle2lock(%p)", lock);
367                 GOTO(out2, retval);
368         }
369
370         l_lock(&lock->l_resource->lr_namespace->ns_lock);
371         if (strict && lock->l_flags & LDLM_FL_DESTROYED) {
372                 CERROR("lock already destroyed: lock %p\n", lock);
373                 //LDLM_DEBUG(lock, "ldlm_handle2lock(%p)", lock);
374                 GOTO(out, NULL);
375         }
376
377         retval = LDLM_LOCK_GET(lock);
378         if (!retval)
379                 CERROR("lock disappeared below us!!! %p\n", lock);
380         EXIT;
381  out:
382         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
383  out2:
384         //spin_unlock(&ldlm_handle_lock);
385         return retval;
386 }
387
388 static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b)
389 {
390         return lockmode_compat(a->l_req_mode, b->l_req_mode);
391 }
392
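/* Pack a lock's resource name, modes, extent and version into a wire
 * descriptor for use in ASTs. */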
393 void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
394 {
395         ldlm_res2desc(lock->l_resource, &desc->l_resource);
396         desc->l_req_mode = lock->l_req_mode;
397         desc->l_granted_mode = lock->l_granted_mode;
398         memcpy(&desc->l_extent, &lock->l_extent, sizeof(desc->l_extent));
399         memcpy(desc->l_version, lock->l_version, sizeof(desc->l_version));
400 }
401
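/* Queue an AST work item on the resource's lr_tmp list.  A non-NULL 'new'
 * lock means a blocking AST (described in w_desc); NULL means a completion
 * AST.  Takes a lock reference that ldlm_run_ast_work() drops. */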
402 static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
403                                    struct ldlm_lock *new)
404 {
405         struct ldlm_ast_work *w;
406         ENTRY;
407
408         l_lock(&lock->l_resource->lr_namespace->ns_lock);
409         if (new && (lock->l_flags & LDLM_FL_AST_SENT))
410                 GOTO(out, 0);
411
412         OBD_ALLOC(w, sizeof(*w));
413         if (!w) {
414                 LBUG();
415                 GOTO(out, 0);
416         }
417
418         if (new) {
419                 lock->l_flags |= LDLM_FL_AST_SENT;
420                 w->w_blocking = 1;
421                 ldlm_lock2desc(new, &w->w_desc);
422         }
423
424         w->w_lock = LDLM_LOCK_GET(lock);
425         list_add(&w->w_list, lock->l_resource->lr_tmp);
426       out:
427         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
428         return;
429 }
430
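/* Take a read or write reference on the lock named by the handle; NL, CR and
 * PR count as readers, all other modes as writers. */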
431 void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
432 {
433         struct ldlm_lock *lock;
434
435         lock = ldlm_handle2lock(lockh);
436         ldlm_lock_addref_internal(lock, mode);
437         LDLM_LOCK_PUT(lock);
438 }
439
440 /* only called for local locks */
441 void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
442 {
443         l_lock(&lock->l_resource->lr_namespace->ns_lock);
444
445         if (!list_empty(&lock->l_lru)) { 
446                 list_del_init(&lock->l_lru);
447                 lock->l_resource->lr_namespace->ns_nr_unused--;
448                 LASSERT(lock->l_resource->lr_namespace->ns_nr_unused >= 0);
449         }
450
451         if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR)
452                 lock->l_readers++;
453         else
454                 lock->l_writers++;
455         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
456         LDLM_LOCK_GET(lock);
457         LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
458 }
459
460 /* Args: unlocked lock */
461 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
462                                     __u64 *res_id, int flags);
463
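/* Drop a read or write reference.  On the last reference, run the blocking
 * AST if a cancel is pending; otherwise, on a client, put the lock on the
 * namespace's unused (LRU) list for later cancellation. */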
464 void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
465 {
466         struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
467         struct ldlm_namespace *ns;
468         ENTRY;
469
470         if (lock == NULL)
471                 LBUG();
472
473         LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
474         ns = lock->l_resource->lr_namespace;
475         l_lock(&lock->l_resource->lr_namespace->ns_lock);
476         if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR)
477                 lock->l_readers--;
478         else
479                 lock->l_writers--;
480
481         /* If we received a blocking AST and this was the last reference,
482          * run the callback. */
483         if (!lock->l_readers && !lock->l_writers &&
484             (lock->l_flags & LDLM_FL_CBPENDING)) {
485                 if (!lock->l_resource->lr_namespace->ns_client &&
486                     lock->l_export)
487                         CERROR("FL_CBPENDING set on non-local lock--just a "
488                                "warning\n");
489
490                 LDLM_DEBUG(lock, "final decref done on cbpending lock");
491                 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
492
493                 /* FIXME: need a real 'desc' here */
494                 lock->l_blocking_ast(lock, NULL, lock->l_data,
495                                      lock->l_data_len, LDLM_CB_BLOCKING);
496         } else if (ns->ns_client && !lock->l_readers && !lock->l_writers) {
497                 LASSERT(list_empty(&lock->l_lru));
498                 LASSERT(ns->ns_nr_unused >= 0);
499                 list_add_tail(&lock->l_lru, &ns->ns_unused_list);
500                 ns->ns_nr_unused++;
501                 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
502                 ldlm_cancel_lru(ns);
503         } else
504                 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
505
506         LDLM_LOCK_PUT(lock);    /* matches the ldlm_lock_get in addref */
507         LDLM_LOCK_PUT(lock);    /* matches the handle2lock above */
508
509         EXIT;
510 }
511
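/* Check 'lock' against every lock on 'queue'.  Returns 1 if it is compatible
 * with all of them, 0 otherwise; with send_cbs set, queues blocking ASTs for
 * the conflicting locks. */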
512 static int ldlm_lock_compat_list(struct ldlm_lock *lock, int send_cbs,
513                                  struct list_head *queue)
514 {
515         struct list_head *tmp, *pos;
516         int rc = 1;
517
518         list_for_each_safe(tmp, pos, queue) {
519                 struct ldlm_lock *child;
520                 ldlm_res_compat compat;
521
522                 child = list_entry(tmp, struct ldlm_lock, l_res_link);
523                 if (lock == child)
524                         continue;
525
526                 compat = ldlm_res_compat_table[child->l_resource->lr_type];
527                 if (compat && compat(child, lock)) {
528                         CDEBUG(D_OTHER, "compat function succeded, next.\n");
529                         continue;
530                 }
531                 if (lockmode_compat(child->l_granted_mode, lock->l_req_mode)) {
532                         CDEBUG(D_OTHER, "lock modes are compatible, next.\n");
533                         continue;
534                 }
535
536                 rc = 0;
537
538                 if (send_cbs && child->l_blocking_ast != NULL) {
539                         CDEBUG(D_OTHER, "lock %p incompatible; sending "
540                                "blocking AST.\n", child);
541                         ldlm_add_ast_work_item(child, lock);
542                 }
543         }
544
545         return rc;
546 }
547
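/* Check a lock for compatibility against the resource's granted and
 * converting queues. */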
548 static int ldlm_lock_compat(struct ldlm_lock *lock, int send_cbs)
549 {
550         int rc;
551         ENTRY;
552
553         l_lock(&lock->l_resource->lr_namespace->ns_lock);
554         rc = ldlm_lock_compat_list(lock, send_cbs,
555                                    &lock->l_resource->lr_granted);
556         /* FIXME: should we be sending ASTs to converting? */
557         if (rc)
558                 rc = ldlm_lock_compat_list(lock, send_cbs,
559                                            &lock->l_resource->lr_converting);
560
561         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
562         RETURN(rc);
563 }
564
565 /* NOTE: called by
566    - ldlm_handle_enqueue - resource
567 */
568 void ldlm_grant_lock(struct ldlm_lock *lock)
569 {
570         struct ldlm_resource *res = lock->l_resource;
571         ENTRY;
572
573         l_lock(&lock->l_resource->lr_namespace->ns_lock);
574         ldlm_resource_add_lock(res, &res->lr_granted, lock);
575         lock->l_granted_mode = lock->l_req_mode;
576
577         if (lock->l_granted_mode < res->lr_most_restr)
578                 res->lr_most_restr = lock->l_granted_mode;
579
580         if (lock->l_completion_ast) {
581                 ldlm_add_ast_work_item(lock, NULL);
582         }
583         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
584         EXIT;
585 }
586
587 /* returns a referenced lock or NULL */
588 static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode,
589                                       struct ldlm_extent *extent,
590                                       struct ldlm_lock *old_lock)
591 {
592         struct ldlm_lock *lock;
593         struct list_head *tmp;
594
595         list_for_each(tmp, queue) {
596                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
597
598                 if (lock == old_lock)
599                         continue;
600
601                 if (lock->l_flags & (LDLM_FL_CBPENDING | LDLM_FL_DESTROYED))
602                         continue;
603
604                 if (lock->l_req_mode != mode)
605                         continue;
606
607                 if (lock->l_resource->lr_type == LDLM_EXTENT &&
608                     (lock->l_extent.start > extent->start ||
609                      lock->l_extent.end < extent->end))
610                         continue;
611
612                 ldlm_lock_addref_internal(lock, mode);
613                 return lock;
614         }
615
616         return NULL;
617 }
618
619 /* Can be called in two ways:
620  *
621  * If 'ns' is NULL, then lockh describes an existing lock that we want to look
622  * for a duplicate of.
623  *
624  * Otherwise, all of the fields must be filled in, to match against.
625  *
626  * Returns 1 if it finds an already-existing lock that is compatible; in this
627  * case, lockh is filled in with an addref()ed lock
628  */
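/*
 * A minimal usage sketch (hypothetical caller; the resource name and mode are
 * illustrative only):
 *
 *     struct lustre_handle lockh;
 *     __u64 res_id[3] = { 0x2901, 0, 0 };
 *
 *     if (ldlm_lock_match(ns, res_id, LDLM_PLAIN, NULL, 0, LCK_PR, &lockh)) {
 *             // reuse the matched lock; it was addref()ed for us
 *             ldlm_lock_decref(&lockh, LCK_PR);   // drop it when finished
 *     }
 */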
629 int ldlm_lock_match(struct ldlm_namespace *ns, __u64 *res_id, __u32 type,
630                     void *cookie, int cookielen, ldlm_mode_t mode,
631                     struct lustre_handle *lockh)
632 {
633         struct ldlm_resource *res;
634         struct ldlm_lock *lock, *old_lock = NULL;
635         int rc = 0;
636         ENTRY;
637
638         if (ns == NULL) {
639                 old_lock = ldlm_handle2lock(lockh);
640                 LASSERT(old_lock);
641
642                 ns = old_lock->l_resource->lr_namespace;
643                 res_id = old_lock->l_resource->lr_name;
644                 type = old_lock->l_resource->lr_type;
645                 mode = old_lock->l_req_mode;
646         }
647
648         res = ldlm_resource_get(ns, NULL, res_id, type, 0);
649         if (res == NULL) {
650                 LASSERT(old_lock == NULL);
651                 RETURN(0);
652         }
653
654         l_lock(&ns->ns_lock);
655
656         if ((lock = search_queue(&res->lr_granted, mode, cookie, old_lock)))
657                 GOTO(out, rc = 1);
658         if ((lock = search_queue(&res->lr_converting, mode, cookie, old_lock)))
659                 GOTO(out, rc = 1);
660         if ((lock = search_queue(&res->lr_waiting, mode, cookie, old_lock)))
661                 GOTO(out, rc = 1);
662
663         EXIT;
664        out:
665         ldlm_resource_put(res);
666         l_unlock(&ns->ns_lock);
667
668         if (lock) {
669                 ldlm_lock2handle(lock, lockh);
670                 if (lock->l_completion_ast)
671                         lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC);
672         }
673         if (rc)
674                 LDLM_DEBUG(lock, "matched");
675         else
676                 LDLM_DEBUG_NOLOCK("not matched");
677
678         if (old_lock)
679                 LDLM_LOCK_PUT(old_lock);
680
681         return rc;
682 }
683
684 /* Returns a referenced lock */
685 struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
686                                    struct lustre_handle *parent_lock_handle,
687                                    __u64 * res_id, __u32 type,
688                                    ldlm_mode_t mode, void *data, __u32 data_len)
689 {
690         struct ldlm_resource *res, *parent_res = NULL;
691         struct ldlm_lock *lock, *parent_lock;
692
693         parent_lock = ldlm_handle2lock(parent_lock_handle);
694         if (parent_lock)
695                 parent_res = parent_lock->l_resource;
696
697         res = ldlm_resource_get(ns, parent_res, res_id, type, 1);
698         if (res == NULL)
699                 RETURN(NULL);
700
701         lock = ldlm_lock_new(parent_lock, res);
702         if (lock == NULL) {
703                 ldlm_resource_put(res);
704                 RETURN(NULL);
705         }
706
707         lock->l_req_mode = mode;
708         lock->l_data = data;
709         lock->l_data_len = data_len;
710
711         return lock;
712 }
713
714 /* Must be called with lock->l_lock and lock->l_resource->lr_lock not held */
715 ldlm_error_t ldlm_lock_enqueue(struct ldlm_lock * lock,
716                                void *cookie, int cookie_len,
717                                int *flags,
718                                ldlm_completion_callback completion,
719                                ldlm_blocking_callback blocking)
720 {
721         struct ldlm_resource *res;
722         int local;
723         ldlm_res_policy policy;
724         ENTRY;
725
726         res = lock->l_resource;
727         lock->l_blocking_ast = blocking;
728
729         if (res->lr_type == LDLM_EXTENT)
730                 memcpy(&lock->l_extent, cookie, sizeof(lock->l_extent));
731
732         /* policies are not executed on the client or during replay */
733         local = res->lr_namespace->ns_client;
734         if (!local && !(*flags & LDLM_FL_REPLAY) &&
735             (policy = ldlm_res_policy_table[res->lr_type])) {
736                 int rc;
737                 rc = policy(lock, cookie, lock->l_req_mode, *flags, NULL);
738
739                 if (rc == ELDLM_LOCK_CHANGED) {
740                         res = lock->l_resource;
741                         *flags |= LDLM_FL_LOCK_CHANGED;
742                 } else if (rc == ELDLM_LOCK_ABORTED) {
743                         ldlm_lock_destroy(lock);
744                         RETURN(rc);
745                 }
746         }
747
748         l_lock(&res->lr_namespace->ns_lock);
749         if (local && lock->l_req_mode == lock->l_granted_mode) {
750                 /* The server returned a blocked lock, but it was granted before
751                  * we got a chance to actually enqueue it.  We don't need to do
752                  * anything else. */
753                 *flags &= ~(LDLM_FL_BLOCK_GRANTED | 
754                           LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
755                 GOTO(out, ELDLM_OK);
756         }
757
758         /* This distinction between local lock trees is very important; a client
759          * namespace only has information about locks taken by that client, and
760          * thus doesn't have enough information to decide for itself if it can
761          * be granted (below).  In this case, we do exactly what the server
762          * tells us to do, as dictated by the 'flags'.
763          *
764          * We do exactly the same thing during recovery, when the server is
765          * more or less trusting the clients not to lie.
766          *
767          * FIXME (bug 629283): Detect obvious lies by checking compatibility in
768          * granted/converting queues. */
769         ldlm_resource_unlink_lock(lock);
770         if (local || (*flags & LDLM_FL_REPLAY)) {
771                 if (*flags & LDLM_FL_BLOCK_CONV)
772                         ldlm_resource_add_lock(res, res->lr_converting.prev,
773                                                lock);
774                 else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
775                         ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
776                 else
777                         ldlm_grant_lock(lock);
778                 GOTO(out, ELDLM_OK);
779         }
780
781         /* FIXME: We may want to optimize by checking lr_most_restr */
782         if (!list_empty(&res->lr_converting)) {
783                 ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
784                 *flags |= LDLM_FL_BLOCK_CONV;
785                 GOTO(out, ELDLM_OK);
786         }
787         if (!list_empty(&res->lr_waiting)) {
788                 ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
789                 *flags |= LDLM_FL_BLOCK_WAIT;
790                 GOTO(out, ELDLM_OK);
791         }
792         if (!ldlm_lock_compat(lock, 0)) {
793                 ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
794                 *flags |= LDLM_FL_BLOCK_GRANTED;
795                 GOTO(out, ELDLM_OK);
796         }
797
798         ldlm_grant_lock(lock);
799         EXIT;
800       out:
801         l_unlock(&res->lr_namespace->ns_lock);
802         /* Don't set 'completion_ast' until here so that if the lock is granted
803          * immediately we don't do an unnecessary completion call. */
804         lock->l_completion_ast = completion;
805         return ELDLM_OK;
806 }
807
808 /* Must be called with namespace taken: queue is waiting or converting. */
809 static int ldlm_reprocess_queue(struct ldlm_resource *res,
810                                 struct list_head *queue)
811 {
812         struct list_head *tmp, *pos;
813         ENTRY;
814
815         list_for_each_safe(tmp, pos, queue) {
816                 struct ldlm_lock *pending;
817                 pending = list_entry(tmp, struct ldlm_lock, l_res_link);
818
819                 CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
820
821                 if (!ldlm_lock_compat(pending, 1))
822                         RETURN(1);
823
824                 list_del_init(&pending->l_res_link);
825                 ldlm_grant_lock(pending);
826         }
827
828         RETURN(0);
829 }
830
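/* Deliver every queued AST work item: call the blocking or completion
 * callback, drop the lock reference taken in ldlm_add_ast_work_item(), and
 * free the item. */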
831 void ldlm_run_ast_work(struct list_head *rpc_list)
832 {
833         struct list_head *tmp, *pos;
834         int rc;
835         ENTRY;
836
837         list_for_each_safe(tmp, pos, rpc_list) {
838                 struct ldlm_ast_work *w =
839                         list_entry(tmp, struct ldlm_ast_work, w_list);
840
841                 if (w->w_blocking)
842                         rc = w->w_lock->l_blocking_ast(w->w_lock, &w->w_desc,
843                                                        w->w_data, w->w_datalen,
844                                                        LDLM_CB_BLOCKING);
845                 else
846                         rc = w->w_lock->l_completion_ast(w->w_lock, w->w_flags);
847                 if (rc)
848                         CERROR("Failed AST - should clean & disconnect "
849                                "client\n");
850                 LDLM_LOCK_PUT(w->w_lock);
851                 list_del(&w->w_list);
852                 OBD_FREE(w, sizeof(*w));
853         }
854         EXIT;
855 }
856
857 /* Must be called with resource->lr_lock not taken. */
858 void ldlm_reprocess_all(struct ldlm_resource *res)
859 {
860         struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
861         ENTRY;
862
863         /* Local lock trees don't get reprocessed. */
864         if (res->lr_namespace->ns_client) {
865                 EXIT;
866                 return;
867         }
868
869         l_lock(&res->lr_namespace->ns_lock);
870         res->lr_tmp = &rpc_list;
871
872         ldlm_reprocess_queue(res, &res->lr_converting);
873         if (list_empty(&res->lr_converting))
874                 ldlm_reprocess_queue(res, &res->lr_waiting);
875
876         res->lr_tmp = NULL;
877         l_unlock(&res->lr_namespace->ns_lock);
878
879         ldlm_run_ast_work(&rpc_list);
880         EXIT;
881 }
882
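/* Run the blocking AST with LDLM_CB_CANCELING, at most once per lock
 * (guarded by LDLM_FL_CANCEL). */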
883 void ldlm_cancel_callback(struct ldlm_lock *lock)
884 {
885         l_lock(&lock->l_resource->lr_namespace->ns_lock);
886         if (!(lock->l_flags & LDLM_FL_CANCEL)) {
887                 lock->l_flags |= LDLM_FL_CANCEL;
888                 if (lock->l_blocking_ast)
889                         lock->l_blocking_ast(lock, NULL, lock->l_data,
890                                              lock->l_data_len,
891                                              LDLM_CB_CANCELING);
892                 else
893                         LDLM_DEBUG(lock, "no blocking ast");
894         }
895         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
896 }
897
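/* Cancel a lock: run the cancel callback, remove the lock from the waiting-
 * lock timer list and from its resource queue, then destroy it. */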
898 void ldlm_lock_cancel(struct ldlm_lock *lock)
899 {
900         struct ldlm_resource *res;
901         struct ldlm_namespace *ns;
902         ENTRY;
903
904         res = lock->l_resource;
905         ns = res->lr_namespace;
906
907         l_lock(&ns->ns_lock);
908         if (lock->l_readers || lock->l_writers)
909                 LDLM_DEBUG(lock, "lock still has references");
910
911         ldlm_cancel_callback(lock);
912
913         ldlm_del_waiting_lock(lock);
914         ldlm_resource_unlink_lock(lock);
915         ldlm_lock_destroy(lock);
916         l_unlock(&ns->ns_lock);
917         EXIT;
918 }
919
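/* Attach opaque caller data to the lock named by the handle. */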
920 int ldlm_lock_set_data(struct lustre_handle *lockh, void *data, int datalen)
921 {
922         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
923         ENTRY;
924
925         if (lock == NULL)
926                 RETURN(-EINVAL);
927
928         lock->l_data = data;
929         lock->l_data_len = datalen;
930
931         LDLM_LOCK_PUT(lock);
932
933         RETURN(0);
934 }
935
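/* Cancel every lock held by an export (e.g. a disconnecting client),
 * reprocessing each resource so blocked locks can be granted. */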
936 void ldlm_cancel_locks_for_export(struct obd_export *exp)
937 {
938         struct list_head *iter, *n; /* MUST BE CALLED "n"! */
939
940         list_for_each_safe(iter, n, &exp->exp_ldlm_data.led_held_locks) {
941                 struct ldlm_lock *lock;
942                 struct ldlm_resource *res;
943                 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
944                 res = ldlm_resource_getref(lock->l_resource);
945                 LDLM_DEBUG(lock, "export %p", exp);
946                 ldlm_lock_cancel(lock);
947                 ldlm_reprocess_all(res);
948                 ldlm_resource_put(res);
949         }
950 }
951
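/* Change a lock's requested mode and queue it on the resource's converting
 * list; the conversion completes later via the completion AST. */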
952 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
953                                         int *flags)
954 {
955         struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
956         struct ldlm_resource *res;
957         struct ldlm_namespace *ns;
958         int granted = 0;
959         ENTRY;
960
961         res = lock->l_resource;
962         ns = res->lr_namespace;
963
964         l_lock(&ns->ns_lock);
965
966         lock->l_req_mode = new_mode;
967         ldlm_resource_unlink_lock(lock);
968
969         /* If this is a local resource, put it on the appropriate list. */
970         if (res->lr_namespace->ns_client) {
971                 if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED))
972                         ldlm_resource_add_lock(res, res->lr_converting.prev,
973                                                lock);
974                 else {
975                         /* This should never happen, because of the way the
976                          * server handles conversions. */
977                         LBUG();
978
979                         res->lr_tmp = &rpc_list;
980                         ldlm_grant_lock(lock);
981                         res->lr_tmp = NULL;
982                         granted = 1;
983                         /* FIXME: completion handling must not run with ns_lock held */
984                         if (lock->l_completion_ast)
985                                 lock->l_completion_ast(lock, 0);
986                 }
987         } else {
988                 /* FIXME: We should try the conversion right away and possibly
989                  * return success without the need for an extra AST */
990                 ldlm_resource_add_lock(res, res->lr_converting.prev, lock);
991                 *flags |= LDLM_FL_BLOCK_CONV;
992         }
993
994         l_unlock(&ns->ns_lock);
995
996         if (granted)
997                 ldlm_run_ast_work(&rpc_list);
998         RETURN(res);
999 }
1000
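/* Debugging helper: dump a lock's version, peer, parent, resource, modes,
 * reference counts and extent at D_OTHER level. */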
1001 void ldlm_lock_dump(struct ldlm_lock *lock)
1002 {
1003         char ver[128];
1004
1005         if (!(portal_debug & D_OTHER))
1006                 return;
1007
1008         if (RES_VERSION_SIZE != 4)
1009                 LBUG();
1010
1011         if (!lock) {
1012                 CDEBUG(D_OTHER, "  NULL LDLM lock\n");
1013                 return;
1014         }
1015
1016         snprintf(ver, sizeof(ver), "%x %x %x %x",
1017                  lock->l_version[0], lock->l_version[1],
1018                  lock->l_version[2], lock->l_version[3]);
1019
1020         CDEBUG(D_OTHER, "  -- Lock dump: %p (%s)\n", lock, ver);
1021         if (lock->l_export && lock->l_export->exp_connection)
1022                 CDEBUG(D_OTHER, "  Node: NID %x (rhandle: "LPX64")\n",
1023                        lock->l_export->exp_connection->c_peer.peer_nid,
1024                        lock->l_remote_handle.addr);
1025         else
1026                 CDEBUG(D_OTHER, "  Node: local\n");
1027         CDEBUG(D_OTHER, "  Parent: %p\n", lock->l_parent);
1028         CDEBUG(D_OTHER, "  Resource: %p ("LPD64")\n", lock->l_resource,
1029                lock->l_resource->lr_name[0]);
1030         CDEBUG(D_OTHER, "  Requested mode: %d, granted mode: %d\n",
1031                (int)lock->l_req_mode, (int)lock->l_granted_mode);
1032         CDEBUG(D_OTHER, "  Readers: %u ; Writers; %u\n",
1033                lock->l_readers, lock->l_writers);
1034         if (lock->l_resource->lr_type == LDLM_EXTENT)
1035                 CDEBUG(D_OTHER, "  Extent: %Lu -> %Lu\n",
1036                        (unsigned long long)lock->l_extent.start,
1037                        (unsigned long long)lock->l_extent.end);
1038 }