Fix the connection refcount leaks. There were actually 3 separate bugs.
lustre/ldlm/ldlm_lock.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2002 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/lustre_dlm.h>
#include <linux/lustre_mds.h>
#include <linux/obd_class.h>

/* this lock protects ldlm_handle2lock's integrity */
//static spinlock_t ldlm_handle_lock = SPIN_LOCK_UNLOCKED;

/* lock types */
char *ldlm_lockname[] = {
        [0] "--",
        [LCK_EX] "EX",
        [LCK_PW] "PW",
        [LCK_PR] "PR",
        [LCK_CW] "CW",
        [LCK_CR] "CR",
        [LCK_NL] "NL"
};
char *ldlm_typename[] = {
        [LDLM_PLAIN] "PLN",
        [LDLM_EXTENT] "EXT",
        [LDLM_MDSINTENT] "INT"
};

char *ldlm_it2str(int it)
{
        switch (it) {
        case IT_OPEN:
                return "open";
        case IT_CREAT:
                return "creat";
        case (IT_OPEN | IT_CREAT):
                return "open|creat";
        case IT_MKDIR:
                return "mkdir";
        case IT_LINK:
                return "link";
        case IT_LINK2:
                return "link2";
        case IT_SYMLINK:
                return "symlink";
        case IT_UNLINK:
                return "unlink";
        case IT_RMDIR:
                return "rmdir";
        case IT_RENAME:
                return "rename";
        case IT_RENAME2:
                return "rename2";
        case IT_READDIR:
                return "readdir";
        case IT_GETATTR:
                return "getattr";
        case IT_SETATTR:
                return "setattr";
        case IT_READLINK:
                return "readlink";
        case IT_MKNOD:
                return "mknod";
        case IT_LOOKUP:
                return "lookup";
        default:
                CERROR("Unknown intent %d\n", it);
                return "UNKNOWN";
        }
}

extern kmem_cache_t *ldlm_lock_slab;

static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b);

ldlm_res_compat ldlm_res_compat_table[] = {
        [LDLM_PLAIN] ldlm_plain_compat,
        [LDLM_EXTENT] ldlm_extent_compat,
        [LDLM_MDSINTENT] ldlm_plain_compat
};

ldlm_res_policy ldlm_res_policy_table[] = {
        [LDLM_PLAIN] NULL,
        [LDLM_EXTENT] ldlm_extent_policy,
        [LDLM_MDSINTENT] NULL
};

void ldlm_register_intent(int (*arg) (struct ldlm_lock * lock, void *req_cookie,
                                      ldlm_mode_t mode, void *data))
{
        ldlm_res_policy_table[LDLM_MDSINTENT] = arg;
}

void ldlm_unregister_intent(void)
{
        ldlm_res_policy_table[LDLM_MDSINTENT] = NULL;
}

/*
 * REFCOUNTED LOCK OBJECTS
 */


/*
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        lock->l_refc++;
        ldlm_resource_getref(lock->l_resource);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        return lock;
}

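/*
 * Drop a reference taken with LDLM_LOCK_GET()/ldlm_lock_get().  When the
 * last reference on a lock that has already been marked LDLM_FL_DESTROYED
 * goes away, the lock is freed: the namespace lock count is decremented,
 * the handle cookie is poisoned with DEAD_HANDLE_MAGIC, and the reference
 * held on the export's connection (if any) is released.
 */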
void ldlm_lock_put(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        ENTRY;

        l_lock(&ns->ns_lock);
        lock->l_refc--;
        //LDLM_DEBUG(lock, "after refc--");
        if (lock->l_refc < 0)
                LBUG();

        if (ldlm_resource_put(lock->l_resource))
                lock->l_resource = NULL;
        if (lock->l_parent)
                LDLM_LOCK_PUT(lock->l_parent);

        if (lock->l_refc == 0 && (lock->l_flags & LDLM_FL_DESTROYED)) {
                l_unlock(&ns->ns_lock);
                LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing");

                //spin_lock(&ldlm_handle_lock);
                spin_lock(&ns->ns_counter_lock);
                ns->ns_locks--;
                spin_unlock(&ns->ns_counter_lock);

                lock->l_resource = NULL;
                lock->l_random = DEAD_HANDLE_MAGIC;
                if (lock->l_export && lock->l_export->exp_connection)
                        ptlrpc_put_connection(lock->l_export->exp_connection);
                kmem_cache_free(ldlm_lock_slab, lock);
                //spin_unlock(&ldlm_handle_lock);
                CDEBUG(D_MALLOC, "kfreed 'lock': %d at %p (tot 0).\n",
                       sizeof(*lock), lock);
        } else
                l_unlock(&ns->ns_lock);

        EXIT;
}

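/*
 * Mark a lock as destroyed: sanity-check that it has no children, no
 * reader/writer references and is not on a resource queue, unlink it from
 * its export chain, set LDLM_FL_DESTROYED and drop the "in-use" reference.
 * The memory itself is only freed once the last reference goes away in
 * ldlm_lock_put().
 */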
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        ENTRY;
        l_lock(&lock->l_resource->lr_namespace->ns_lock);

        if (!list_empty(&lock->l_children)) {
                LDLM_DEBUG(lock, "still has children (%p)!",
                           lock->l_children.next);
                ldlm_lock_dump(lock);
                LBUG();
        }
        if (lock->l_readers || lock->l_writers) {
                LDLM_DEBUG(lock, "lock still has references");
                ldlm_lock_dump(lock);
                LBUG();
        }

        if (!list_empty(&lock->l_res_link)) {
                ldlm_lock_dump(lock);
                LBUG();
        }

        if (lock->l_flags & LDLM_FL_DESTROYED) {
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                EXIT;
                return;
        }

        list_del(&lock->l_export_chain);
        lock->l_export = NULL;
        lock->l_flags |= LDLM_FL_DESTROYED;

        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0);

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        LDLM_LOCK_PUT(lock);
        EXIT;
}

/*
   usage: pass in a resource on which you have done a get
          pass in a parent lock on which you have done a get
          do not put the resource or the parent
   returns: lock with refcount 1
*/
static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
                                       struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;
        ENTRY;

        if (resource == NULL)
                LBUG();

        lock = kmem_cache_alloc(ldlm_lock_slab, SLAB_KERNEL);
        if (lock == NULL)
                RETURN(NULL);

        memset(lock, 0, sizeof(*lock));
        get_random_bytes(&lock->l_random, sizeof(__u64));

        lock->l_resource = resource;
        /* this refcount matches the one of the resource passed
           in which is not being put away */
        lock->l_refc = 1;
        INIT_LIST_HEAD(&lock->l_children);
        INIT_LIST_HEAD(&lock->l_res_link);
        INIT_LIST_HEAD(&lock->l_export_chain);
        INIT_LIST_HEAD(&lock->l_pending_chain);
        init_waitqueue_head(&lock->l_waitq);

        spin_lock(&resource->lr_namespace->ns_counter_lock);
        resource->lr_namespace->ns_locks++;
        spin_unlock(&resource->lr_namespace->ns_counter_lock);

        if (parent != NULL) {
                l_lock(&parent->l_resource->lr_namespace->ns_lock);
                lock->l_parent = parent;
                list_add(&lock->l_childof, &parent->l_children);
                l_unlock(&parent->l_resource->lr_namespace->ns_lock);
        }

        CDEBUG(D_MALLOC, "kmalloced 'lock': %d at "
               "%p (tot %d).\n", sizeof(*lock), lock, 1);
        /* this is the extra refcount, to prevent the lock from evaporating */
        LDLM_LOCK_GET(lock);
        RETURN(lock);
}

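/*
 * Move a lock to a different resource of the same type within its
 * namespace.  The new resource is looked up (and created if necessary),
 * and every reference the lock held on the old resource is transferred
 * to the new one, one get/put pair at a time.
 */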
int ldlm_lock_change_resource(struct ldlm_lock *lock, __u64 new_resid[3])
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        struct ldlm_resource *oldres = lock->l_resource;
        int type, i;
        ENTRY;

        l_lock(&ns->ns_lock);
        if (memcmp(new_resid, lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                l_unlock(&ns->ns_lock);
                RETURN(0);
        }

        type = lock->l_resource->lr_type;
        if (new_resid[0] == 0)
                LBUG();
        lock->l_resource = ldlm_resource_get(ns, NULL, new_resid, type, 1);
        if (lock->l_resource == NULL) {
                LBUG();
                RETURN(-ENOMEM);
        }

        /* move references over */
        for (i = 0; i < lock->l_refc; i++) {
                int rc;
                ldlm_resource_getref(lock->l_resource);
                rc = ldlm_resource_put(oldres);
                if (rc == 1 && i != lock->l_refc - 1)
                        LBUG();
        }
        /* compensate for the initial get above.. */
        ldlm_resource_put(lock->l_resource);

        l_unlock(&ns->ns_lock);
        RETURN(0);
}

/*
 *  HANDLES
 */

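/*
 * A lustre_handle refers to a lock without holding a pointer reference:
 * 'addr' carries the lock's kernel address and 'cookie' carries the random
 * value generated at lock creation.  ldlm_handle2lock() validates the slab
 * pointer, the cookie and the destroyed flag before handing back a
 * referenced lock, so a stale handle is rejected rather than dereferenced.
 */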
void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->addr = (__u64) (unsigned long)lock;
        lockh->cookie = lock->l_random;
}

struct ldlm_lock *ldlm_handle2lock(struct lustre_handle *handle)
{
        struct ldlm_lock *lock = NULL, *retval = NULL;
        ENTRY;

        if (!handle || !handle->addr)
                RETURN(NULL);

        //spin_lock(&ldlm_handle_lock);
        lock = (struct ldlm_lock *)(unsigned long)(handle->addr);
        if (!kmem_cache_validate(ldlm_lock_slab, (void *)lock)) {
                CERROR("bogus lock %p\n", lock);
                GOTO(out2, retval);
        }

        if (lock->l_random != handle->cookie) {
                CERROR("bogus cookie: lock %p has "LPX64" vs. handle "LPX64"\n",
                       lock, lock->l_random, handle->cookie);
                GOTO(out2, NULL);
        }
        if (!lock->l_resource) {
                CERROR("trying to lock bogus resource: lock %p\n", lock);
                LDLM_DEBUG(lock, "ldlm_handle2lock(%p)", lock);
                GOTO(out2, retval);
        }
        if (!lock->l_resource->lr_namespace) {
                CERROR("trying to lock bogus namespace: lock %p\n", lock);
                LDLM_DEBUG(lock, "ldlm_handle2lock(%p)", lock);
                GOTO(out2, retval);
        }

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_DESTROYED) {
                CERROR("lock already destroyed: lock %p\n", lock);
                LDLM_DEBUG(lock, "ldlm_handle2lock(%p)", lock);
                GOTO(out, NULL);
        }

        retval = LDLM_LOCK_GET(lock);
        if (!retval)
                CERROR("lock disappeared below us!!! %p\n", lock);
        EXIT;
 out:
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
 out2:
        //spin_unlock(&ldlm_handle_lock);
        return retval;
}

static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b)
{
        return lockmode_compat(a->l_req_mode, b->l_req_mode);
}

void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        ldlm_res2desc(lock->l_resource, &desc->l_resource);
        desc->l_req_mode = lock->l_req_mode;
        desc->l_granted_mode = lock->l_granted_mode;
        memcpy(&desc->l_extent, &lock->l_extent, sizeof(desc->l_extent));
        memcpy(desc->l_version, lock->l_version, sizeof(desc->l_version));
}

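/*
 * Queue an AST work item on the resource's lr_tmp list.  If 'new' is
 * non-NULL the item describes a blocking AST against 'new' (sent at most
 * once, guarded by LDLM_FL_AST_SENT); otherwise it is a completion AST.
 * Each work item pins the lock with LDLM_LOCK_GET(); ldlm_run_ast_work()
 * drops that reference after the callback has been delivered.
 */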
static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
                                   struct ldlm_lock *new)
{
        struct ldlm_ast_work *w;
        ENTRY;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (new && (lock->l_flags & LDLM_FL_AST_SENT))
                GOTO(out, 0);

        OBD_ALLOC(w, sizeof(*w));
        if (!w) {
                LBUG();
                GOTO(out, 0);
        }

        if (new) {
                lock->l_flags |= LDLM_FL_AST_SENT;
                w->w_blocking = 1;
                ldlm_lock2desc(new, &w->w_desc);
        }

        w->w_lock = LDLM_LOCK_GET(lock);
        list_add(&w->w_list, lock->l_resource->lr_tmp);
      out:
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        return;
}

void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR)
                lock->l_readers++;
        else
                lock->l_writers++;
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        LDLM_LOCK_GET(lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}

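/*
 * Drop a reader/writer reference taken by ldlm_lock_addref().  If this was
 * the last such reference and LDLM_FL_CBPENDING is set (a blocking AST
 * arrived while the lock was still in use), the blocking callback is
 * invoked now that the lock is no longer busy.
 */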
/* Args: unlocked lock */
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);
        ENTRY;

        if (lock == NULL)
                LBUG();

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR)
                lock->l_readers--;
        else
                lock->l_writers--;

        /* If we received a blocking AST and this was the last reference,
         * run the callback. */
        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                if (!lock->l_resource->lr_namespace->ns_client &&
                    lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

                /* FIXME: need a real 'desc' here */
                lock->l_blocking_ast(lock, NULL, lock->l_data,
                                     lock->l_data_len, LDLM_CB_BLOCKING);
        } else
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        LDLM_LOCK_PUT(lock);    /* matches the ldlm_lock_get in addref */
        LDLM_LOCK_PUT(lock);    /* matches the handle2lock above */

        EXIT;
}

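/*
 * Check 'lock' against every lock already on 'queue'.  Returns 1 if all of
 * them are compatible (per the resource type's compat function or plain
 * lock-mode compatibility), 0 otherwise.  When 'send_cbs' is set, a
 * blocking-AST work item is queued for each conflicting lock that has a
 * blocking callback registered.
 */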
static int ldlm_lock_compat_list(struct ldlm_lock *lock, int send_cbs,
                                 struct list_head *queue)
{
        struct list_head *tmp, *pos;
        int rc = 1;

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *child;
                ldlm_res_compat compat;

                child = list_entry(tmp, struct ldlm_lock, l_res_link);
                if (lock == child)
                        continue;

                compat = ldlm_res_compat_table[child->l_resource->lr_type];
                if (compat && compat(child, lock)) {
                        CDEBUG(D_OTHER, "compat function succeeded, next.\n");
                        continue;
                }
                if (lockmode_compat(child->l_granted_mode, lock->l_req_mode)) {
                        CDEBUG(D_OTHER, "lock modes are compatible, next.\n");
                        continue;
                }

                rc = 0;

                if (send_cbs && child->l_blocking_ast != NULL) {
                        CDEBUG(D_OTHER, "lock %p incompatible; sending "
                               "blocking AST.\n", child);
                        ldlm_add_ast_work_item(child, lock);
                }
        }

        return rc;
}

static int ldlm_lock_compat(struct ldlm_lock *lock, int send_cbs)
{
        int rc;
        ENTRY;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        rc = ldlm_lock_compat_list(lock, send_cbs,
                                   &lock->l_resource->lr_granted);
        /* FIXME: should we be sending ASTs to converting? */
        if (rc)
                rc = ldlm_lock_compat_list
                        (lock, send_cbs, &lock->l_resource->lr_converting);

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        RETURN(rc);
}

/* NOTE: called by
   - ldlm_handle_enqueue - resource
*/
void ldlm_grant_lock(struct ldlm_lock *lock)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_resource_add_lock(res, &res->lr_granted, lock);
        lock->l_granted_mode = lock->l_req_mode;

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (lock->l_completion_ast) {
                ldlm_add_ast_work_item(lock, NULL);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        EXIT;
}

/* returns a referenced lock or NULL */
static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode,
                                      struct ldlm_extent *extent)
{
        struct ldlm_lock *lock;
        struct list_head *tmp;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock->l_flags & (LDLM_FL_CBPENDING | LDLM_FL_DESTROYED))
                        continue;

                /* lock_convert() takes the resource lock, so we're sure that
                 * req_mode, lr_type, and l_cookie won't change beneath us */
                if (lock->l_req_mode != mode)
                        continue;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_extent.start > extent->start ||
                     lock->l_extent.end < extent->end))
                        continue;

                ldlm_lock_addref_internal(lock, mode);
                return lock;
        }

        return NULL;
}

/* Must be called with no resource or lock locks held.
 *
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock.
 */
int ldlm_lock_match(struct ldlm_namespace *ns, __u64 * res_id, __u32 type,
                    void *cookie, int cookielen, ldlm_mode_t mode,
                    struct lustre_handle *lockh)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock;
        int rc = 0;
        ENTRY;

        res = ldlm_resource_get(ns, NULL, res_id, type, 0);
        if (res == NULL)
                RETURN(0);

        ns = res->lr_namespace;
        l_lock(&ns->ns_lock);

        if ((lock = search_queue(&res->lr_granted, mode, cookie)))
                GOTO(out, rc = 1);
        if ((lock = search_queue(&res->lr_converting, mode, cookie)))
                GOTO(out, rc = 1);
        if ((lock = search_queue(&res->lr_waiting, mode, cookie)))
                GOTO(out, rc = 1);

        EXIT;
      out:
        ldlm_resource_put(res);
        l_unlock(&ns->ns_lock);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                if (lock->l_completion_ast)
                        lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC);
        }
        if (rc)
                LDLM_DEBUG(lock, "matched");
        else
                LDLM_DEBUG_NOLOCK("not matched");
        return rc;
}

/* Returns a referenced lock */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   struct lustre_handle *parent_lock_handle,
                                   __u64 * res_id, __u32 type,
                                   ldlm_mode_t mode, void *data, __u32 data_len)
{
        struct ldlm_resource *res, *parent_res = NULL;
        struct ldlm_lock *lock, *parent_lock;

        parent_lock = ldlm_handle2lock(parent_lock_handle);
        if (parent_lock)
                parent_res = parent_lock->l_resource;

        res = ldlm_resource_get(ns, parent_res, res_id, type, 1);
        if (res == NULL)
                RETURN(NULL);

        lock = ldlm_lock_new(parent_lock, res);
        if (lock == NULL) {
                ldlm_resource_put(res);
                RETURN(NULL);
        }

        lock->l_req_mode = mode;
        lock->l_data = data;
        lock->l_data_len = data_len;

        return lock;
}

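/*
 * Core of the enqueue path.  On the server, the resource type's policy
 * function may change the resource or abort the request first.  Then, if
 * the lock cannot be granted immediately, it is placed on the resource's
 * converting or waiting list and the corresponding LDLM_FL_BLOCK_* flag is
 * returned to the caller; otherwise it is granted on the spot.  On a client
 * (local namespace) the decision is dictated entirely by the flags the
 * server sent back.
 */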
/* Must be called with lock->l_lock and lock->l_resource->lr_lock not held */
ldlm_error_t ldlm_lock_enqueue(struct ldlm_lock * lock,
                               void *cookie, int cookie_len,
                               int *flags,
                               ldlm_completion_callback completion,
                               ldlm_blocking_callback blocking)
{
        struct ldlm_resource *res;
        int local;
        ldlm_res_policy policy;
        ENTRY;

        res = lock->l_resource;
        lock->l_blocking_ast = blocking;

        if (res->lr_type == LDLM_EXTENT)
                memcpy(&lock->l_extent, cookie, sizeof(lock->l_extent));

        /* policies are not executed on the client */
        local = res->lr_namespace->ns_client;
        if (!local && (policy = ldlm_res_policy_table[res->lr_type])) {
                int rc;
                rc = policy(lock, cookie, lock->l_req_mode, NULL);

                if (rc == ELDLM_LOCK_CHANGED) {
                        res = lock->l_resource;
                        *flags |= LDLM_FL_LOCK_CHANGED;
                } else if (rc == ELDLM_LOCK_ABORTED) {
                        ldlm_lock_destroy(lock);
                        RETURN(rc);
                }
        }

        lock->l_cookie = cookie;
        lock->l_cookie_len = cookie_len;

        l_lock(&res->lr_namespace->ns_lock);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted before
                 * we got a chance to actually enqueue it.  We don't need to do
                 * anything else. */
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                          LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
                GOTO(out, ELDLM_OK);
        }

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below).  In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags' */
        ldlm_resource_unlink_lock(lock);
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, res->lr_converting.prev,
                                               lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
                else
                        ldlm_grant_lock(lock);
                GOTO(out, ELDLM_OK);
        }

        /* FIXME: We may want to optimize by checking lr_most_restr */
        if (!list_empty(&res->lr_converting)) {
                ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
                *flags |= LDLM_FL_BLOCK_CONV;
                GOTO(out, ELDLM_OK);
        }
        if (!list_empty(&res->lr_waiting)) {
                ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
                *flags |= LDLM_FL_BLOCK_WAIT;
                GOTO(out, ELDLM_OK);
        }
        if (!ldlm_lock_compat(lock, 0)) {
                ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
                *flags |= LDLM_FL_BLOCK_GRANTED;
                GOTO(out, ELDLM_OK);
        }

        ldlm_grant_lock(lock);
        EXIT;
      out:
        l_unlock(&res->lr_namespace->ns_lock);
        /* Don't set 'completion_ast' until here so that if the lock is granted
         * immediately we don't do an unnecessary completion call. */
        lock->l_completion_ast = completion;
        return ELDLM_OK;
}

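/*
 * Walk a (waiting or converting) queue and grant every lock that is now
 * compatible with the granted locks, queueing blocking ASTs as needed.
 * Stops and returns 1 at the first lock that still conflicts, so queue
 * ordering is preserved; returns 0 if the whole queue was drained.
 */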
/* Must be called with namespace taken: queue is waiting or converting. */
static int ldlm_reprocess_queue(struct ldlm_resource *res,
                                struct list_head *queue)
{
        struct list_head *tmp, *pos;
        ENTRY;

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;
                pending = list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                if (!ldlm_lock_compat(pending, 1))
                        RETURN(1);

                list_del_init(&pending->l_res_link);
                ldlm_grant_lock(pending);
        }

        RETURN(0);
}

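/*
 * Deliver the blocking/completion callbacks collected on an rpc_list by
 * ldlm_add_ast_work_item(), then drop the lock reference held by each work
 * item and free it.  Callers in this file drop the namespace lock before
 * calling this, so the ASTs run without it held.
 */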
void ldlm_run_ast_work(struct list_head *rpc_list)
{
        struct list_head *tmp, *pos;
        int rc;
        ENTRY;

        list_for_each_safe(tmp, pos, rpc_list) {
                struct ldlm_ast_work *w =
                        list_entry(tmp, struct ldlm_ast_work, w_list);

                if (w->w_blocking)
                        rc = w->w_lock->l_blocking_ast
                                (w->w_lock, &w->w_desc, w->w_data,
                                 w->w_datalen, LDLM_CB_BLOCKING);
                else
                        rc = w->w_lock->l_completion_ast(w->w_lock, w->w_flags);
                if (rc)
                        CERROR("Failed AST - should clean & disconnect "
                               "client\n");
                LDLM_LOCK_PUT(w->w_lock);
                list_del(&w->w_list);
                OBD_FREE(w, sizeof(*w));
        }
        EXIT;
}

/* Must be called with resource->lr_lock not taken. */
void ldlm_reprocess_all(struct ldlm_resource *res)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        ENTRY;

        /* Local lock trees don't get reprocessed. */
        if (res->lr_namespace->ns_client) {
                EXIT;
                return;
        }

        l_lock(&res->lr_namespace->ns_lock);
        res->lr_tmp = &rpc_list;

        ldlm_reprocess_queue(res, &res->lr_converting);
        if (list_empty(&res->lr_converting))
                ldlm_reprocess_queue(res, &res->lr_waiting);

        res->lr_tmp = NULL;
        l_unlock(&res->lr_namespace->ns_lock);

        ldlm_run_ast_work(&rpc_list);
        EXIT;
}

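/*
 * Run the blocking callback in LDLM_CB_CANCELING mode, at most once per
 * lock: the LDLM_FL_CANCEL flag guards against repeat invocations.
 */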
void ldlm_cancel_callback(struct ldlm_lock *lock)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (!(lock->l_flags & LDLM_FL_CANCEL)) {
                lock->l_flags |= LDLM_FL_CANCEL;
                lock->l_blocking_ast(lock, NULL, lock->l_data,
                                     lock->l_data_len, LDLM_CB_CANCELING);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}

void ldlm_lock_cancel(struct ldlm_lock *lock)
{
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        ENTRY;

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);
        if (lock->l_readers || lock->l_writers)
                CDEBUG(D_INFO, "lock still has references (%d readers, %d "
                       "writers)\n", lock->l_readers, lock->l_writers);

        ldlm_cancel_callback(lock);

        ldlm_del_waiting_lock(lock);
        ldlm_resource_unlink_lock(lock);
        ldlm_lock_destroy(lock);
        l_unlock(&ns->ns_lock);
        EXIT;
}

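/*
 * Cancel every lock still held on behalf of an export (typically when a
 * client goes away).  A reference is taken on each lock's resource before
 * the cancel so the resource can still be reprocessed and released safely
 * after the lock itself is gone.
 */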
void ldlm_cancel_locks_for_export(struct obd_export *exp)
{
        struct list_head *iter, *n; /* MUST BE CALLED "n"! */

        list_for_each_safe(iter, n, &exp->exp_ldlm_data.led_held_locks) {
                struct ldlm_lock *lock;
                struct ldlm_resource *res;
                lock = list_entry(iter, struct ldlm_lock, l_export_chain);
                res = ldlm_resource_getref(lock->l_resource);
                LDLM_DEBUG(lock, "cancelling lock for export %p", exp);
                ldlm_lock_cancel(lock);
                ldlm_reprocess_all(res);
                ldlm_resource_put(res);
        }
}

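/*
 * Change a lock's requested mode.  The lock is unlinked from its current
 * queue and requeued: on a client it follows the LDLM_FL_BLOCK_* flags
 * handed back by the server, while on the server it is always placed on
 * the converting list and the caller is told to wait via
 * LDLM_FL_BLOCK_CONV.  Returns the lock's resource.
 */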
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        int *flags)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        int granted = 0;
        ENTRY;

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);

        lock->l_req_mode = new_mode;
        ldlm_resource_unlink_lock(lock);

        /* If this is a local resource, put it on the appropriate list. */
        if (res->lr_namespace->ns_client) {
                if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, res->lr_converting.prev,
                                               lock);
                else {
                        /* This should never happen, because of the way the
                         * server handles conversions. */
                        LBUG();

                        res->lr_tmp = &rpc_list;
                        ldlm_grant_lock(lock);
                        res->lr_tmp = NULL;
                        granted = 1;
                        /* FIXME: completion handling not with ns_lock held ! */
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0);
                }
        } else {
                /* FIXME: We should try the conversion right away and possibly
                 * return success without the need for an extra AST */
                ldlm_resource_add_lock(res, res->lr_converting.prev, lock);
                *flags |= LDLM_FL_BLOCK_CONV;
        }

        l_unlock(&ns->ns_lock);

        if (granted)
                ldlm_run_ast_work(&rpc_list);
        RETURN(res);
}

void ldlm_lock_dump(struct ldlm_lock *lock)
{
        char ver[128];

        if (!(portal_debug & D_OTHER))
                return;

        if (RES_VERSION_SIZE != 4)
                LBUG();

        if (!lock) {
                CDEBUG(D_OTHER, "  NULL LDLM lock\n");
                return;
        }

        snprintf(ver, sizeof(ver), "%x %x %x %x",
                 lock->l_version[0], lock->l_version[1],
                 lock->l_version[2], lock->l_version[3]);

        CDEBUG(D_OTHER, "  -- Lock dump: %p (%s)\n", lock, ver);
        if (lock->l_export && lock->l_export->exp_connection)
                CDEBUG(D_OTHER, "  Node: NID %x (rhandle: "LPX64")\n",
                       lock->l_export->exp_connection->c_peer.peer_nid,
                       lock->l_remote_handle.addr);
        else
                CDEBUG(D_OTHER, "  Node: local\n");
        CDEBUG(D_OTHER, "  Parent: %p\n", lock->l_parent);
        CDEBUG(D_OTHER, "  Resource: %p ("LPD64")\n", lock->l_resource,
               lock->l_resource->lr_name[0]);
        CDEBUG(D_OTHER, "  Requested mode: %d, granted mode: %d\n",
               (int)lock->l_req_mode, (int)lock->l_granted_mode);
        CDEBUG(D_OTHER, "  Readers: %u ; Writers: %u\n",
               lock->l_readers, lock->l_writers);
        if (lock->l_resource->lr_type == LDLM_EXTENT)
                CDEBUG(D_OTHER, "  Extent: %Lu -> %Lu\n",
                       (unsigned long long)lock->l_extent.start,
                       (unsigned long long)lock->l_extent.end);
}