in ldlm_lock_dump, show the lock holder's nid and the remote lock handle
[fs/lustre-release.git] lustre/ldlm/ldlm_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2002 Cluster File Systems, Inc.
5  *
6  * This code is issued under the GNU General Public License.
7  * See the file COPYING in this distribution
8  *
9  * by Cluster File Systems, Inc.
10  * authors, Peter Braam <braam@clusterfs.com> &
11  * Phil Schwan <phil@clusterfs.com>
12  */
13
14 #define DEBUG_SUBSYSTEM S_LDLM
15
16 #include <linux/slab.h>
17 #include <linux/module.h>
18 #include <linux/random.h>
19 #include <linux/lustre_dlm.h>
20 #include <linux/lustre_mds.h>
21
22 /* lock types */
23 char *ldlm_lockname[] = {
24         [0] "--",
25         [LCK_EX] "EX",
26         [LCK_PW] "PW",
27         [LCK_PR] "PR",
28         [LCK_CW] "CW",
29         [LCK_CR] "CR",
30         [LCK_NL] "NL"
31 };
32 char *ldlm_typename[] = {
33         [LDLM_PLAIN] "PLN",
34         [LDLM_EXTENT] "EXT",
35         [LDLM_MDSINTENT] "INT"
36 };
37
38 char *ldlm_it2str(int it)
39 {
40         switch (it) {
41         case IT_OPEN:
42                 return "open";
43         case IT_CREAT:
44                 return "creat";
45         case (IT_OPEN | IT_CREAT):
46                 return "open|creat";
47         case IT_MKDIR:
48                 return "mkdir";
49         case IT_LINK:
50                 return "link";
51         case IT_SYMLINK:
52                 return "symlink";
53         case IT_UNLINK:
54                 return "unlink";
55         case IT_RMDIR:
56                 return "rmdir";
57         case IT_RENAME:
58                 return "rename";
59         case IT_RENAME2:
60                 return "rename2";
61         case IT_READDIR:
62                 return "readdir";
63         case IT_GETATTR:
64                 return "getattr";
65         case IT_SETATTR:
66                 return "setattr";
67         case IT_READLINK:
68                 return "readlink";
69         case IT_MKNOD:
70                 return "mknod";
71         case IT_LOOKUP:
72                 return "lookup";
73         default:
74                 CERROR("Unknown intent %d\n", it);
75                 return "UNKNOWN";
76         }
77 }
78
79 extern kmem_cache_t *ldlm_lock_slab;
80
81 static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b);
82
83 ldlm_res_compat ldlm_res_compat_table[] = {
84         [LDLM_PLAIN] ldlm_plain_compat,
85         [LDLM_EXTENT] ldlm_extent_compat,
86         [LDLM_MDSINTENT] ldlm_plain_compat
87 };
88
89 ldlm_res_policy ldlm_res_policy_table[] = {
90         [LDLM_PLAIN] NULL,
91         [LDLM_EXTENT] ldlm_extent_policy,
92         [LDLM_MDSINTENT] NULL
93 };
94
95 void ldlm_register_intent(int (*arg) (struct ldlm_lock * lock, void *req_cookie,
96                                       ldlm_mode_t mode, void *data))
97 {
98         ldlm_res_policy_table[LDLM_MDSINTENT] = arg;
99 }
100
101 void ldlm_unregister_intent()
102 {
103         ldlm_res_policy_table[LDLM_MDSINTENT] = NULL;
104 }
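
/*
 * Illustrative usage sketch: a server-side component would install its intent
 * policy at setup time and remove it again on shutdown.  The callback name
 * example_intent_policy is hypothetical; the signature matches the argument
 * expected by ldlm_register_intent() above.
 *
 *      static int example_intent_policy(struct ldlm_lock *lock,
 *                                       void *req_cookie, ldlm_mode_t mode,
 *                                       void *data)
 *      {
 *              ... inspect req_cookie, possibly move the lock to another
 *                  resource, and return ELDLM_OK, ELDLM_LOCK_CHANGED or
 *                  ELDLM_LOCK_ABORTED ...
 *      }
 *
 *      ldlm_register_intent(example_intent_policy);
 *      ...
 *      ldlm_unregister_intent();
 */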
105
106 /*
107  * REFCOUNTED LOCK OBJECTS
108  */
109
110
111 /*
112  * Lock refcounts, during creation:
113  *   - one special reference for the allocation, dropped only once, in destroy
114  *   - one for being a lock that's in-use
115  *   - one for the addref associated with a new lock
116  */
117 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
118 {
119         l_lock(&lock->l_resource->lr_namespace->ns_lock);
120         lock->l_refc++;
121         ldlm_resource_getref(lock->l_resource);
122         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
123         return lock;
124 }
125
126 void ldlm_lock_put(struct ldlm_lock *lock)
127 {
128         struct lustre_lock *nslock = &lock->l_resource->lr_namespace->ns_lock;
129         ENTRY;
130
131         l_lock(nslock);
132         lock->l_refc--;
133         LDLM_DEBUG(lock, "after refc--");
134         if (lock->l_refc < 0)
135                 LBUG();
136
137         ldlm_resource_put(lock->l_resource);
138         if (lock->l_parent)
139                 LDLM_LOCK_PUT(lock->l_parent);
140
141         if (lock->l_refc == 0 && (lock->l_flags & LDLM_FL_DESTROYED)) {
142                 lock->l_resource = NULL;
143                 LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing");
144                 if (lock->l_connection)
145                         ptlrpc_put_connection(lock->l_connection);
146                 CDEBUG(D_MALLOC, "kfreed 'lock': %d at %p (tot 1).\n",
147                        sizeof(*lock), lock);
148                 kmem_cache_free(ldlm_lock_slab, lock);
149         }
150         l_unlock(nslock);
151         EXIT;
152 }
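
/*
 * Illustrative sketch: each reference taken with LDLM_LOCK_GET() must be
 * balanced by an LDLM_LOCK_PUT(); the final put on a lock that has been
 * ldlm_lock_destroy()ed is what actually frees it.  A transient user of a
 * lock pointer therefore looks like:
 *
 *      struct ldlm_lock *l = LDLM_LOCK_GET(lock);
 *      ... use l while the reference is held ...
 *      LDLM_LOCK_PUT(l);
 */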
153
154 void ldlm_lock_destroy(struct ldlm_lock *lock)
155 {
156         ENTRY;
157         l_lock(&lock->l_resource->lr_namespace->ns_lock);
158
159         if (!list_empty(&lock->l_children)) {
160                 LDLM_DEBUG(lock, "still has children (%p)!",
161                            lock->l_children.next);
162                 ldlm_lock_dump(lock);
163                 LBUG();
164         }
165         if (lock->l_readers || lock->l_writers) {
166                 LDLM_DEBUG(lock, "lock still has references");
167                 ldlm_lock_dump(lock);
168                 LBUG();
169         }
170
171         if (!list_empty(&lock->l_res_link)) {
172                 ldlm_lock_dump(lock);
173                 LBUG();
174         }
175
176         if (lock->l_flags & LDLM_FL_DESTROYED) {
177                 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
178                 EXIT;
179                 return;
180         }
181
182         lock->l_flags = LDLM_FL_DESTROYED;
183         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
184         LDLM_LOCK_PUT(lock);
185         EXIT;
186 }
187
188 /*
189    usage: pass in a resource on which you have taken a reference
190           pass in a parent lock on which you have taken a reference
191           do not put the resource or the parent
192    returns: lock with refcount 1
193 */
194 static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
195                                        struct ldlm_resource *resource)
196 {
197         struct ldlm_lock *lock;
198         ENTRY;
199
200         if (resource == NULL)
201                 LBUG();
202
203         lock = kmem_cache_alloc(ldlm_lock_slab, SLAB_KERNEL);
204         if (lock == NULL)
205                 RETURN(NULL);
206         CDEBUG(D_MALLOC, "kmalloced 'lock': %d at "
207                "%p (tot %d).\n", sizeof(*lock), lock, 1);
208
209         memset(lock, 0, sizeof(*lock));
210         get_random_bytes(&lock->l_random, sizeof(__u64));
211
212         lock->l_resource = resource;
213         /* this refcount matches the reference on the resource passed
214            in, which is not being released */
215         lock->l_refc = 1;
216         INIT_LIST_HEAD(&lock->l_children);
217         INIT_LIST_HEAD(&lock->l_res_link);
218         init_waitqueue_head(&lock->l_waitq);
219
220         if (parent != NULL) {
221                 l_lock(&parent->l_resource->lr_namespace->ns_lock);
222                 lock->l_parent = parent;
223                 list_add(&lock->l_childof, &parent->l_children);
224                 l_unlock(&parent->l_resource->lr_namespace->ns_lock);
225         }
226         /* this is the extra refcount, to prevent the lock
227            from evaporating */
228         LDLM_LOCK_GET(lock);
229         RETURN(lock);
230 }
231
232 int ldlm_lock_change_resource(struct ldlm_lock *lock, __u64 new_resid[3])
233 {
234         struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
235         struct ldlm_resource *oldres = lock->l_resource;
236         int type, i;
237         ENTRY;
238
239         l_lock(&ns->ns_lock);
240         if (memcmp(new_resid, lock->l_resource->lr_name,
241                    sizeof(lock->l_resource->lr_name)) == 0) {
242                 /* Nothing to do */
243                 l_unlock(&ns->ns_lock);
244                 RETURN(0);
245         }
246
247         type = lock->l_resource->lr_type;
248         if (new_resid[0] == 0)
249                 LBUG();
250         lock->l_resource = ldlm_resource_get(ns, NULL, new_resid, type, 1);
251         if (lock->l_resource == NULL) {
252                 LBUG();
253                 RETURN(-ENOMEM);
254         }
255
256         /* move references over */
257         for (i = 0; i < lock->l_refc; i++) {
258                 int rc;
259                 ldlm_resource_getref(lock->l_resource);
260                 rc = ldlm_resource_put(oldres);
261                 if (rc == 1 && i != lock->l_refc - 1)
262                         LBUG();
263         }
264         /* compensate for the initial get above */
265         ldlm_resource_put(lock->l_resource);
266
267         l_unlock(&ns->ns_lock);
268         RETURN(0);
269 }
270
271 /*
272  *  HANDLES
273  */
274
275 void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
276 {
277         lockh->addr = (__u64) (unsigned long)lock;
278         lockh->cookie = lock->l_random;
279 }
280
281 struct ldlm_lock *ldlm_handle2lock(struct lustre_handle *handle)
282 {
283         struct ldlm_lock *lock = NULL, *retval = NULL;
284         ENTRY;
285
286         if (!handle || !handle->addr)
287                 RETURN(NULL);
288
289         lock = (struct ldlm_lock *)(unsigned long)(handle->addr);
290         if (!kmem_cache_validate(ldlm_lock_slab, (void *)lock))
291                 RETURN(NULL);
292
293         l_lock(&lock->l_resource->lr_namespace->ns_lock);
294         if (lock->l_random != handle->cookie)
295                 GOTO(out, NULL);
296
297         if (lock->l_flags & LDLM_FL_DESTROYED)
298                 GOTO(out, NULL);
299
300         retval = LDLM_LOCK_GET(lock);
301         EXIT;
302       out:
303         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
304         return retval;
305 }
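
/*
 * Illustrative sketch: a handle is the opaque, wire-safe identity of a lock.
 * ldlm_handle2lock() returns a referenced lock, or NULL if the handle is
 * stale or the lock destroyed, so the result must be dropped with
 * LDLM_LOCK_PUT() when the caller is done:
 *
 *      struct lustre_handle lockh;
 *      struct ldlm_lock *l;
 *
 *      ldlm_lock2handle(lock, &lockh);
 *      ...
 *      l = ldlm_handle2lock(&lockh);
 *      if (l != NULL) {
 *              ... use l ...
 *              LDLM_LOCK_PUT(l);
 *      }
 */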
306
307
308
309
310 static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b)
311 {
312         return lockmode_compat(a->l_req_mode, b->l_req_mode);
313 }
314
315 void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
316 {
317         ldlm_res2desc(lock->l_resource, &desc->l_resource);
318         desc->l_req_mode = lock->l_req_mode;
319         desc->l_granted_mode = lock->l_granted_mode;
320         memcpy(&desc->l_extent, &lock->l_extent, sizeof(desc->l_extent));
321         memcpy(desc->l_version, lock->l_version, sizeof(desc->l_version));
322 }
323
324 static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
325                                    struct ldlm_lock *new)
326 {
327         struct ldlm_ast_work *w;
328         ENTRY;
329
330         l_lock(&lock->l_resource->lr_namespace->ns_lock);
331         if (new && (lock->l_flags & LDLM_FL_AST_SENT))
332                 GOTO(out, 0);
333
334         OBD_ALLOC(w, sizeof(*w));
335         if (!w) {
336                 LBUG();
337                 GOTO(out, 0);
338         }
339
340         if (new) {
341                 lock->l_flags |= LDLM_FL_AST_SENT;
342                 w->w_blocking = 1;
343                 ldlm_lock2desc(new, &w->w_desc);
344         }
345
346         w->w_lock = LDLM_LOCK_GET(lock);
347         list_add(&w->w_list, lock->l_resource->lr_tmp);
348       out:
349         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
350         return;
351 }
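
/*
 * Note that work items are only queued here, on the list that lr_tmp points
 * at; the ASTs themselves are sent later by ldlm_run_ast_work(), after the
 * namespace lock has been dropped (see ldlm_reprocess_all()), so no RPCs go
 * out while ns_lock is held.
 */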
352
353 void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
354 {
355         struct ldlm_lock *lock;
356
357         lock = ldlm_handle2lock(lockh);
358         ldlm_lock_addref_internal(lock, mode);
359         LDLM_LOCK_PUT(lock);
360 }
361
362 /* only called for local locks */
363 void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
364 {
365         l_lock(&lock->l_resource->lr_namespace->ns_lock);
366         if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR)
367                 lock->l_readers++;
368         else
369                 lock->l_writers++;
370         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
371         LDLM_LOCK_GET(lock);
372         LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
373 }
374
375 /* Args: unlocked lock */
376 void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
377 {
378         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
379         ENTRY;
380
381         if (lock == NULL)
382                 LBUG();
383
384         LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
385         l_lock(&lock->l_resource->lr_namespace->ns_lock);
386         if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR)
387                 lock->l_readers--;
388         else
389                 lock->l_writers--;
390
391         /* If we received a blocking AST and this was the last reference,
392          * run the callback. */
393         if (!lock->l_readers && !lock->l_writers &&
394             (lock->l_flags & LDLM_FL_CBPENDING)) {
395                 if (!lock->l_resource->lr_namespace->ns_client) {
396                         CERROR("LDLM_FL_CBPENDING set on non-local lock!\n");
397                         LBUG();
398                 }
399
400                 LDLM_DEBUG(lock, "final decref done on cbpending lock");
401                 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
402
403                 /* FIXME: need a real 'desc' here */
404                 lock->l_blocking_ast(lock, NULL, lock->l_data,
405                                      lock->l_data_len);
406         } else
407                 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
408
409         LDLM_LOCK_PUT(lock);    /* matches the ldlm_lock_get in addref */
410         LDLM_LOCK_PUT(lock);    /* matches the handle2lock above */
411
412         EXIT;
413 }
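
/*
 * Illustrative sketch: addref/decref count active users of a lock in reader
 * or writer modes and are separate from the LDLM_LOCK_GET/PUT memory
 * references.  Assuming a handle 'lockh' (hypothetical name) for a granted
 * lock, a reader typically does:
 *
 *      ldlm_lock_addref(&lockh, LCK_PR);
 *      ... perform the protected access ...
 *      ldlm_lock_decref(&lockh, LCK_PR);
 *
 * As implemented above, the final decref on a lock with LDLM_FL_CBPENDING
 * set runs the blocking callback.
 */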
414
415 static int ldlm_lock_compat_list(struct ldlm_lock *lock, int send_cbs,
416                                  struct list_head *queue)
417 {
418         struct list_head *tmp, *pos;
419         int rc = 1;
420
421         list_for_each_safe(tmp, pos, queue) {
422                 struct ldlm_lock *child;
423                 ldlm_res_compat compat;
424
425                 child = list_entry(tmp, struct ldlm_lock, l_res_link);
426                 if (lock == child)
427                         continue;
428
429                 compat = ldlm_res_compat_table[child->l_resource->lr_type];
430                 if (compat && compat(child, lock)) {
431                         CDEBUG(D_OTHER, "compat function succeeded, next.\n");
432                         continue;
433                 }
434                 if (lockmode_compat(child->l_granted_mode, lock->l_req_mode)) {
435                         CDEBUG(D_OTHER, "lock modes are compatible, next.\n");
436                         continue;
437                 }
438
439                 rc = 0;
440
441                 if (send_cbs && child->l_blocking_ast != NULL) {
442                         CDEBUG(D_OTHER, "incompatible; sending blocking "
443                                "AST.\n");
444                         ldlm_add_ast_work_item(child, lock);
445                 }
446         }
447
448         return rc;
449 }
450
451 static int ldlm_lock_compat(struct ldlm_lock *lock, int send_cbs)
452 {
453         int rc;
454         ENTRY;
455
456         l_lock(&lock->l_resource->lr_namespace->ns_lock);
457         rc = ldlm_lock_compat_list(lock, send_cbs,
458                                    &lock->l_resource->lr_granted);
459         /* FIXME: should we be sending ASTs to converting? */
460         if (rc)
461                 rc = ldlm_lock_compat_list
462                         (lock, send_cbs, &lock->l_resource->lr_converting);
463
464         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
465         RETURN(rc);
466 }
467
468 /* NOTE: called by
469    - ldlm_handle_enqueue - resource
470 */
471 void ldlm_grant_lock(struct ldlm_lock *lock)
472 {
473         struct ldlm_resource *res = lock->l_resource;
474         ENTRY;
475
476         l_lock(&lock->l_resource->lr_namespace->ns_lock);
477         ldlm_resource_add_lock(res, &res->lr_granted, lock);
478         lock->l_granted_mode = lock->l_req_mode;
479
480         if (lock->l_granted_mode < res->lr_most_restr)
481                 res->lr_most_restr = lock->l_granted_mode;
482
483         if (lock->l_completion_ast) {
484                 ldlm_add_ast_work_item(lock, NULL);
485         }
486         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
487         EXIT;
488 }
489
490 /* returns a referenced lock or NULL */
491 static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode,
492                                       struct ldlm_extent *extent)
493 {
494         struct ldlm_lock *lock;
495         struct list_head *tmp;
496
497         list_for_each(tmp, queue) {
498                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
499
500                 if (lock->l_flags & LDLM_FL_CBPENDING)
501                         continue;
502
503                 /* lock_convert() takes the resource lock, so we're sure that
504                  * req_mode, lr_type, and l_cookie won't change beneath us */
505                 if (lock->l_req_mode != mode)
506                         continue;
507
508                 if (lock->l_resource->lr_type == LDLM_EXTENT &&
509                     (lock->l_extent.start > extent->start ||
510                      lock->l_extent.end < extent->end))
511                         continue;
512
513                 ldlm_lock_addref_internal(lock, mode);
514                 return lock;
515         }
516
517         return NULL;
518 }
519
520 /* Must be called with no resource or lock locks held.
521  *
522  * Returns 1 if it finds an already-existing lock that is compatible; in this
523  * case, lockh is filled in with an addref()ed lock.
524  */
525 int ldlm_lock_match(struct ldlm_namespace *ns, __u64 * res_id, __u32 type,
526                     void *cookie, int cookielen, ldlm_mode_t mode,
527                     struct lustre_handle *lockh)
528 {
529         struct ldlm_resource *res;
530         struct ldlm_lock *lock;
531         int rc = 0;
532         ENTRY;
533
534         res = ldlm_resource_get(ns, NULL, res_id, type, 0);
535         if (res == NULL)
536                 RETURN(0);
537
538         ns = res->lr_namespace;
539         l_lock(&ns->ns_lock);
540
541         if ((lock = search_queue(&res->lr_granted, mode, cookie)))
542                 GOTO(out, rc = 1);
543         if ((lock = search_queue(&res->lr_converting, mode, cookie)))
544                 GOTO(out, rc = 1);
545         if ((lock = search_queue(&res->lr_waiting, mode, cookie)))
546                 GOTO(out, rc = 1);
547
548         EXIT;
549       out:
550         ldlm_resource_put(res);
551         l_unlock(&ns->ns_lock);
552
553         if (lock) {
554                 ldlm_lock2handle(lock, lockh);
555                 if (lock->l_completion_ast)
556                         lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC);
557         }
558         if (rc)
559                 LDLM_DEBUG(lock, "matched");
560         else
561                 LDLM_DEBUG_NOLOCK("not matched");
562         return rc;
563 }
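
/*
 * Illustrative sketch: a successful match leaves the caller holding a 'mode'
 * reference (taken via ldlm_lock_addref_internal() in search_queue()), which
 * must eventually be dropped with ldlm_lock_decref().  'ns' and 'res_id' are
 * assumed to be supplied by the caller:
 *
 *      struct lustre_handle lockh;
 *
 *      if (ldlm_lock_match(ns, res_id, LDLM_PLAIN, NULL, 0, LCK_PR, &lockh)) {
 *              ... reuse the already-granted lock ...
 *              ldlm_lock_decref(&lockh, LCK_PR);
 *      } else {
 *              ... fall back to a fresh enqueue ...
 *      }
 */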
564
565 /* Returns a referenced lock */
566 struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
567                                    struct lustre_handle *parent_lock_handle,
568                                    __u64 * res_id, __u32 type,
569                                    ldlm_mode_t mode, void *data, __u32 data_len)
570 {
571         struct ldlm_resource *res, *parent_res = NULL;
572         struct ldlm_lock *lock, *parent_lock;
573
574         parent_lock = ldlm_handle2lock(parent_lock_handle);
575         if (parent_lock)
576                 parent_res = parent_lock->l_resource;
577
578         res = ldlm_resource_get(ns, parent_res, res_id, type, 1);
579         if (res == NULL)
580                 RETURN(NULL);
581
582         lock = ldlm_lock_new(parent_lock, res);
583         if (lock == NULL) {
584                 ldlm_resource_put(res);
585                 RETURN(NULL);
586         }
587
588         lock->l_req_mode = mode;
589         lock->l_data = data;
590         lock->l_data_len = data_len;
591
592         return lock;
593 }
594
595 /* Must be called with lock->l_lock and lock->l_resource->lr_lock not held */
596 ldlm_error_t ldlm_lock_enqueue(struct ldlm_lock * lock,
597                                void *cookie, int cookie_len,
598                                int *flags,
599                                ldlm_completion_callback completion,
600                                ldlm_blocking_callback blocking)
601 {
602         struct ldlm_resource *res;
603         int local;
604         ldlm_res_policy policy;
605         ENTRY;
606
607         res = lock->l_resource;
608         lock->l_blocking_ast = blocking;
609
610         if (res->lr_type == LDLM_EXTENT)
611                 memcpy(&lock->l_extent, cookie, sizeof(lock->l_extent));
612
613         /* policies are not executed on the client */
614         local = res->lr_namespace->ns_client;
615         if (!local && (policy = ldlm_res_policy_table[res->lr_type])) {
616                 int rc;
617                 rc = policy(lock, cookie, lock->l_req_mode, NULL);
618
619                 if (rc == ELDLM_LOCK_CHANGED) {
620                         res = lock->l_resource;
621                         *flags |= LDLM_FL_LOCK_CHANGED;
622                 } else if (rc == ELDLM_LOCK_ABORTED) {
623                         ldlm_lock_destroy(lock);
624                         RETURN(rc);
625                 }
626         }
627
628         lock->l_cookie = cookie;
629         lock->l_cookie_len = cookie_len;
630
631         if (local && lock->l_req_mode == lock->l_granted_mode) {
632                 /* The server returned a blocked lock, but it was granted before
633                  * we got a chance to actually enqueue it.  We don't need to do
634                  * anything else. */
635                 GOTO(out, ELDLM_OK);
636         }
637
638         /* This distinction between local and server lock trees is very
639          * important; a client namespace only has information about locks taken
640          * by that client, and thus doesn't have enough information to decide
641          * for itself whether a lock can be granted (below).  In this case, we
642          * do exactly what the server tells us to do, as dictated by 'flags'. */
643         ldlm_resource_unlink_lock(lock);
644         if (local) {
645                 if (*flags & LDLM_FL_BLOCK_CONV)
646                         ldlm_resource_add_lock(res, res->lr_converting.prev,
647                                                lock);
648                 else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
649                         ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
650                 else
651                         ldlm_grant_lock(lock);
652                 GOTO(out, ELDLM_OK);
653         }
654
655         /* FIXME: We may want to optimize by checking lr_most_restr */
656         if (!list_empty(&res->lr_converting)) {
657                 ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
658                 *flags |= LDLM_FL_BLOCK_CONV;
659                 GOTO(out, ELDLM_OK);
660         }
661         if (!list_empty(&res->lr_waiting)) {
662                 ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
663                 *flags |= LDLM_FL_BLOCK_WAIT;
664                 GOTO(out, ELDLM_OK);
665         }
666         if (!ldlm_lock_compat(lock, 0)) {
667                 ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
668                 *flags |= LDLM_FL_BLOCK_GRANTED;
669                 GOTO(out, ELDLM_OK);
670         }
671
672         ldlm_grant_lock(lock);
673         EXIT;
674       out:
675         /* Don't set 'completion_ast' until here so that if the lock is granted
676          * immediately we don't do an unnecessary completion call. */
677         lock->l_completion_ast = completion;
678         return ELDLM_OK;
679 }
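
/*
 * Illustrative sketch: on the server a lock is typically created and then
 * enqueued; the *flags output reports whether it was granted immediately or
 * parked on the converting or waiting queue.  'ns', 'res_id', 'completion_cb'
 * and 'blocking_cb' below are hypothetical caller-supplied names:
 *
 *      int flags = 0;
 *      struct ldlm_lock *lock;
 *      ldlm_error_t err;
 *
 *      lock = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_PR, NULL, 0);
 *      err = ldlm_lock_enqueue(lock, NULL, 0, &flags,
 *                              completion_cb, blocking_cb);
 *      if (flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT |
 *                   LDLM_FL_BLOCK_GRANTED))
 *              ... the lock is blocked; the completion AST fires when it is
 *                  eventually granted ...
 */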
680
681 /* Must be called with the namespace lock held: queue is waiting or converting. */
682 static int ldlm_reprocess_queue(struct ldlm_resource *res,
683                                 struct list_head *queue)
684 {
685         struct list_head *tmp, *pos;
686         ENTRY;
687
688         list_for_each_safe(tmp, pos, queue) {
689                 struct ldlm_lock *pending;
690                 pending = list_entry(tmp, struct ldlm_lock, l_res_link);
691
692                 CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
693
694                 if (!ldlm_lock_compat(pending, 1))
695                         RETURN(1);
696
697                 list_del_init(&pending->l_res_link);
698                 ldlm_grant_lock(pending);
699         }
700
701         RETURN(0);
702 }
703
704 void ldlm_run_ast_work(struct list_head *rpc_list)
705 {
706         struct list_head *tmp, *pos;
707         int rc;
708         ENTRY;
709
710         list_for_each_safe(tmp, pos, rpc_list) {
711                 struct ldlm_ast_work *w =
712                         list_entry(tmp, struct ldlm_ast_work, w_list);
713
714                 if (w->w_blocking)
715                         rc = w->w_lock->l_blocking_ast
716                                 (w->w_lock, &w->w_desc, w->w_data,
717                                  w->w_datalen);
718                 else
719                         rc = w->w_lock->l_completion_ast(w->w_lock, w->w_flags);
720                 if (rc)
721                         CERROR("Failed AST - should clean & disconnect "
722                                "client\n");
723                 LDLM_LOCK_PUT(w->w_lock);
724                 list_del(&w->w_list);
725                 OBD_FREE(w, sizeof(*w));
726         }
727         EXIT;
728 }
729
730 /* Must be called with resource->lr_lock not taken. */
731 void ldlm_reprocess_all(struct ldlm_resource *res)
732 {
733         struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
734         ENTRY;
735
736         /* Local lock trees don't get reprocessed. */
737         if (res->lr_namespace->ns_client) {
738                 EXIT;
739                 return;
740         }
741
742         l_lock(&res->lr_namespace->ns_lock);
743         res->lr_tmp = &rpc_list;
744
745         ldlm_reprocess_queue(res, &res->lr_converting);
746         if (list_empty(&res->lr_converting))
747                 ldlm_reprocess_queue(res, &res->lr_waiting);
748
749         res->lr_tmp = NULL;
750         l_unlock(&res->lr_namespace->ns_lock);
751
752         ldlm_run_ast_work(&rpc_list);
753         EXIT;
754 }
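
/*
 * Note the ordering here: the converting and waiting queues are scanned, and
 * AST work items collected on the local rpc_list, while ns_lock is held, but
 * ldlm_run_ast_work() only sends the resulting callbacks after the lock has
 * been released.
 */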
755
756 void ldlm_lock_cancel(struct ldlm_lock *lock)
757 {
758         struct ldlm_resource *res;
759         struct ldlm_namespace *ns;
760         ENTRY;
761
762         res = lock->l_resource;
763         ns = res->lr_namespace;
764
765         l_lock(&ns->ns_lock);
766         if (lock->l_readers || lock->l_writers)
767                 CDEBUG(D_INFO, "lock still has references (%d readers, %d "
768                        "writers)\n", lock->l_readers, lock->l_writers);
769
770         ldlm_resource_unlink_lock(lock);
771         ldlm_lock_destroy(lock);
772         l_unlock(&ns->ns_lock);
773         EXIT;
774 }
775
776 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
777                                         int *flags)
778 {
779         struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
780         struct ldlm_resource *res;
781         struct ldlm_namespace *ns;
782         int granted = 0;
783         ENTRY;
784
785         res = lock->l_resource;
786         ns = res->lr_namespace;
787
788         l_lock(&ns->ns_lock);
789
790         lock->l_req_mode = new_mode;
791         ldlm_resource_unlink_lock(lock);
792
793         /* If this is a local resource, put it on the appropriate list. */
794         if (res->lr_namespace->ns_client) {
795                 if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED))
796                         ldlm_resource_add_lock(res, res->lr_converting.prev,
797                                                lock);
798                 else {
799                         res->lr_tmp = &rpc_list;
800                         ldlm_grant_lock(lock);
801                         res->lr_tmp = NULL;
802                         granted = 1;
803                         /* FIXME: completion handling not with ns_lock held ! */
804                         if (lock->l_completion_ast)
805                                 lock->l_completion_ast(lock, 0);
806                 }
807         } else
808                 list_add(&lock->l_res_link, res->lr_converting.prev);
809
810         l_unlock(&ns->ns_lock);
811
812         if (granted)
813                 ldlm_run_ast_work(&rpc_list);
814         RETURN(res);
815 }
816
817 void ldlm_lock_dump(struct ldlm_lock *lock)
818 {
819         char ver[128];
820
821         if (!(portal_debug & D_OTHER))
822                 return;
823
824         if (RES_VERSION_SIZE != 4)
825                 LBUG();
826
827         if (!lock) {
828                 CDEBUG(D_OTHER, "  NULL LDLM lock\n");
829                 return;
830         }
831
832         snprintf(ver, sizeof(ver), "%x %x %x %x",
833                  lock->l_version[0], lock->l_version[1],
834                  lock->l_version[2], lock->l_version[3]);
835
836         CDEBUG(D_OTHER, "  -- Lock dump: %p (%s)\n", lock, ver);
837         if (lock->l_connection)
838                 CDEBUG(D_OTHER, "  Node: NID %x (rhandle: %Lx)\n",
839                        lock->l_connection->c_peer.peer_nid, lock->l_remote_handle.addr);
840         CDEBUG(D_OTHER, "  Parent: %p\n", lock->l_parent);
841         CDEBUG(D_OTHER, "  Resource: %p (%Ld)\n", lock->l_resource,
842                lock->l_resource->lr_name[0]);
843         CDEBUG(D_OTHER, "  Requested mode: %d, granted mode: %d\n",
844                (int)lock->l_req_mode, (int)lock->l_granted_mode);
845         CDEBUG(D_OTHER, "  Readers: %u; Writers: %u\n",
846                lock->l_readers, lock->l_writers);
847         if (lock->l_resource->lr_type == LDLM_EXTENT)
848                 CDEBUG(D_OTHER, "  Extent: %Lu -> %Lu\n",
849                        (unsigned long long)lock->l_extent.start,
850                        (unsigned long long)lock->l_extent.end);
851 }
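
/*
 * Illustrative sketch: ldlm_lock_dump() is a D_OTHER debugging aid.  Given
 * only a handle, the lock has to be resolved and released around the call:
 *
 *      struct ldlm_lock *l = ldlm_handle2lock(&lockh);
 *      if (l != NULL) {
 *              ldlm_lock_dump(l);
 *              LDLM_LOCK_PUT(l);
 *      }
 */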