Whamcloud - gitweb
Merge b_md into HEAD
[fs/lustre-release.git] / lustre / ldlm / ldlm_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
5  *   Author: Peter Braam <braam@clusterfs.com>
6  *   Author: Phil Schwan <phil@clusterfs.com>
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or
11  *   modify it under the terms of version 2 of the GNU General Public
12  *   License as published by the Free Software Foundation.
13  *
14  *   Lustre is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *   GNU General Public License for more details.
18  *
19  *   You should have received a copy of the GNU General Public License
20  *   along with Lustre; if not, write to the Free Software
21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23
24 #define DEBUG_SUBSYSTEM S_LDLM
25
26 #include <linux/slab.h>
27 #include <linux/module.h>
28 #include <linux/lustre_dlm.h>
29 #include <linux/lustre_mds.h>
30 #include <linux/obd_class.h>
31
32 //struct lustre_lock ldlm_everything_lock;
33
34 /* lock types */
35 char *ldlm_lockname[] = {
36         [0] "--",
37         [LCK_EX] "EX",
38         [LCK_PW] "PW",
39         [LCK_PR] "PR",
40         [LCK_CW] "CW",
41         [LCK_CR] "CR",
42         [LCK_NL] "NL"
43 };
44 char *ldlm_typename[] = {
45         [LDLM_PLAIN] "PLN",
46         [LDLM_EXTENT] "EXT",
47 };
48
49 char *ldlm_it2str(int it)
50 {
51         switch (it) {
52         case IT_OPEN:
53                 return "open";
54         case IT_CREAT:
55                 return "creat";
56         case (IT_OPEN | IT_CREAT):
57                 return "open|creat";
58         case IT_READDIR:
59                 return "readdir";
60         case IT_GETATTR:
61                 return "getattr";
62         case IT_TRUNC:
63                 return "truncate";
64         case IT_SETATTR:
65                 return "setattr";
66         case IT_LOOKUP:
67                 return "lookup";
68         case IT_UNLINK:
69                 return "unlink";
70         default:
71                 CERROR("Unknown intent %d\n", it);
72                 return "UNKNOWN";
73         }
74 }
75
76 extern kmem_cache_t *ldlm_lock_slab;
77 struct lustre_lock ldlm_handle_lock;
78
79 static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b);
80
81 ldlm_res_compat ldlm_res_compat_table[] = {
82         [LDLM_PLAIN] ldlm_plain_compat,
83         [LDLM_EXTENT] ldlm_extent_compat,
84 };
85
86 static ldlm_res_policy ldlm_intent_policy_func;
87
88 static int ldlm_plain_policy(struct ldlm_namespace *ns, struct ldlm_lock **lock,
89                              void *req_cookie, ldlm_mode_t mode, int flags,
90                              void *data)
91 {
92         if ((flags & LDLM_FL_HAS_INTENT) && ldlm_intent_policy_func) {
93                 return ldlm_intent_policy_func(ns, lock, req_cookie, mode,
94                                                flags, data);
95         }
96
97         return ELDLM_OK;
98 }
99
100 ldlm_res_policy ldlm_res_policy_table[] = {
101         [LDLM_PLAIN] ldlm_plain_policy,
102         [LDLM_EXTENT] ldlm_extent_policy,
103 };
104
/* Install the intent policy callback that ldlm_plain_policy() invokes for
 * requests carrying LDLM_FL_HAS_INTENT. */
void ldlm_register_intent(ldlm_res_policy arg)
{
        ldlm_intent_policy_func = arg;
}
109
/* Remove the intent policy callback; intent-flagged requests then fall
 * through ldlm_plain_policy() as plain ELDLM_OK. */
void ldlm_unregister_intent(void)
{
        ldlm_intent_policy_func = NULL;
}
114
115 /*
116  * REFCOUNTED LOCK OBJECTS
117  */
118
119
120 /*
121  * Lock refcounts, during creation:
122  *   - one special one for allocation, dec'd only once in destroy
123  *   - one for being a lock that's in-use
124  *   - one for the addref associated with a new lock
125  */
/* Take an additional reference on @lock and return it.  Normally invoked
 * through the LDLM_LOCK_GET() macro. */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        atomic_inc(&lock->l_refc);
        return lock;
}
131
/* Drop a reference on @lock; when the last reference goes away, the lock is
 * freed.  A lock may only reach refcount zero after ldlm_lock_destroy() has
 * marked it destroyed and unlinked it from its resource (asserted below). */
void ldlm_lock_put(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        ENTRY;

        if (atomic_dec_and_test(&lock->l_refc)) {
                l_lock(&ns->ns_lock);
                LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing");
                /* Freeing a lock that was never destroyed/unlinked would
                 * leave dangling pointers in the resource queues. */
                LASSERT(lock->l_destroyed);
                LASSERT(list_empty(&lock->l_res_link));

                /* Per-namespace accounting of live locks. */
                spin_lock(&ns->ns_counter_lock);
                ns->ns_locks--;
                spin_unlock(&ns->ns_counter_lock);

                /* Release the resource reference taken in ldlm_lock_new(). */
                ldlm_resource_putref(lock->l_resource);
                lock->l_resource = NULL;

                /* Release the parent reference taken in ldlm_lock_new(). */
                if (lock->l_parent)
                        LDLM_LOCK_PUT(lock->l_parent);

                PORTAL_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
                l_unlock(&ns->ns_lock);
        }

        EXIT;
}
159
160 void ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
161 {
162         ENTRY;
163         l_lock(&lock->l_resource->lr_namespace->ns_lock);
164         if (!list_empty(&lock->l_lru)) {
165                 list_del_init(&lock->l_lru);
166                 lock->l_resource->lr_namespace->ns_nr_unused--;
167                 LASSERT(lock->l_resource->lr_namespace->ns_nr_unused >= 0);
168         }
169         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
170         EXIT;
171 }
172
/* This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock.
 * Because it's not in the hash table anymore.  -phil */
/* Mark @lock destroyed: unhash it, pull it off the export chain and LRU, and
 * drop the allocation reference (the "special one" from ldlm_lock_new()).
 * Idempotent: a second call on an already-destroyed lock returns early.
 * LBUGs if the lock still has children, read/write references, or is still
 * linked on a resource queue. */
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        ENTRY;
        l_lock(&lock->l_resource->lr_namespace->ns_lock);

        if (!list_empty(&lock->l_children)) {
                LDLM_ERROR(lock, "still has children (%p)!",
                           lock->l_children.next);
                ldlm_lock_dump(D_ERROR, lock);
                LBUG();
        }
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                ldlm_lock_dump(D_ERROR, lock);
                LBUG();
        }

        if (!list_empty(&lock->l_res_link)) {
                ldlm_lock_dump(D_ERROR, lock);
                LBUG();
        }

        /* Already destroyed: nothing left to do. */
        if (lock->l_destroyed) {
                LASSERT(list_empty(&lock->l_lru));
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                EXIT;
                return;
        }
        lock->l_destroyed = 1;

        list_del_init(&lock->l_export_chain);
        ldlm_lock_remove_from_lru(lock);
        /* After this, the handle can no longer be resolved to this lock. */
        portals_handle_unhash(&lock->l_handle);

#if 0
        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        lock->l_export = NULL;
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0);
#endif

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        /* Drop the allocation reference set up in ldlm_lock_new(). */
        LDLM_LOCK_PUT(lock);
        EXIT;
}
225
/* this is called by portals_handle2object with the handle lock taken */
/* Handle-table addref callback: takes a lock reference on behalf of the
 * caller resolving a handle. */
static void lock_handle_addref(void *lock)
{
        LDLM_LOCK_GET((struct ldlm_lock *)lock);
}
231
/*
 * usage: pass in a resource on which you have done ldlm_resource_get
 *        pass in a parent lock on which you have done a ldlm_lock_get
 *        after return, ldlm_*_put the resource and parent
 * returns: lock with refcount 1
 */
/* Allocate and initialize a new lock on @resource, optionally as a child of
 * @parent.  Takes its own references on the resource (and parent), so the
 * caller's references may be dropped after return.  Returns NULL on
 * allocation failure. */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
                                       struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;
        ENTRY;

        if (resource == NULL)
                LBUG();

        PORTAL_SLAB_ALLOC(lock, ldlm_lock_slab, sizeof(*lock));
        if (lock == NULL)
                RETURN(NULL);

        /* Own reference on the resource, released in ldlm_lock_put(). */
        lock->l_resource = ldlm_resource_getref(resource);

        /* Refcount 2: one "allocation" ref dropped by ldlm_lock_destroy(),
         * plus the caller's ref (the comment above says "refcount 1" from
         * the caller's point of view). */
        atomic_set(&lock->l_refc, 2);
        INIT_LIST_HEAD(&lock->l_children);
        INIT_LIST_HEAD(&lock->l_res_link);
        INIT_LIST_HEAD(&lock->l_lru);
        INIT_LIST_HEAD(&lock->l_export_chain);
        INIT_LIST_HEAD(&lock->l_pending_chain);
        init_waitqueue_head(&lock->l_waitq);

        /* Per-namespace accounting, decremented in ldlm_lock_put(). */
        spin_lock(&resource->lr_namespace->ns_counter_lock);
        resource->lr_namespace->ns_locks++;
        spin_unlock(&resource->lr_namespace->ns_counter_lock);

        if (parent != NULL) {
                l_lock(&parent->l_resource->lr_namespace->ns_lock);
                /* Parent ref released in ldlm_lock_put(). */
                lock->l_parent = LDLM_LOCK_GET(parent);
                list_add(&lock->l_childof, &parent->l_children);
                l_unlock(&parent->l_resource->lr_namespace->ns_lock);
        }

        /* Publish the lock in the handle table so remote cookies can be
         * resolved back to it; lock_handle_addref is the addref callback. */
        INIT_LIST_HEAD(&lock->l_handle.h_link);
        portals_handle_hash(&lock->l_handle, lock_handle_addref);

        RETURN(lock);
}
277
/* Move @lock from its current resource to the one named by @new_resid,
 * creating the new resource if needed (same type as the old one).  The lock
 * must not be linked on any resource queue.  No-op if the name is unchanged.
 * Returns 0 on success; -ENOMEM is unreachable in practice because the
 * failure path LBUGs first. */
int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              struct ldlm_res_id new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;
        ENTRY;

        l_lock(&ns->ns_lock);
        if (memcmp(&new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                l_unlock(&ns->ns_lock);
                RETURN(0);
        }

        LASSERT(new_resid.name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(list_empty(&lock->l_res_link));

        /* Takes a new reference on (and possibly creates) the new resource. */
        lock->l_resource = ldlm_resource_get(ns, NULL, new_resid,
                                             lock->l_resource->lr_type, 1);
        if (lock->l_resource == NULL) {
                LBUG();
                RETURN(-ENOMEM);
        }

        /* ...and the flowers are still standing! */
        /* Drop the reference the lock held on its old resource. */
        ldlm_resource_putref(oldres);

        l_unlock(&ns->ns_lock);
        RETURN(0);
}
310
311 /*
312  *  HANDLES
313  */
314
/* Fill @lockh with a wire handle for @lock.  The addr field is poisoned with
 * 0x69 (it is unused; only the cookie identifies the lock). */
void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        memset(&lockh->addr, 0x69, sizeof(lockh->addr));
        lockh->cookie = lock->l_handle.h_cookie;
}
320
/* if flags: atomically get the lock and set the flags.
 *           Return NULL if flag already set
 */
/* Resolve @handle to a referenced lock, or NULL if the cookie is stale, the
 * lock is already destroyed, or (when @flags is non-zero) any of @flags is
 * already set on the lock.  On success the lock carries the reference taken
 * by portals_handle2object()/lock_handle_addref(). */
struct ldlm_lock *__ldlm_handle2lock(struct lustre_handle *handle, int flags)
{
        struct ldlm_lock *lock = NULL, *retval = NULL;
        ENTRY;

        LASSERT(handle);

        /* Takes a lock reference via lock_handle_addref() on success. */
        lock = portals_handle2object(handle->cookie);
        if (lock == NULL)
                RETURN(NULL);

        LASSERT(lock->l_resource != NULL);
        LASSERT(lock->l_resource->lr_namespace != NULL);

        l_lock(&lock->l_resource->lr_namespace->ns_lock);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if (lock->l_destroyed) {
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                /* NOTE(review): the PUT here can drop the last reference, yet
                 * the out: label still dereferences lock->l_resource for the
                 * unlock.  Presumably safe only because the held ns_lock (and
                 * l_lock's recursion handling) keeps the free from completing
                 * underneath us — TODO confirm. */
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags && (lock->l_flags & flags)) {
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags)
                lock->l_flags |= flags;

        retval = lock;
        EXIT;
 out:
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        return retval;
}
363
364 struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *ns,
365                                       struct lustre_handle *handle)
366 {
367         struct ldlm_lock *retval = NULL;
368
369         l_lock(&ns->ns_lock);
370         retval = __ldlm_handle2lock(handle, 0);
371         l_unlock(&ns->ns_lock);
372
373         return retval;
374 }
375
/* Compatibility callback for plain locks: two plain locks are compatible
 * exactly when their requested modes are. */
static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b)
{
        return lockmode_compat(a->l_req_mode, b->l_req_mode);
}
380
/* Flatten @lock into the wire descriptor @desc (resource, modes, extent,
 * version) for sending in an AST or enqueue message. */
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        ldlm_res2desc(lock->l_resource, &desc->l_resource);
        desc->l_req_mode = lock->l_req_mode;
        desc->l_granted_mode = lock->l_granted_mode;
        memcpy(&desc->l_extent, &lock->l_extent, sizeof(desc->l_extent));
        memcpy(desc->l_version, lock->l_version, sizeof(desc->l_version));
}
389
/* Queue an AST work item for @lock onto its resource's lr_tmp list (to be
 * run later by ldlm_run_ast_work).  If @new is non-NULL this is a blocking
 * AST describing the conflicting lock @new; otherwise it is a completion
 * AST carrying @data/@datalen.  A blocking AST is queued at most once per
 * lock (LDLM_FL_AST_SENT guards repeats). */
static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
                                   struct ldlm_lock *new, 
                                   void *data, int datalen)
{
        struct ldlm_ast_work *w;
        ENTRY;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        /* Blocking AST already queued for this lock; don't queue another. */
        if (new && (lock->l_flags & LDLM_FL_AST_SENT))
                GOTO(out, 0);

        OBD_ALLOC(w, sizeof(*w));
        if (!w) {
                /* Allocation failure here would silently lose an AST, so
                 * treat it as fatal. */
                LBUG();
                GOTO(out, 0);
        }

        w->w_data = data;
        w->w_datalen = datalen;
        if (new) {
                lock->l_flags |= LDLM_FL_AST_SENT;
                w->w_blocking = 1;
                ldlm_lock2desc(new, &w->w_desc);
        }

        /* The work item holds its own lock reference until it is run. */
        w->w_lock = LDLM_LOCK_GET(lock);
        list_add(&w->w_list, lock->l_resource->lr_tmp);
        EXIT;
 out:
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        return;
}
422
423 void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
424 {
425         struct ldlm_lock *lock;
426
427         lock = ldlm_handle2lock(lockh);
428         ldlm_lock_addref_internal(lock, mode);
429         LDLM_LOCK_PUT(lock);
430 }
431
/* only called for local locks */
/* Account a read or write reference on @lock: NL/CR/PR modes count as
 * readers, all others as writers.  Pulls the lock off the unused-lock LRU
 * and takes a lock reference (released in ldlm_lock_decref_internal). */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_lock_remove_from_lru(lock);
        if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR)
                lock->l_readers++;
        else
                lock->l_writers++;
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        LDLM_LOCK_GET(lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}
445
446 /* Args: unlocked lock */
447 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
448                                     struct ldlm_res_id, int flags);
449
/* Drop a read/write reference (per @mode) on @lock.  When the last
 * reader/writer goes away: a local server lock gets CBPENDING forced so it
 * is cancelled; a CBPENDING lock has its blocking AST run; a client-side
 * lock is parked on the namespace LRU and the LRU is pruned.  Finally drops
 * the lock reference taken in ldlm_lock_addref_internal(). */
void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
        struct ldlm_namespace *ns;
        ENTRY;

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        ns = lock->l_resource->lr_namespace;
        l_lock(&ns->ns_lock);
        if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR) {
                LASSERT(lock->l_readers > 0);
                lock->l_readers--;
        } else {
                LASSERT(lock->l_writers > 0);
                lock->l_writers--;
        }

        if (lock->l_flags & LDLM_FL_LOCAL &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                lock->l_flags |= LDLM_FL_CBPENDING;
        }

        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                /* If we received a blocked AST and this was the last reference,
                 * run the callback. */
                if (!ns->ns_client && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");
                /* Drop ns_lock before calling out: the AST may re-enter the
                 * DLM.  NOTE(review): l_blocking_ast is invoked without a
                 * NULL check here — presumably every CBPENDING lock has one
                 * set by enqueue; verify against callers. */
                l_unlock(&ns->ns_lock);

                /* FIXME: need a real 'desc' here */
                lock->l_blocking_ast(lock, NULL, lock->l_data,
                                     LDLM_CB_BLOCKING);
        } else if (ns->ns_client && !lock->l_readers && !lock->l_writers) {
                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                LASSERT(list_empty(&lock->l_lru));
                LASSERT(ns->ns_nr_unused >= 0);
                list_add_tail(&lock->l_lru, &ns->ns_unused_list);
                ns->ns_nr_unused++;
                /* Unlock before pruning the LRU, which takes ns_lock itself. */
                l_unlock(&ns->ns_lock);
                ldlm_cancel_lru(ns);
        } else {
                l_unlock(&ns->ns_lock);
        }

        LDLM_LOCK_PUT(lock);    /* matches the ldlm_lock_get in addref */

        EXIT;
}
505
/* Drop a read/write reference (per @mode) on the lock named by @lockh.
 * The handle must resolve to a live lock. */
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        LASSERT(lock != NULL);
        ldlm_lock_decref_internal(lock, mode);
        /* Matches the reference taken by __ldlm_handle2lock(). */
        LDLM_LOCK_PUT(lock);
}
513
514 /* This will drop a lock reference and mark it for destruction, but will not
515  * necessarily cancel the lock before returning. */
516 void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
517 {
518         struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
519         ENTRY;
520
521         LASSERT(lock != NULL);
522
523         LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
524         l_lock(&lock->l_resource->lr_namespace->ns_lock);
525         lock->l_flags |= LDLM_FL_CBPENDING;
526         ldlm_lock_decref_internal(lock, mode);
527         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
528         LDLM_LOCK_PUT(lock);
529 }
530
/* Check @lock for compatibility against every lock on @queue (skipping
 * itself).  A queued lock is compatible if the resource type's compat
 * callback accepts the pair, or failing that, if the granted/requested
 * modes are compatible.  Returns 1 if @lock conflicts with nothing; on each
 * conflict returns 0 and, when @send_cbs is set, queues a blocking AST for
 * the conflicting holder. */
static int ldlm_lock_compat_list(struct ldlm_lock *lock, int send_cbs,
                                 struct list_head *queue)
{
        struct list_head *tmp, *pos;
        int rc = 1;

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *child;
                ldlm_res_compat compat;

                child = list_entry(tmp, struct ldlm_lock, l_res_link);
                if (lock == child)
                        continue;

                compat = ldlm_res_compat_table[child->l_resource->lr_type];
                if (compat && compat(child, lock)) {
                        CDEBUG(D_OTHER, "compat function succeded, next.\n");
                        continue;
                }
                if (lockmode_compat(child->l_granted_mode, lock->l_req_mode)) {
                        CDEBUG(D_OTHER, "lock modes are compatible, next.\n");
                        continue;
                }

                /* Conflict found; keep scanning so every conflicting holder
                 * gets its blocking AST queued. */
                rc = 0;

                if (send_cbs && child->l_blocking_ast != NULL) {
                        CDEBUG(D_OTHER, "lock %p incompatible; sending "
                               "blocking AST.\n", child);
                        ldlm_add_ast_work_item(child, lock, NULL, 0);
                }
        }

        return rc;
}
566
567 static int ldlm_lock_compat(struct ldlm_lock *lock, int send_cbs)
568 {
569         int rc;
570         ENTRY;
571
572         l_lock(&lock->l_resource->lr_namespace->ns_lock);
573         rc = ldlm_lock_compat_list(lock, send_cbs,
574                                    &lock->l_resource->lr_granted);
575         /* FIXME: should we be sending ASTs to converting? */
576         if (rc)
577                 rc = ldlm_lock_compat_list
578                         (lock, send_cbs, &lock->l_resource->lr_converting);
579
580         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
581         RETURN(rc);
582 }
583
/* NOTE: called by
 *  - ldlm_lock_enqueue
 *  - ldlm_reprocess_queue
 *  - ldlm_lock_convert
 */
/* Grant @lock: move it to the resource's granted queue, promote its granted
 * mode to the requested mode, track the most restrictive granted mode on
 * the resource, and queue a completion AST (with @data/@datalen) if one is
 * registered. */
void ldlm_grant_lock(struct ldlm_lock *lock, void *data, int datalen)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_resource_add_lock(res, &res->lr_granted, lock);
        lock->l_granted_mode = lock->l_req_mode;

        /* Keep lr_most_restr at the tightest mode granted on this resource. */
        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, data, datalen);

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        EXIT;
}
607
/* returns a referenced lock or NULL.  See the flag descriptions below, in the
 * comment above ldlm_lock_match */
/* Scan @queue for a live lock of mode @mode whose extent covers @extent
 * (extent resources only), skipping CBPENDING/destroyed locks and — when
 * LDLM_FL_LOCAL_ONLY is set — non-local ones.  Stops early upon reaching
 * @old_lock.  NOTE(review): @extent is dereferenced whenever the resource
 * type is LDLM_EXTENT — presumably callers always pass a non-NULL cookie
 * in that case; verify against ldlm_lock_match's callers. */
static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode,
                                      struct ldlm_extent *extent,
                                      struct ldlm_lock *old_lock, int flags)
{
        struct ldlm_lock *lock;
        struct list_head *tmp;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                /* Only consider locks queued ahead of old_lock. */
                if (lock == old_lock)
                        break;

                /* A lock with a pending callback is on its way out. */
                if (lock->l_flags & LDLM_FL_CBPENDING)
                        continue;

                if (lock->l_req_mode != mode)
                        continue;

                /* For extent locks the candidate must fully cover the
                 * requested range. */
                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_extent.start > extent->start ||
                     lock->l_extent.end < extent->end))
                        continue;

                if (lock->l_destroyed)
                        continue;

                if ((flags & LDLM_FL_LOCAL_ONLY) &&
                    !(lock->l_flags & LDLM_FL_LOCAL))
                        continue;

                /* Match: hand the caller an addref'd lock. */
                ldlm_lock_addref_internal(lock, mode);
                return lock;
        }

        return NULL;
}
647
/* Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (ie, connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 *
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with a addref()ed lock
 */
int ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                    struct ldlm_res_id *res_id, __u32 type, void *cookie,
                    int cookielen, ldlm_mode_t mode,struct lustre_handle *lockh)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock, *old_lock = NULL;
        int rc = 0;
        ENTRY;

        /* Duplicate-search mode: derive all match criteria from the lock
         * that lockh already names. */
        if (ns == NULL) {
                old_lock = ldlm_handle2lock(lockh);
                LASSERT(old_lock);

                ns = old_lock->l_resource->lr_namespace;
                res_id = &old_lock->l_resource->lr_name;
                type = old_lock->l_resource->lr_type;
                mode = old_lock->l_req_mode;
        }

        /* Lookup only (create == 0): no resource means no possible match. */
        res = ldlm_resource_get(ns, NULL, *res_id, type, 0);
        if (res == NULL) {
                LASSERT(old_lock == NULL);
                RETURN(0);
        }

        l_lock(&ns->ns_lock);

        /* Granted queue first; BLOCK_GRANTED restricts the search to it. */
        lock = search_queue(&res->lr_granted, mode, cookie, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);
        if (flags & LDLM_FL_BLOCK_GRANTED)
                GOTO(out, rc = 0);
        lock = search_queue(&res->lr_converting, mode, cookie, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);
        lock = search_queue(&res->lr_waiting, mode, cookie, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);

        EXIT;
       out:
        ldlm_resource_putref(res);
        l_unlock(&ns->ns_lock);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                /* Let the matched lock's owner know it is being reused;
                 * WAIT_NOREPROC suppresses queue reprocessing. */
                if (lock->l_completion_ast)
                        lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC, NULL);
        }
        if (rc)
                LDLM_DEBUG(lock, "matched");
        else
                LDLM_DEBUG_NOLOCK("not matched");

        /* Drop the reference ldlm_handle2lock() took in duplicate mode. */
        if (old_lock)
                LDLM_LOCK_PUT(old_lock);

        return rc;
}
722
/* Returns a referenced lock */
/* Create a new (ungranted) lock on the resource named by @res_id/@type in
 * @ns, creating the resource if necessary, optionally parented to the lock
 * behind @parent_lock_handle.  @data/@cp_data are stashed for the owner's
 * callbacks.  Returns NULL if the resource or lock cannot be allocated. */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   struct lustre_handle *parent_lock_handle,
                                   struct ldlm_res_id res_id, __u32 type,
                                   ldlm_mode_t mode, void *data, void *cp_data)
{
        struct ldlm_resource *res, *parent_res = NULL;
        struct ldlm_lock *lock, *parent_lock = NULL;
        ENTRY;

        if (parent_lock_handle) {
                parent_lock = ldlm_handle2lock(parent_lock_handle);
                if (parent_lock)
                        parent_res = parent_lock->l_resource;
        }

        /* create == 1: make the resource if it doesn't exist yet. */
        res = ldlm_resource_get(ns, parent_res, res_id, type, 1);
        if (res == NULL)
                RETURN(NULL);

        lock = ldlm_lock_new(parent_lock, res);
        /* ldlm_lock_new() took its own references; drop ours. */
        ldlm_resource_putref(res);
        if (parent_lock != NULL)
                LDLM_LOCK_PUT(parent_lock);

        if (lock == NULL)
                RETURN(NULL);

        lock->l_req_mode = mode;
        lock->l_data = data;
        lock->l_cp_data = cp_data;

        RETURN(lock);
}
757
/* Enqueue *lockp in @ns: run the resource-type policy (server side only),
 * then either grant the lock or park it on the converting/waiting queue as
 * dictated by compatibility checks (server) or by *flags (client/replay).
 * @cookie carries the extent for extent locks; @completion/@blocking are the
 * owner's AST callbacks.  On return *flags describes the outcome
 * (LDLM_FL_BLOCK_* / LDLM_FL_LOCK_CHANGED); the policy may replace *lockp. */
ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                               struct ldlm_lock **lockp,
                               void *cookie, int cookie_len,
                               int *flags,
                               ldlm_completion_callback completion,
                               ldlm_blocking_callback blocking)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock = *lockp;
        int local;
        ldlm_res_policy policy;
        ENTRY;

        res = lock->l_resource;
        lock->l_blocking_ast = blocking;

        if (res->lr_type == LDLM_EXTENT)
                memcpy(&lock->l_extent, cookie, sizeof(lock->l_extent));

        /* policies are not executed on the client or during replay */
        local = res->lr_namespace->ns_client;
        if (!local && !(*flags & LDLM_FL_REPLAY) &&
            (policy = ldlm_res_policy_table[res->lr_type])) {
                int rc;
                rc = policy(ns, lockp, cookie, lock->l_req_mode, *flags, NULL);
                if (rc == ELDLM_LOCK_CHANGED) {
                        /* Policy moved the lock to another resource. */
                        res = lock->l_resource;
                        *flags |= LDLM_FL_LOCK_CHANGED;
                } else if (rc == ELDLM_LOCK_REPLACED) {
                        /* The lock that was returned has already been granted,
                         * and placed into lockp.  Destroy the old one and our
                         * work here is done. */
                        ldlm_lock_destroy(lock);
                        LDLM_LOCK_PUT(lock);
                        *flags |= LDLM_FL_LOCK_CHANGED;
                        RETURN(0);
                } else if (rc == ELDLM_LOCK_ABORTED) {
                        ldlm_lock_destroy(lock);
                        RETURN(rc);
                }
        }

        l_lock(&ns->ns_lock);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted before
                 * we got a chance to actually enqueue it.  We don't need to do
                 * anything else. */
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                            LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
                GOTO(out, ELDLM_OK);
        }

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below).  In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags'.
         *
         * We do exactly the same thing during recovery, when the server is
         * more or less trusting the clients not to lie.
         *
         * FIXME (bug 268): Detect obvious lies by checking compatibility in
         * granted/converting queues. */
        ldlm_resource_unlink_lock(lock);
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                else
                        ldlm_grant_lock(lock, NULL, 0);
                GOTO(out, ELDLM_OK);
        } else if (*flags & LDLM_FL_REPLAY) {
                if (*flags & LDLM_FL_BLOCK_CONV) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_WAIT) {
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
                        ldlm_grant_lock(lock, NULL, 0);
                        GOTO(out, ELDLM_OK);
                }
                /* If no flags, fall through to normal enqueue path. */
        }

        /* Server-side decision: block behind any converter or waiter, or on
         * an incompatible granted lock (without sending ASTs yet). */
        /* FIXME: We may want to optimize by checking lr_most_restr */
        if (!list_empty(&res->lr_converting)) {
                ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                *flags |= LDLM_FL_BLOCK_CONV;
                GOTO(out, ELDLM_OK);
        }
        if (!list_empty(&res->lr_waiting)) {
                ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                *flags |= LDLM_FL_BLOCK_WAIT;
                GOTO(out, ELDLM_OK);
        }
        if (!ldlm_lock_compat(lock, 0)) {
                ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                *flags |= LDLM_FL_BLOCK_GRANTED;
                GOTO(out, ELDLM_OK);
        }

        if (lock->l_granted_cb != NULL && lock->l_data != NULL) {
                /* We just -know- */
                struct ptlrpc_request *req = lock->l_data;
                lock->l_granted_cb(lock, req->rq_repmsg, 0);
        }
        ldlm_grant_lock(lock, NULL, 0);
        EXIT;
      out:
        l_unlock(&ns->ns_lock);
        /* Don't set 'completion_ast' until here so that if the lock is granted
         * immediately we don't do an unnecessary completion call. */
        lock->l_completion_ast = completion;
        return ELDLM_OK;
}
875
876 /* Must be called with namespace taken: queue is waiting or converting. */
877 static int ldlm_reprocess_queue(struct ldlm_resource *res,
878                                 struct list_head *queue)
879 {
880         struct list_head *tmp, *pos;
881         ENTRY;
882
883         list_for_each_safe(tmp, pos, queue) {
884                 struct ldlm_lock *pending;
885                 pending = list_entry(tmp, struct ldlm_lock, l_res_link);
886
887                 CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
888
889                 if (!ldlm_lock_compat(pending, 1))
890                         RETURN(1);
891
892                 list_del_init(&pending->l_res_link);
893                 ldlm_grant_lock(pending, NULL, 0);
894         }
895
896         RETURN(0);
897 }
898
899 int ldlm_run_ast_work(struct list_head *rpc_list)
900 {
901         struct list_head *tmp, *pos;
902         int rc, retval = 0;
903         ENTRY;
904
905         list_for_each_safe(tmp, pos, rpc_list) {
906                 struct ldlm_ast_work *w =
907                         list_entry(tmp, struct ldlm_ast_work, w_list);
908
909                 if (w->w_blocking)
910                         rc = w->w_lock->l_blocking_ast
911                                 (w->w_lock, &w->w_desc, w->w_data,
912                                  LDLM_CB_BLOCKING);
913                 else
914                         rc = w->w_lock->l_completion_ast(w->w_lock, w->w_flags,
915                                                          w->w_data);
916                 if (rc == -ERESTART)
917                         retval = rc;
918                 else if (rc)
919                         CERROR("Failed AST - should clean & disconnect "
920                                "client\n");
921                 LDLM_LOCK_PUT(w->w_lock);
922                 list_del(&w->w_list);
923                 OBD_FREE(w, sizeof(*w));
924         }
925         RETURN(retval);
926 }
927
928 static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
929 {
930         ldlm_reprocess_all(res);
931         return LDLM_ITER_CONTINUE;
932 }
933
/* Reprocess the converting/waiting queues of every resource in @ns.
 * The iterator's return value is deliberately discarded. */
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
{
        (void)ldlm_namespace_foreach_res(ns, reprocess_one_queue, NULL);
}
938
/* Re-scan @res's converting and waiting queues, granting locks that no
 * longer conflict, then run the resulting AST work outside the namespace
 * lock.  Repeats from scratch if any AST returns -ERESTART.
 * Only runs on the server side: client (ns_client) namespaces return
 * immediately. */
void ldlm_reprocess_all(struct ldlm_resource *res)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        int rc;
        ENTRY;

        /* Local lock trees don't get reprocessed. */
        if (res->lr_namespace->ns_client) {
                EXIT;
                return;
        }

 restart:
        l_lock(&res->lr_namespace->ns_lock);
        /* lr_tmp presumably lets code running under ns_lock (via
         * ldlm_grant_lock) queue AST work onto rpc_list instead of
         * calling back immediately — TODO confirm against grant path. */
        res->lr_tmp = &rpc_list;

        /* Converting locks have priority: the waiting queue is only
         * examined once the converting queue has fully drained. */
        ldlm_reprocess_queue(res, &res->lr_converting);
        if (list_empty(&res->lr_converting))
                ldlm_reprocess_queue(res, &res->lr_waiting);

        res->lr_tmp = NULL;
        l_unlock(&res->lr_namespace->ns_lock);

        /* ASTs run without ns_lock held; -ERESTART forces a full rescan. */
        rc = ldlm_run_ast_work(&rpc_list);
        if (rc == -ERESTART)
                goto restart;
        EXIT;
}
967
/* Run @lock's blocking AST with LDLM_CB_CANCELING, at most once: the
 * LDLM_FL_CANCEL flag records that the callback has already been issued.
 * NOTE(review): lock->l_resource is re-read for the unlock — looks
 * deliberate in case the resource pointer changes; confirm before
 * caching it. */
void ldlm_cancel_callback(struct ldlm_lock *lock)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (!(lock->l_flags & LDLM_FL_CANCEL)) {
                lock->l_flags |= LDLM_FL_CANCEL;
                if (lock->l_blocking_ast)
                        lock->l_blocking_ast(lock, NULL, lock->l_data,
                                             LDLM_CB_CANCELING);
                else
                        LDLM_DEBUG(lock, "no blocking ast");
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}
981
/* Cancel @lock: fire its cancellation callback, unlink it from its resource
 * and destroy it.  The lock must hold no reader/writer references — that
 * is a fatal error (LBUG) by design. */
void ldlm_lock_cancel(struct ldlm_lock *lock)
{
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        ENTRY;

        /* Presumably removes the lock from the waiting-lock timeout list
         * before teardown — TODO confirm against ldlm_del_waiting_lock. */
        ldlm_del_waiting_lock(lock);

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);
        /* Please do not, no matter how tempting, remove this LBUG without
         * talking to me first. -phik */
        if (lock->l_readers || lock->l_writers) {
                LDLM_DEBUG(lock, "lock still has references");
                ldlm_lock_dump(D_OTHER, lock);
                LBUG();
        }

        /* Runs the blocking AST (once) with LDLM_CB_CANCELING. */
        ldlm_cancel_callback(lock);

        ldlm_resource_unlink_lock(lock);
        ldlm_lock_destroy(lock);
        l_unlock(&ns->ns_lock);
        EXIT;
}
1009
1010 int ldlm_lock_set_data(struct lustre_handle *lockh, void *data, void *cp_data)
1011 {
1012         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
1013         ENTRY;
1014
1015         if (lock == NULL)
1016                 RETURN(-EINVAL);
1017
1018         lock->l_data = data;
1019         lock->l_cp_data = cp_data;
1020
1021         LDLM_LOCK_PUT(lock);
1022
1023         RETURN(0);
1024 }
1025
1026 /* This function is only called from one thread (per export); no locking around
1027  * the list ops needed */
1028 void ldlm_cancel_locks_for_export(struct obd_export *exp)
1029 {
1030         struct list_head *iter, *n;
1031
1032         list_for_each_safe(iter, n, &exp->exp_ldlm_data.led_held_locks) {
1033                 struct ldlm_lock *lock;
1034                 struct ldlm_resource *res;
1035                 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
1036                 res = ldlm_resource_getref(lock->l_resource);
1037                 LDLM_DEBUG(lock, "export %p", exp);
1038                 ldlm_lock_cancel(lock);
1039                 ldlm_reprocess_all(res);
1040                 ldlm_resource_putref(res);
1041         }
1042 }
1043
/* Change @lock's requested mode to @new_mode and requeue it for conversion.
 * Returns the lock's resource.
 * NOTE(review): the unconditional LBUG() below fires before anything else
 * runs, so this function appears deliberately disabled — confirm before
 * trusting the code after it. */
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        int *flags)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        int granted = 0;
        ENTRY;

        LBUG();

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);

        lock->l_req_mode = new_mode;
        ldlm_resource_unlink_lock(lock);

        /* If this is a local resource, put it on the appropriate list. */
        if (res->lr_namespace->ns_client) {
                if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                } else {
                        /* This should never happen, because of the way the
                         * server handles conversions. */
                        LBUG();

                        res->lr_tmp = &rpc_list;
                        ldlm_grant_lock(lock, NULL, 0);
                        res->lr_tmp = NULL;
                        granted = 1;
                        /* FIXME: completion handling not with ns_lock held ! */
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0, NULL);
                }
        } else {
                /* FIXME: We should try the conversion right away and possibly
                 * return success without the need for an extra AST */
                ldlm_resource_add_lock(res, &res->lr_converting, lock);
                *flags |= LDLM_FL_BLOCK_CONV;
        }

        l_unlock(&ns->ns_lock);

        /* AST work collected via lr_tmp runs only after ns_lock is
         * released. */
        if (granted)
                ldlm_run_ast_work(&rpc_list);
        RETURN(res);
}
1093
1094 void ldlm_lock_dump(int level, struct ldlm_lock *lock)
1095 {
1096         char ver[128];
1097
1098         if (!((portal_debug | D_ERROR) & level))
1099                 return;
1100
1101         if (RES_VERSION_SIZE != 4)
1102                 LBUG();
1103
1104         if (!lock) {
1105                 CDEBUG(level, "  NULL LDLM lock\n");
1106                 return;
1107         }
1108
1109         snprintf(ver, sizeof(ver), "%x %x %x %x",
1110                  lock->l_version[0], lock->l_version[1],
1111                  lock->l_version[2], lock->l_version[3]);
1112
1113         CDEBUG(level, "  -- Lock dump: %p (%s) (rc: %d)\n", lock, ver,
1114                atomic_read(&lock->l_refc));
1115         if (lock->l_export && lock->l_export->exp_connection)
1116                 CDEBUG(level, "  Node: NID %x (rhandle: "LPX64")\n",
1117                        lock->l_export->exp_connection->c_peer.peer_nid,
1118                        lock->l_remote_handle.cookie);
1119         else
1120                 CDEBUG(level, "  Node: local\n");
1121         CDEBUG(level, "  Parent: %p\n", lock->l_parent);
1122         CDEBUG(level, "  Resource: %p ("LPD64")\n", lock->l_resource,
1123                lock->l_resource->lr_name.name[0]);
1124         CDEBUG(level, "  Requested mode: %d, granted mode: %d\n",
1125                (int)lock->l_req_mode, (int)lock->l_granted_mode);
1126         CDEBUG(level, "  Readers: %u ; Writers; %u\n",
1127                lock->l_readers, lock->l_writers);
1128         if (lock->l_resource->lr_type == LDLM_EXTENT)
1129                 CDEBUG(level, "  Extent: "LPU64" -> "LPU64"\n",
1130                        lock->l_extent.start, lock->l_extent.end);
1131 }
1132
1133 void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
1134 {
1135         struct ldlm_lock *lock;
1136
1137         lock = ldlm_handle2lock(lockh);
1138         if (lock == NULL)
1139                 return;
1140
1141         ldlm_lock_dump(D_OTHER, lock);
1142
1143         LDLM_LOCK_PUT(lock);
1144 }