/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <linux/slab.h>
# include <linux/module.h>
# include <linux/lustre_dlm.h>
#else
# include <liblustre.h>
# include <linux/kp30.h>
#endif

#include <linux/obd_class.h>
#include "ldlm_internal.h"

//struct lustre_lock ldlm_everything_lock;
char *ldlm_lockname[] = {
        [0] "--",
        [LCK_EX] "EX",
        [LCK_PW] "PW",
        [LCK_PR] "PR",
        [LCK_CW] "CW",
        [LCK_CR] "CR",
        [LCK_NL] "NL"
};

char *ldlm_typename[] = {
        [LDLM_PLAIN] "PLN",
        [LDLM_EXTENT] "EXT",
};

char *ldlm_it2str(int it)
{
        switch (it) {
        case IT_OPEN:
                return "open";
        case IT_CREAT:
                return "creat";
        case (IT_OPEN | IT_CREAT):
                return "open|creat";
        /* ... */
        default:
                CERROR("Unknown intent %d\n", it);
                return "UNKNOWN";
        }
}
extern kmem_cache_t *ldlm_lock_slab;
struct lustre_lock ldlm_handle_lock;

static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b);

static ldlm_res_compat ldlm_res_compat_table[] = {
        [LDLM_PLAIN] ldlm_plain_compat,
        [LDLM_EXTENT] ldlm_extent_compat,
};

static ldlm_res_policy ldlm_intent_policy_func;

static int ldlm_plain_policy(struct ldlm_namespace *ns, struct ldlm_lock **lock,
                             void *req_cookie, ldlm_mode_t mode, int flags,
                             void *data)
{
        if ((flags & LDLM_FL_HAS_INTENT) && ldlm_intent_policy_func) {
                return ldlm_intent_policy_func(ns, lock, req_cookie, mode,
                                               flags, data);
        }

        return ELDLM_OK;
}

static ldlm_res_policy ldlm_res_policy_table[] = {
        [LDLM_PLAIN] ldlm_plain_policy,
        [LDLM_EXTENT] ldlm_extent_policy,
};

void ldlm_register_intent(ldlm_res_policy arg)
{
        ldlm_intent_policy_func = arg;
}

void ldlm_unregister_intent(void)
{
        ldlm_intent_policy_func = NULL;
}
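
/*
 * Usage sketch (hypothetical; mds_intent_policy is illustrative only, not a
 * function defined in this file): a server stack with intent support would
 * register its policy at setup time and unregister it at cleanup:
 *
 *      static int mds_intent_policy(struct ldlm_namespace *ns,
 *                                   struct ldlm_lock **lockp, void *req_cookie,
 *                                   ldlm_mode_t mode, int flags, void *data)
 *      {
 *              ...examine the intent in req_cookie, possibly replace *lockp...
 *              return ELDLM_OK;
 *      }
 *
 *      ldlm_register_intent(mds_intent_policy);
 *      ...
 *      ldlm_unregister_intent();
 *
 * With a policy registered, ldlm_plain_policy above forwards any enqueue that
 * carries LDLM_FL_HAS_INTENT to it.
 */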
/*
 * REFCOUNTED LOCK OBJECTS
 */

/*
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */
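
/*
 * A sketch of how those references play out over a lock's life (the helpers
 * are the ones defined below; the actual call sites live elsewhere):
 *
 *      lock = ldlm_lock_new(parent, res);       l_refc == 2
 *      ldlm_lock_addref_internal(lock, mode);   l_refc == 3
 *      ...
 *      ldlm_lock_decref(&lockh, mode);          l_refc == 2
 *      ldlm_lock_destroy(lock);                 l_refc == 1 (unhashed and
 *                                               marked l_destroyed)
 *      LDLM_LOCK_PUT(lock);                     l_refc == 0, lock is freed
 */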
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        atomic_inc(&lock->l_refc);
        return lock;
}

void ldlm_lock_put(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;

        if (atomic_dec_and_test(&lock->l_refc)) {
                l_lock(&ns->ns_lock);
                LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing");
                LASSERT(lock->l_destroyed);
                LASSERT(list_empty(&lock->l_res_link));

                spin_lock(&ns->ns_counter_lock);
                ns->ns_locks--;
                spin_unlock(&ns->ns_counter_lock);

                ldlm_resource_putref(lock->l_resource);
                lock->l_resource = NULL;
                if (lock->l_export)
                        class_export_put(lock->l_export);
                if (lock->l_parent)
                        LDLM_LOCK_PUT(lock->l_parent);

                OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
                l_unlock(&ns->ns_lock);
        }
}
void ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (!list_empty(&lock->l_lru)) {
                list_del_init(&lock->l_lru);
                lock->l_resource->lr_namespace->ns_nr_unused--;
                LASSERT(lock->l_resource->lr_namespace->ns_nr_unused >= 0);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}
/* This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock,
 * because it's not in the hash table anymore. -phil */
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);

        if (!list_empty(&lock->l_children)) {
                LDLM_ERROR(lock, "still has children (%p)!",
                           lock->l_children.next);
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }
        if (!list_empty(&lock->l_res_link)) {
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (lock->l_destroyed) {
                LASSERT(list_empty(&lock->l_lru));
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                return;
        }
        lock->l_destroyed = 1;

        list_del_init(&lock->l_export_chain);
        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0, NULL);
        if (lock->l_export) {
                class_export_put(lock->l_export);
                lock->l_export = NULL;
        }

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        LDLM_LOCK_PUT(lock); /* drops the allocation reference */
}
/* this is called by portals_handle2object with the handle lock taken */
static void lock_handle_addref(void *lock)
{
        LDLM_LOCK_GET((struct ldlm_lock *)lock);
}

/*
 * usage: pass in a resource on which you have done ldlm_resource_get
 *        pass in a parent lock on which you have done a ldlm_lock_get
 *        after return, ldlm_*_put the resource and parent
 * returns: lock with refcount 1
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
                                       struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;

        if (resource == NULL)
                LBUG();

        OBD_SLAB_ALLOC(lock, ldlm_lock_slab, SLAB_KERNEL, sizeof(*lock));
        if (lock == NULL)
                return NULL;

        lock->l_resource = ldlm_resource_getref(resource);

        atomic_set(&lock->l_refc, 2);
        INIT_LIST_HEAD(&lock->l_children);
        INIT_LIST_HEAD(&lock->l_res_link);
        INIT_LIST_HEAD(&lock->l_lru);
        INIT_LIST_HEAD(&lock->l_export_chain);
        INIT_LIST_HEAD(&lock->l_pending_chain);
        init_waitqueue_head(&lock->l_waitq);

        spin_lock(&resource->lr_namespace->ns_counter_lock);
        resource->lr_namespace->ns_locks++;
        spin_unlock(&resource->lr_namespace->ns_counter_lock);

        if (parent != NULL) {
                l_lock(&parent->l_resource->lr_namespace->ns_lock);
                lock->l_parent = LDLM_LOCK_GET(parent);
                list_add(&lock->l_childof, &parent->l_children);
                l_unlock(&parent->l_resource->lr_namespace->ns_lock);
        }

        INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, lock_handle_addref);

        return lock;
}
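
/*
 * The contract in the comment above works out to something like this
 * hypothetical caller (error handling omitted):
 *
 *      res = ldlm_resource_get(ns, NULL, res_id, type, 1);
 *      lock = ldlm_lock_new(parent, res);
 *      ldlm_resource_putref(res);      (ldlm_lock_new took its own ref)
 *      if (parent != NULL)
 *              LDLM_LOCK_PUT(parent);  (likewise for the parent)
 *
 * ldlm_lock_create() below is exactly this pattern, plus filling in the
 * request mode and callbacks.
 */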
int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              struct ldlm_res_id new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;

        l_lock(&ns->ns_lock);
        if (memcmp(&new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                l_unlock(&ns->ns_lock);
                return 0;
        }

        LASSERT(new_resid.name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(list_empty(&lock->l_res_link));

        lock->l_resource = ldlm_resource_get(ns, NULL, new_resid,
                                             lock->l_resource->lr_type, 1);
        if (lock->l_resource == NULL) {
                LBUG();
                l_unlock(&ns->ns_lock);
                return -ENOMEM;
        }

        /* ...and the flowers are still standing! */
        ldlm_resource_putref(oldres);

        l_unlock(&ns->ns_lock);
        return 0;
}
void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->cookie = lock->l_handle.h_cookie;
}

/* if flags: atomically get the lock and set the flags.
 * Return NULL if flag already set */
struct ldlm_lock *__ldlm_handle2lock(struct lustre_handle *handle, int flags)
{
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock = NULL, *retval = NULL;

        lock = class_handle2object(handle->cookie);
        if (lock == NULL)
                return NULL;

        LASSERT(lock->l_resource != NULL);
        ns = lock->l_resource->lr_namespace;
        l_lock(&ns->ns_lock);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if (lock->l_destroyed) {
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                goto out;
        }

        if (flags && (lock->l_flags & flags)) {
                LDLM_LOCK_PUT(lock);
                goto out;
        }
        if (flags)
                lock->l_flags |= flags;

        retval = lock;
 out:
        l_unlock(&ns->ns_lock);
        return retval;
}

struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *ns,
                                      struct lustre_handle *handle)
{
        struct ldlm_lock *retval = NULL;

        l_lock(&ns->ns_lock);
        retval = __ldlm_handle2lock(handle, 0);
        l_unlock(&ns->ns_lock);
        return retval;
}
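
/*
 * Handle round-trip sketch: a lock is exported as an opaque cookie and later
 * mapped back to a referenced lock.  The reference taken by
 * lock_handle_addref must be dropped by the caller:
 *
 *      struct lustre_handle lockh;
 *      ldlm_lock2handle(lock, &lockh);
 *      ...
 *      lock = ldlm_handle2lock(&lockh);
 *      if (lock != NULL) {
 *              ...use lock...
 *              LDLM_LOCK_PUT(lock);
 *      }
 */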
static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b)
{
        return lockmode_compat(a->l_req_mode, b->l_req_mode);
}

void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        ldlm_res2desc(lock->l_resource, &desc->l_resource);
        desc->l_req_mode = lock->l_req_mode;
        desc->l_granted_mode = lock->l_granted_mode;
        memcpy(&desc->l_extent, &lock->l_extent, sizeof(desc->l_extent));
        memcpy(desc->l_version, lock->l_version, sizeof(desc->l_version));
}

static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
                                   struct ldlm_lock *new,
                                   void *data, int datalen)
{
        struct ldlm_ast_work *w;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (new && (lock->l_flags & LDLM_FL_AST_SENT))
                goto out;

        CDEBUG(D_OTHER, "lock %p incompatible; sending blocking AST.\n", lock);

        OBD_ALLOC(w, sizeof(*w));
        if (w == NULL)
                goto out;

        w->w_data = data;
        w->w_datalen = datalen;
        if (new) {
                lock->l_flags |= LDLM_FL_AST_SENT;
                w->w_blocking = 1;
                ldlm_lock2desc(new, &w->w_desc);
        }

        w->w_lock = LDLM_LOCK_GET(lock);
        list_add(&w->w_list, lock->l_resource->lr_tmp);
 out:
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        LASSERT(lock != NULL);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_lock_remove_from_lru(lock);
        if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR)
                lock->l_readers++;
        else
                lock->l_writers++;
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        LDLM_LOCK_GET(lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}
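
/*
 * Reader/writer references must be paired by mode: NL, CR and PR count as
 * reader references, everything else (CW, PW, EX) as writer references.
 * A typical user does, e.g.:
 *
 *      ldlm_lock_addref(&lockh, LCK_PR);       (l_readers++)
 *      ...read under the lock...
 *      ldlm_lock_decref(&lockh, LCK_PR);       (l_readers--)
 *
 * Decref'ing with a different mode than the addref would trip the LASSERTs
 * in ldlm_lock_decref_internal below.
 */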
void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
        struct ldlm_namespace *ns;

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        ns = lock->l_resource->lr_namespace;
        l_lock(&ns->ns_lock);
        if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR) {
                LASSERT(lock->l_readers > 0);
                lock->l_readers--;
        } else {
                LASSERT(lock->l_writers > 0);
                lock->l_writers--;
        }

        if (lock->l_flags & LDLM_FL_LOCAL &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                lock->l_flags |= LDLM_FL_CBPENDING;
        }

        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                /* If we received a blocking AST and this was the last
                 * reference, run the callback. */
                if (!ns->ns_client && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");
                l_unlock(&ns->ns_lock);

                /* FIXME: need a real 'desc' here */
                if (lock->l_blocking_ast != NULL)
                        lock->l_blocking_ast(lock, NULL, lock->l_data,
                                             LDLM_CB_BLOCKING);
                else
                        LDLM_DEBUG(lock, "No blocking AST?");
        } else if (ns->ns_client && !lock->l_readers && !lock->l_writers) {
                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                LASSERT(list_empty(&lock->l_lru));
                LASSERT(ns->ns_nr_unused >= 0);
                list_add_tail(&lock->l_lru, &ns->ns_unused_list);
                ns->ns_nr_unused++;
                l_unlock(&ns->ns_lock);
        } else {
                l_unlock(&ns->ns_lock);
        }

        LDLM_LOCK_PUT(lock); /* matches the ldlm_lock_get in addref */
}

void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

        LASSERT(lock != NULL);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
/* This will drop a lock reference and mark it for destruction, but will not
 * necessarily cancel the lock before returning. */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref_and_cancel(%s)",
                   ldlm_lockname[mode]);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        lock->l_flags |= LDLM_FL_CBPENDING;
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
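
/*
 * The difference between the two decref flavours, sketched:
 *
 *      ldlm_lock_decref(&lockh, mode);
 *              drop the reference; a client lock with no remaining users
 *              goes onto the namespace LRU for later cancellation.
 *
 *      ldlm_lock_decref_and_cancel(&lockh, mode);
 *              set LDLM_FL_CBPENDING first, so the final decref runs the
 *              blocking AST and cancels the lock instead of caching it.
 */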
static int ldlm_lock_compat_list(struct ldlm_lock *lock, int send_cbs,
                                 struct list_head *queue)
{
        struct list_head *tmp, *pos;
        int rc = 1;

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *child;
                ldlm_res_compat compat;

                child = list_entry(tmp, struct ldlm_lock, l_res_link);
                if (lock == child)
                        continue;

                compat = ldlm_res_compat_table[child->l_resource->lr_type];
                if (compat && compat(child, lock)) {
                        CDEBUG(D_OTHER, "compat function succeeded, next.\n");
                        continue;
                }
                if (lockmode_compat(child->l_granted_mode, lock->l_req_mode)) {
                        CDEBUG(D_OTHER, "lock modes are compatible, next.\n");
                        continue;
                }

                rc = 0;

                if (send_cbs && child->l_blocking_ast != NULL)
                        ldlm_add_ast_work_item(child, lock, NULL, 0);
        }

        return rc;
}

static int ldlm_lock_compat(struct ldlm_lock *lock, int send_cbs)
{
        int rc;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        rc = ldlm_lock_compat_list(lock, send_cbs,
                                   &lock->l_resource->lr_granted);
        /* FIXME: should we be sending ASTs to converting? */
        if (rc)
                rc = ldlm_lock_compat_list
                        (lock, send_cbs, &lock->l_resource->lr_converting);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        return rc;
}
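
/*
 * lockmode_compat() implements the classic DLM mode-compatibility matrix
 * (reproduced here for reference; the authoritative table lives in the
 * lustre_dlm headers):
 *
 *              NL  CR  CW  PR  PW  EX
 *          NL   1   1   1   1   1   1
 *          CR   1   1   1   1   1   0
 *          CW   1   1   1   0   0   0
 *          PR   1   1   0   1   0   0
 *          PW   1   1   0   0   0   0
 *          EX   1   0   0   0   0   0
 *
 * So, for example, a granted LCK_PR lock is compatible with a new LCK_PR
 * request (both are readers) but conflicts with LCK_PW, which is why
 * ldlm_lock_compat_list() above queues a blocking AST for the holder.
 */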
/* NOTE: called by
 *  - ldlm_lock_enqueue
 *  - ldlm_reprocess_queue
 *  - ldlm_lock_convert
 */
void ldlm_grant_lock(struct ldlm_lock *lock, void *data, int datalen,
                     int run_ast)
{
        struct ldlm_resource *res = lock->l_resource;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_resource_add_lock(res, &res->lr_granted, lock);
        lock->l_granted_mode = lock->l_req_mode;

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (run_ast && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, data, datalen);

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}
/* returns a referenced lock or NULL.  See the flag descriptions below, in the
 * comment above ldlm_lock_match */
static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode,
                                      struct ldlm_extent *extent,
                                      struct ldlm_lock *old_lock, void *data,
                                      int flags)
{
        struct ldlm_lock *lock;
        struct list_head *tmp;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock == old_lock)
                        continue;

                /* llite sometimes wants to match locks that will be
                 * canceled when their users drop, but we allow it to match
                 * if it passes in CBPENDING and the lock still has users.
                 * this is generally only going to be used by children
                 * whose parents already hold a lock so forward progress
                 * can still happen. */
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    !(flags & LDLM_FL_CBPENDING))
                        continue;
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    lock->l_readers == 0 && lock->l_writers == 0)
                        continue;

                if (lock->l_req_mode != mode)
                        continue;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_extent.start > extent->start ||
                     lock->l_extent.end < extent->end))
                        continue;

                if (lock->l_destroyed)
                        continue;

                if ((flags & LDLM_FL_LOCAL_ONLY) &&
                    !(lock->l_flags & LDLM_FL_LOCAL))
                        continue;

                if ((flags & LDLM_FL_MATCH_DATA) && lock->l_data != data) {
                        LDLM_DEBUG(lock, "data mismatch: have %p, want %p",
                                   lock->l_data, data);
                        continue;
                }

                ldlm_lock_addref_internal(lock, mode);
                return lock;
        }

        return NULL;
}
/* Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (i.e., connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 *     to be canceled can still be matched as long as they still have reader
 *     or writer references
 * If 'flags' contains LDLM_FL_MATCH_DATA, then only match a lock if the opaque
 *     data is the same
 *
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock
 */
int ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                    struct ldlm_res_id *res_id, __u32 type, void *cookie,
                    int cookielen, ldlm_mode_t mode, void *data,
                    struct lustre_handle *lockh)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock, *old_lock = NULL;
        int rc = 0;

        if (ns == NULL) {
                old_lock = ldlm_handle2lock(lockh);
                LASSERT(old_lock != NULL);

                ns = old_lock->l_resource->lr_namespace;
                res_id = &old_lock->l_resource->lr_name;
                type = old_lock->l_resource->lr_type;
                mode = old_lock->l_req_mode;
        }

        res = ldlm_resource_get(ns, NULL, *res_id, type, 0);
        if (res == NULL) {
                LASSERT(old_lock == NULL);
                return 0;
        }

        l_lock(&ns->ns_lock);

        lock = search_queue(&res->lr_granted, mode, cookie, old_lock, data,
                            flags);
        if (lock != NULL) {
                rc = 1;
                goto out;
        }
        if (flags & LDLM_FL_BLOCK_GRANTED)
                goto out;
        lock = search_queue(&res->lr_converting, mode, cookie, old_lock, data,
                            flags);
        if (lock != NULL) {
                rc = 1;
                goto out;
        }
        lock = search_queue(&res->lr_waiting, mode, cookie, old_lock, data,
                            flags);
        if (lock != NULL)
                rc = 1;

 out:
        ldlm_resource_putref(res);
        l_unlock(&ns->ns_lock);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                if (lock->l_completion_ast)
                        lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC,
                                               NULL);
        }
        if (rc)
                LDLM_DEBUG(lock, "matched");
        else
                LDLM_DEBUG_NOLOCK("not matched");

        if (old_lock)
                LDLM_LOCK_PUT(old_lock);

        return rc;
}
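
/*
 * Typical client-side use, sketched (the res_id/extent values are
 * illustrative):
 *
 *      struct lustre_handle lockh;
 *
 *      if (ldlm_lock_match(ns, LDLM_FL_BLOCK_GRANTED, &res_id, LDLM_EXTENT,
 *                          &extent, sizeof(extent), LCK_PR, NULL, &lockh)) {
 *              ...lockh now holds an addref'ed granted lock; use it, then:
 *              ldlm_lock_decref(&lockh, LCK_PR);
 *      } else {
 *              ...no compatible lock cached; fall back to a full enqueue...
 *      }
 */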
/* Returns a referenced lock */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   struct lustre_handle *parent_lock_handle,
                                   struct ldlm_res_id res_id, __u32 type,
                                   ldlm_mode_t mode, void *data,
                                   ldlm_blocking_callback blocking,
                                   ldlm_completion_callback completion)
{
        struct ldlm_resource *res, *parent_res = NULL;
        struct ldlm_lock *lock, *parent_lock = NULL;

        if (parent_lock_handle) {
                parent_lock = ldlm_handle2lock(parent_lock_handle);
                if (parent_lock)
                        parent_res = parent_lock->l_resource;
        }

        res = ldlm_resource_get(ns, parent_res, res_id, type, 1);
        if (res == NULL)
                return NULL;

        lock = ldlm_lock_new(parent_lock, res);
        ldlm_resource_putref(res);
        if (parent_lock != NULL)
                LDLM_LOCK_PUT(parent_lock);
        if (lock == NULL)
                return NULL;

        lock->l_req_mode = mode;
        lock->l_data = data;
        lock->l_blocking_ast = blocking;
        lock->l_completion_ast = completion;

        return lock;
}
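
/*
 * A minimal local enqueue, sketched from the two calls above (flag handling
 * and error checking omitted; blocking_cb/completion_cb are placeholders for
 * real callbacks):
 *
 *      int flags = 0;
 *      struct ldlm_lock *lock;
 *
 *      lock = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_EX, NULL,
 *                              blocking_cb, completion_cb);
 *      err = ldlm_lock_enqueue(ns, &lock, NULL, 0, &flags);
 *
 * On a server namespace this grants the lock immediately if nothing
 * conflicting is queued; otherwise the lock is parked on lr_waiting and one
 * of the LDLM_FL_BLOCK_* flags is set.
 */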
ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                               struct ldlm_lock **lockp,
                               void *cookie, int cookie_len, int *flags)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock = *lockp;
        int local;
        ldlm_res_policy policy;

        res = lock->l_resource;

        if (res->lr_type == LDLM_EXTENT)
                memcpy(&lock->l_extent, cookie, sizeof(lock->l_extent));

        /* policies are not executed on the client or during replay */
        local = res->lr_namespace->ns_client;
        if (!local && !(*flags & LDLM_FL_REPLAY) &&
            (policy = ldlm_res_policy_table[res->lr_type])) {
                int rc;

                rc = policy(ns, lockp, cookie, lock->l_req_mode, *flags, NULL);
                if (rc == ELDLM_LOCK_CHANGED) {
                        res = lock->l_resource;
                        *flags |= LDLM_FL_LOCK_CHANGED;
                } else if (rc == ELDLM_LOCK_REPLACED) {
                        /* The lock that was returned has already been granted,
                         * and placed into lockp.  Destroy the old one and our
                         * work here is done. */
                        ldlm_lock_destroy(lock);
                        LDLM_LOCK_PUT(lock);
                        *flags |= LDLM_FL_LOCK_CHANGED;
                        return ELDLM_OK;
                } else if (rc == ELDLM_LOCK_ABORTED) {
                        ldlm_lock_destroy(lock);
                        return rc;
                }
        }

        l_lock(&ns->ns_lock);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted
                 * before we got a chance to actually enqueue it.  We don't
                 * need to do anything else. */
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                            LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
                goto out;
        }

        /* This distinction between local lock trees is very important; a
         * client namespace only has information about locks taken by that
         * client, and thus doesn't have enough information to decide for
         * itself if it can be granted (below).  In this case, we do exactly
         * what the server tells us to do, as dictated by the 'flags'.
         *
         * We do exactly the same thing during recovery, when the server is
         * more or less trusting the clients not to lie.
         *
         * FIXME (bug 268): Detect obvious lies by checking compatibility in
         * granted/converting queues. */
        ldlm_resource_unlink_lock(lock);
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                else
                        ldlm_grant_lock(lock, NULL, 0, 0);
                goto out;
        } else if (*flags & LDLM_FL_REPLAY) {
                if (*flags & LDLM_FL_BLOCK_CONV) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                        goto out;
                } else if (*flags & LDLM_FL_BLOCK_WAIT) {
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                        goto out;
                } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
                        ldlm_grant_lock(lock, NULL, 0, 0);
                        goto out;
                }
                /* If no flags, fall through to normal enqueue path. */
        }

        /* FIXME: We may want to optimize by checking lr_most_restr */
        if (!list_empty(&res->lr_converting)) {
                ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                *flags |= LDLM_FL_BLOCK_CONV;
                goto out;
        }
        if (!list_empty(&res->lr_waiting)) {
                ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                *flags |= LDLM_FL_BLOCK_WAIT;
                goto out;
        }
        if (!ldlm_lock_compat(lock, 0)) {
                ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                *flags |= LDLM_FL_BLOCK_GRANTED;
                goto out;
        }

        ldlm_grant_lock(lock, NULL, 0, 0);
 out:
        l_unlock(&ns->ns_lock);
        return ELDLM_OK;
}
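
/*
 * After a remote enqueue, the client replays the server's decision through
 * this function: the reply's flags say which queue the lock landed on.  A
 * caller sketch (the wait step is illustrative; the real client-side code
 * lives in ldlm_request.c):
 *
 *      err = ldlm_lock_enqueue(ns, &lock, cookie, cookie_len, &flags);
 *      if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
 *                   LDLM_FL_BLOCK_CONV))
 *              ...wait for the completion AST before using the lock...
 */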
/* Must be called with the namespace lock held: queue is waiting or
 * converting. */
static int ldlm_reprocess_queue(struct ldlm_resource *res,
                                struct list_head *queue)
{
        struct list_head *tmp, *pos;
        int rc = 0;

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;
                pending = list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                if (!ldlm_lock_compat(pending, 1)) {
                        rc = 1;
                        break;
                }

                list_del_init(&pending->l_res_link);
                ldlm_grant_lock(pending, NULL, 0, 1);
        }

        return rc;
}
int ldlm_run_ast_work(struct list_head *rpc_list)
{
        struct list_head *tmp, *pos;
        int rc, retval = 0;

        list_for_each_safe(tmp, pos, rpc_list) {
                struct ldlm_ast_work *w =
                        list_entry(tmp, struct ldlm_ast_work, w_list);

                /* It's possible to receive a completion AST before we've set
                 * the l_completion_ast pointer: either because the AST arrived
                 * before the reply, or simply because there's a small race
                 * window between receiving the reply and finishing the local
                 * enqueue.
                 *
                 * This can't happen with the blocking_ast, however, because we
                 * will never call the local blocking_ast until we drop our
                 * reader/writer reference, which we won't do until we get the
                 * reply and finish enqueueing. */
                if (w->w_blocking) {
                        LASSERT(w->w_lock->l_blocking_ast != NULL);
                        rc = w->w_lock->l_blocking_ast
                                (w->w_lock, &w->w_desc, w->w_data,
                                 LDLM_CB_BLOCKING);
                } else if (w->w_lock->l_completion_ast != NULL) {
                        rc = w->w_lock->l_completion_ast(w->w_lock, w->w_flags,
                                                         NULL);
                } else {
                        rc = 0;
                }

                if (rc == -ERESTART)
                        retval = rc;
                else if (rc)
                        CDEBUG(D_DLMTRACE, "Failed AST - should clean & "
                               "disconnect client\n");
                LDLM_LOCK_PUT(w->w_lock);
                list_del(&w->w_list);
                OBD_FREE(w, sizeof(*w));
        }

        return retval;
}
static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
{
        ldlm_reprocess_all(res);
        return LDLM_ITER_CONTINUE;
}

void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
{
        (void)ldlm_namespace_foreach_res(ns, reprocess_one_queue, NULL);
}

void ldlm_reprocess_all(struct ldlm_resource *res)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        int rc;

        /* Local lock trees don't get reprocessed. */
        if (res->lr_namespace->ns_client)
                return;

 restart:
        l_lock(&res->lr_namespace->ns_lock);
        res->lr_tmp = &rpc_list;

        ldlm_reprocess_queue(res, &res->lr_converting);
        if (list_empty(&res->lr_converting))
                ldlm_reprocess_queue(res, &res->lr_waiting);

        res->lr_tmp = NULL;
        l_unlock(&res->lr_namespace->ns_lock);

        rc = ldlm_run_ast_work(&rpc_list);
        if (rc == -ERESTART)
                goto restart;
}
void ldlm_cancel_callback(struct ldlm_lock *lock)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (!(lock->l_flags & LDLM_FL_CANCEL)) {
                lock->l_flags |= LDLM_FL_CANCEL;
                if (lock->l_blocking_ast) {
                        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                        lock->l_blocking_ast(lock, NULL, lock->l_data,
                                             LDLM_CB_CANCELING);
                        return;
                } else {
                        LDLM_DEBUG(lock, "no blocking ast");
                }
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}
void ldlm_lock_cancel(struct ldlm_lock *lock)
{
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;

        /* There's no race between calling this and taking the ns lock below;
         * a lock can only be put on the waiting list once, because it can only
         * issue a blocking AST once. */
        ldlm_del_waiting_lock(lock);

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);
        /* Please do not, no matter how tempting, remove this LBUG without
         * talking to me first. -phik */
        if (lock->l_readers || lock->l_writers) {
                LDLM_DEBUG(lock, "lock still has references");
                ldlm_lock_dump(D_OTHER, lock, 0);
                LBUG();
        }

        ldlm_cancel_callback(lock); /* XXX FIXME bug 1030 */

        ldlm_resource_unlink_lock(lock);
        ldlm_lock_destroy(lock);
        l_unlock(&ns->ns_lock);
}
int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
{
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);

        if (lock == NULL)
                return -EINVAL;

        lock->l_data = data;

        LDLM_LOCK_PUT(lock);

        return 0;
}
/* This function is only called from one thread (per export); no locking around
 * the list ops needed */
void ldlm_cancel_locks_for_export(struct obd_export *exp)
{
        struct list_head *iter, *n;

        list_for_each_safe(iter, n, &exp->exp_ldlm_data.led_held_locks) {
                struct ldlm_lock *lock;
                struct ldlm_resource *res;
                lock = list_entry(iter, struct ldlm_lock, l_export_chain);
                res = ldlm_resource_getref(lock->l_resource);
                LDLM_DEBUG(lock, "export %p", exp);
                ldlm_lock_cancel(lock);
                ldlm_reprocess_all(res);
                ldlm_resource_putref(res);
        }
}
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        int *flags)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        int granted = 0;

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);

        lock->l_req_mode = new_mode;
        ldlm_resource_unlink_lock(lock);

        /* If this is a local resource, put it on the appropriate list. */
        if (res->lr_namespace->ns_client) {
                if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                } else {
                        /* This should never happen, because of the way the
                         * server handles conversions. */
                        LBUG();

                        res->lr_tmp = &rpc_list;
                        ldlm_grant_lock(lock, NULL, 0, 0);
                        res->lr_tmp = NULL;
                        granted = 1;
                        /* FIXME: completion handling not with ns_lock held ! */
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0, NULL);
                }
        } else {
                /* FIXME: We should try the conversion right away and possibly
                 * return success without the need for an extra AST */
                ldlm_resource_add_lock(res, &res->lr_converting, lock);
                *flags |= LDLM_FL_BLOCK_CONV;
        }

        l_unlock(&ns->ns_lock);

        if (granted)
                ldlm_run_ast_work(&rpc_list);
        return res;
}
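
/*
 * Conversion sketch: downgrading a granted EX lock to PR.  On a client
 * namespace the lock is parked on lr_converting and LDLM_FL_BLOCK_CONV is
 * set, so the caller waits for the server's completion AST; a server-side
 * caller then reprocesses the resource, as in ldlm_handle_convert:
 *
 *      int flags = 0;
 *
 *      res = ldlm_lock_convert(lock, LCK_PR, &flags);
 *      if (flags & LDLM_FL_BLOCK_CONV)
 *              ...wait for the completion AST...
 *      if (res != NULL)
 *              ldlm_reprocess_all(res);
 */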
void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
{
        char ver[128];
        struct obd_device *obd = NULL;

        if (!((portal_debug | D_ERROR) & level))
                return;

        if (RES_VERSION_SIZE != 4)
                LBUG();

        if (lock == NULL) {
                CDEBUG(level, "  NULL LDLM lock\n");
                return;
        }

        snprintf(ver, sizeof(ver), "%x %x %x %x",
                 lock->l_version[0], lock->l_version[1],
                 lock->l_version[2], lock->l_version[3]);

        CDEBUG(level, "  -- Lock dump: %p (%s) (rc: %d) (pos: %d)\n", lock, ver,
               atomic_read(&lock->l_refc), pos);
        if (lock->l_connh != NULL)
                obd = class_conn2obd(lock->l_connh);
        if (lock->l_export && lock->l_export->exp_connection) {
                CDEBUG(level, "  Node: NID "LPX64" on %s (rhandle: "LPX64")\n",
                       lock->l_export->exp_connection->c_peer.peer_nid,
                       lock->l_export->exp_connection->c_peer.peer_ni->pni_name,
                       lock->l_remote_handle.cookie);
        } else if (obd == NULL) {
                CDEBUG(level, "  Node: local\n");
        } else {
                struct obd_import *imp = obd->u.cli.cl_import;
                CDEBUG(level, "  Node: NID "LPX64" on %s (rhandle: "LPX64")\n",
                       imp->imp_connection->c_peer.peer_nid,
                       imp->imp_connection->c_peer.peer_ni->pni_name,
                       lock->l_remote_handle.cookie);
        }
        CDEBUG(level, "  Resource: %p ("LPD64")\n", lock->l_resource,
               lock->l_resource->lr_name.name[0]);
        CDEBUG(level, "  Req mode: %d, grant mode: %d, readers: %d, writers: "
               "%d\n", (int)lock->l_req_mode, (int)lock->l_granted_mode,
               lock->l_readers, lock->l_writers);
        if (lock->l_resource->lr_type == LDLM_EXTENT)
                CDEBUG(level, "  Extent: "LPU64" -> "LPU64"\n",
                       lock->l_extent.start, lock->l_extent.end);
}
void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        if (lock == NULL)
                return;

        ldlm_lock_dump(level, lock, 0);

        LDLM_LOCK_PUT(lock);
}