/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * This file is part of Lustre, http://www.lustre.org.
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#define DEBUG_SUBSYSTEM S_LDLM

# include <linux/slab.h>
# include <linux/module.h>
# include <linux/lustre_dlm.h>

# include <liblustre.h>
# include <linux/kp30.h>

#include <linux/obd_class.h>
#include "ldlm_internal.h"

//struct lustre_lock ldlm_everything_lock;
char *ldlm_lockname[] = {

char *ldlm_typename[] = {

char *ldlm_it2str(int it)
        case (IT_OPEN | IT_CREAT):
                CERROR("Unknown intent %d\n", it);

extern kmem_cache_t *ldlm_lock_slab;
struct lustre_lock ldlm_handle_lock;

static ldlm_processing_policy ldlm_processing_policy_table[] = {
        [LDLM_PLAIN] ldlm_process_plain_lock,
        [LDLM_EXTENT] ldlm_process_extent_lock,
        [LDLM_FLOCK] ldlm_process_flock_lock,

static ldlm_res_policy ldlm_intent_policy_func;

void ldlm_register_intent(ldlm_res_policy arg)
        ldlm_intent_policy_func = arg;

void ldlm_unregister_intent(void)
        ldlm_intent_policy_func = NULL;
 * REFCOUNTED LOCK OBJECTS

 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock

struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
        atomic_inc(&lock->l_refc);

void ldlm_lock_put(struct ldlm_lock *lock)
        if (atomic_dec_and_test(&lock->l_refc)) {
                struct ldlm_namespace *ns = lock->l_resource->lr_namespace;

                l_lock(&ns->ns_lock);
                LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing");
                LASSERT(lock->l_destroyed);
                LASSERT(list_empty(&lock->l_res_link));

                spin_lock(&ns->ns_counter_lock);
                spin_unlock(&ns->ns_counter_lock);

                ldlm_resource_putref(lock->l_resource);
                lock->l_resource = NULL;

                class_export_put(lock->l_export);

                LDLM_LOCK_PUT(lock->l_parent);

                OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
                l_unlock(&ns->ns_lock);
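
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how callers are expected to pair the refcounting helpers above.  Every
 * LDLM_LOCK_GET()/ldlm_lock_get() must eventually be matched by an
 * LDLM_LOCK_PUT()/ldlm_lock_put(); the final put on a lock that has already
 * gone through ldlm_lock_destroy() is what frees it.  The function name
 * example_use_lock_ref() is hypothetical.
 */
#if 0   /* usage sketch only, not compiled */
static void example_use_lock_ref(struct ldlm_lock *lock)
{
        /* take a private reference while we poke at the lock */
        LDLM_LOCK_GET(lock);

        /* ... inspect or use 'lock' here ... */

        /* drop our reference; if this was the last one and the lock was
         * already destroyed, ldlm_lock_put() frees it */
        LDLM_LOCK_PUT(lock);
}
#endif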
void ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (!list_empty(&lock->l_lru)) {
                list_del_init(&lock->l_lru);
                lock->l_resource->lr_namespace->ns_nr_unused--;
                LASSERT(lock->l_resource->lr_namespace->ns_nr_unused >= 0);

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
/* This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock,
 * because it's not in the hash table anymore. -phil */
void ldlm_lock_destroy(struct ldlm_lock *lock)
        l_lock(&lock->l_resource->lr_namespace->ns_lock);

        if (!list_empty(&lock->l_children)) {
                LDLM_ERROR(lock, "still has children (%p)!",
                           lock->l_children.next);
                ldlm_lock_dump(D_ERROR, lock, 0);

        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                ldlm_lock_dump(D_ERROR, lock, 0);

        if (!list_empty(&lock->l_res_link)) {
                ldlm_lock_dump(D_ERROR, lock, 0);

        if (lock->l_destroyed) {
                LASSERT(list_empty(&lock->l_lru));
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        lock->l_destroyed = 1;

        list_del_init(&lock->l_export_chain);
        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        class_export_put(lock->l_export);
        lock->l_export = NULL;
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0);

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
/* this is called by portals_handle2object with the handle lock taken */
static void lock_handle_addref(void *lock)
        LDLM_LOCK_GET((struct ldlm_lock *)lock);

 * usage: pass in a resource on which you have done ldlm_resource_get
 *        pass in a parent lock on which you have done an ldlm_lock_get
 *        after return, ldlm_*_put the resource and parent
 * returns: lock with refcount 1
static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
                                       struct ldlm_resource *resource)
        struct ldlm_lock *lock;

        if (resource == NULL)

        OBD_SLAB_ALLOC(lock, ldlm_lock_slab, SLAB_NOFS, sizeof(*lock));

        lock->l_resource = ldlm_resource_getref(resource);

        atomic_set(&lock->l_refc, 2);
        INIT_LIST_HEAD(&lock->l_children);
        INIT_LIST_HEAD(&lock->l_res_link);
        INIT_LIST_HEAD(&lock->l_lru);
        INIT_LIST_HEAD(&lock->l_export_chain);
        INIT_LIST_HEAD(&lock->l_pending_chain);
        init_waitqueue_head(&lock->l_waitq);

        spin_lock(&resource->lr_namespace->ns_counter_lock);
        resource->lr_namespace->ns_locks++;
        spin_unlock(&resource->lr_namespace->ns_counter_lock);

        if (parent != NULL) {
                l_lock(&parent->l_resource->lr_namespace->ns_lock);
                lock->l_parent = LDLM_LOCK_GET(parent);
                list_add(&lock->l_childof, &parent->l_children);
                l_unlock(&parent->l_resource->lr_namespace->ns_lock);

        INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, lock_handle_addref);
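
/*
 * Illustrative sketch (editor's addition): the calling convention described
 * in the comment above ldlm_lock_new().  The caller holds its own references
 * on the resource and the optional parent lock and drops them after the call;
 * the new lock takes its own references and comes back referenced for the
 * caller (or NULL on allocation failure).  The name example_new_lock() and
 * the 'res'/'parent' variables are hypothetical.
 */
#if 0   /* usage sketch only, not compiled */
static struct ldlm_lock *example_new_lock(struct ldlm_resource *res,
                                          struct ldlm_lock *parent)
{
        struct ldlm_lock *lock;

        /* caller already did ldlm_resource_getref(res) and, if parent is
         * non-NULL, LDLM_LOCK_GET(parent) */
        lock = ldlm_lock_new(parent, res);

        /* ldlm_lock_new() took its own references, so drop ours */
        ldlm_resource_putref(res);
        if (parent != NULL)
                LDLM_LOCK_PUT(parent);

        return lock;
}
#endif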
int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              struct ldlm_res_id new_resid)
        struct ldlm_resource *oldres = lock->l_resource;

        l_lock(&ns->ns_lock);
        if (memcmp(&new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                l_unlock(&ns->ns_lock);

        LASSERT(new_resid.name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(list_empty(&lock->l_res_link));

        lock->l_resource = ldlm_resource_get(ns, NULL, new_resid,
                                             lock->l_resource->lr_type, 1);
        if (lock->l_resource == NULL) {

        /* ...and the flowers are still standing! */
        ldlm_resource_putref(oldres);

        l_unlock(&ns->ns_lock);
void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
        lockh->cookie = lock->l_handle.h_cookie;

/* if flags: atomically get the lock and set the flags.
 *           Return NULL if flag already set
struct ldlm_lock *__ldlm_handle2lock(struct lustre_handle *handle, int flags)
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock = NULL, *retval = NULL;

        lock = class_handle2object(handle->cookie);

        LASSERT(lock->l_resource != NULL);
        ns = lock->l_resource->lr_namespace;

        l_lock(&ns->ns_lock);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if (lock->l_destroyed) {
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);

        if (flags && (lock->l_flags & flags)) {

        lock->l_flags |= flags;

        l_unlock(&ns->ns_lock);
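
/*
 * Illustrative sketch (editor's addition): converting between a lock and its
 * opaque handle.  ldlm_lock2handle() copies the cookie out of the lock, and
 * ldlm_handle2lock()/__ldlm_handle2lock() looks the lock up again and returns
 * it referenced (or NULL if the handle is stale or the lock was destroyed),
 * so the caller must LDLM_LOCK_PUT() it.  example_handle_roundtrip() is a
 * hypothetical name.
 */
#if 0   /* usage sketch only, not compiled */
static void example_handle_roundtrip(struct ldlm_lock *lock)
{
        struct lustre_handle lockh;
        struct ldlm_lock *found;

        ldlm_lock2handle(lock, &lockh);         /* lock -> cookie */

        found = ldlm_handle2lock(&lockh);       /* cookie -> referenced lock */
        if (found == NULL)
                return;                         /* stale or destroyed */

        /* ... use 'found' ... */
        LDLM_LOCK_PUT(found);
}
#endif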
struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *ns,
                                      struct lustre_handle *handle)
        struct ldlm_lock *retval = NULL;

        l_lock(&ns->ns_lock);
        retval = __ldlm_handle2lock(handle, 0);
        l_unlock(&ns->ns_lock);

void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
        ldlm_res2desc(lock->l_resource, &desc->l_resource);
        desc->l_req_mode = lock->l_req_mode;
        desc->l_granted_mode = lock->l_granted_mode;
        memcpy(&desc->l_policy_data, &lock->l_policy_data,
               sizeof(desc->l_policy_data));
        memcpy(desc->l_version, lock->l_version, sizeof(desc->l_version));
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            void *data, int datalen)
        struct ldlm_ast_work *w;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (new && (lock->l_flags & LDLM_FL_AST_SENT))

        CDEBUG(D_OTHER, "lock %p incompatible; sending blocking AST.\n", lock);

        OBD_ALLOC(w, sizeof(*w));

        w->w_datalen = datalen;

        LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
        lock->l_flags |= LDLM_FL_AST_SENT;
        /* If the enqueuing client said so, tell the AST recipient to
         * discard dirty data, rather than writing back. */
        if (new->l_flags & LDLM_AST_DISCARD_DATA)
                lock->l_flags |= LDLM_FL_DISCARD_DATA;

        ldlm_lock2desc(new, &w->w_desc);

        w->w_lock = LDLM_LOCK_GET(lock);
        list_add(&w->w_list, lock->l_resource->lr_tmp);

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        ldlm_lock_addref_internal(lock, mode);

/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_lock_remove_from_lru(lock);
        if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR)

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);

void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
        struct ldlm_namespace *ns;

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        ns = lock->l_resource->lr_namespace;
        l_lock(&ns->ns_lock);
        if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR) {
                LASSERT(lock->l_readers > 0);

                LASSERT(lock->l_writers > 0);

        if (lock->l_flags & LDLM_FL_LOCAL &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                lock->l_flags |= LDLM_FL_CBPENDING;

        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                /* If we received a blocked AST and this was the last reference,
                 * run the callback. */
                if (!ns->ns_client && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "

                LDLM_DEBUG(lock, "final decref done on cbpending lock");
                l_unlock(&ns->ns_lock);

                l_check_no_ns_lock(ns);
                /* FIXME: need a real 'desc' here */
                if (lock->l_blocking_ast != NULL)
                        lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
        } else if (ns->ns_client && !lock->l_readers && !lock->l_writers) {
                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                LASSERT(list_empty(&lock->l_lru));
                LASSERT(ns->ns_nr_unused >= 0);
                list_add_tail(&lock->l_lru, &ns->ns_unused_list);

                l_unlock(&ns->ns_lock);

                l_unlock(&ns->ns_lock);

        LDLM_LOCK_PUT(lock);    /* matches the ldlm_lock_get in addref */

void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        LASSERT(lock != NULL);
        ldlm_lock_decref_internal(lock, mode);
/* This will drop a lock reference and mark it for destruction, but will not
 * necessarily cancel the lock before returning. */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        lock->l_flags |= LDLM_FL_CBPENDING;
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_lock_decref_internal(lock, mode);
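
/*
 * Illustrative sketch (editor's addition): the reader/writer reference
 * discipline implemented above.  A handle is addref'd with the mode the lock
 * was granted in (LCK_NL/LCK_CR/LCK_PR count as reader references, other
 * modes as writer references) and must be decref'd with the same mode.
 * ldlm_lock_decref_and_cancel() additionally sets LDLM_FL_CBPENDING so the
 * lock is cancelled once the last reference goes away.  The function name
 * example_use_and_release() is hypothetical.
 */
#if 0   /* usage sketch only, not compiled */
static void example_use_and_release(struct lustre_handle *lockh, int keep)
{
        /* pin the granted lock for the duration of the protected work */
        ldlm_lock_addref(lockh, LCK_PR);

        /* ... protected work happens here ... */

        if (keep)
                /* just drop our reference; the lock stays granted/cached */
                ldlm_lock_decref(lockh, LCK_PR);
        else
                /* drop the reference and ask for the lock to be cancelled
                 * once nobody else holds it */
                ldlm_lock_decref_and_cancel(lockh, LCK_PR);
}
#endif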
 * - ldlm_lock_enqueue
 * - ldlm_reprocess_queue
 * - ldlm_lock_convert
void ldlm_grant_lock(struct ldlm_lock *lock, void *data, int datalen,
        struct ldlm_resource *res = lock->l_resource;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        lock->l_granted_mode = lock->l_req_mode;
        ldlm_resource_add_lock(res, &res->lr_granted, lock);

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (run_ast && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, data, datalen);

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
/* returns a referenced lock or NULL.  See the flag descriptions below, in the
 * comment above ldlm_lock_match */
static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode,
                                      struct ldlm_extent *extent,
                                      struct ldlm_lock *old_lock, int flags)
        struct ldlm_lock *lock;
        struct list_head *tmp;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock == old_lock)

                /* llite sometimes wants to match locks that will be
                 * canceled when their users drop, but we allow it to match
                 * if it passes in CBPENDING and the lock still has users.
                 * this is generally only going to be used by children
                 * whose parents already hold a lock so forward progress
                 * can still happen. */
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    !(flags & LDLM_FL_CBPENDING))
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    lock->l_readers == 0 && lock->l_writers == 0)

                if (lock->l_req_mode != mode)

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_policy_data.l_extent.start > extent->start ||
                     lock->l_policy_data.l_extent.end < extent->end))

                if (lock->l_destroyed)

                if ((flags & LDLM_FL_LOCAL_ONLY) &&
                    !(lock->l_flags & LDLM_FL_LOCAL))

                ldlm_lock_addref_internal(lock, mode);
/* Can be called in two ways:
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 * Otherwise, all of the fields must be filled in, to match against.
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 * server (i.e., connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 * list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 * to be canceled can still be matched as long as they still have reader
 * or writer references
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock
int ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                    struct ldlm_res_id *res_id, __u32 type, void *cookie,
                    int cookielen, ldlm_mode_t mode,
                    struct lustre_handle *lockh)
        struct ldlm_resource *res;
        struct ldlm_lock *lock, *old_lock = NULL;

        old_lock = ldlm_handle2lock(lockh);

        ns = old_lock->l_resource->lr_namespace;
        res_id = &old_lock->l_resource->lr_name;
        type = old_lock->l_resource->lr_type;
        mode = old_lock->l_req_mode;

        res = ldlm_resource_get(ns, NULL, *res_id, type, 0);

        LASSERT(old_lock == NULL);

        l_lock(&ns->ns_lock);

        lock = search_queue(&res->lr_granted, mode, cookie, old_lock, flags);

        if (flags & LDLM_FL_BLOCK_GRANTED)

        lock = search_queue(&res->lr_converting, mode, cookie, old_lock, flags);

        lock = search_queue(&res->lr_waiting, mode, cookie, old_lock, flags);

        ldlm_resource_putref(res);
        l_unlock(&ns->ns_lock);

        ldlm_lock2handle(lock, lockh);
        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC,

        LDLM_DEBUG(lock, "matched");

        LDLM_DEBUG_NOLOCK("not matched");

        LDLM_LOCK_PUT(old_lock);
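
/*
 * Illustrative sketch (editor's addition): matching an existing lock instead
 * of enqueueing a new one, following the comment above ldlm_lock_match().
 * On a hit the handle comes back addref()ed in the requested mode, so the
 * caller must eventually ldlm_lock_decref() it.  The resource id, the extent
 * and the example_try_match() name are made up for illustration.
 */
#if 0   /* usage sketch only, not compiled */
static int example_try_match(struct ldlm_namespace *ns)
{
        struct ldlm_res_id res_id = { .name = { 0x2a } };       /* made-up id */
        struct ldlm_extent ext = { .start = 0, .end = 4095 };   /* made-up range */
        struct lustre_handle lockh;

        if (ldlm_lock_match(ns, LDLM_FL_BLOCK_GRANTED, &res_id, LDLM_EXTENT,
                            &ext, sizeof(ext), LCK_PR, &lockh)) {
                /* ... reuse the cached lock here ... */
                ldlm_lock_decref(&lockh, LCK_PR);
                return 1;
        }
        return 0;       /* no compatible granted lock; enqueue a new one */
}
#endif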
/* Returns a referenced lock */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   struct lustre_handle *parent_lock_handle,
                                   struct ldlm_res_id res_id, __u32 type,
                                   ldlm_blocking_callback blocking,
                                   ldlm_completion_callback completion,
        struct ldlm_resource *res, *parent_res = NULL;
        struct ldlm_lock *lock, *parent_lock = NULL;

        if (parent_lock_handle) {
                parent_lock = ldlm_handle2lock(parent_lock_handle);

                parent_res = parent_lock->l_resource;

        res = ldlm_resource_get(ns, parent_res, res_id, type, 1);

        lock = ldlm_lock_new(parent_lock, res);
        ldlm_resource_putref(res);
        if (parent_lock != NULL)
                LDLM_LOCK_PUT(parent_lock);

        lock->l_req_mode = mode;
        lock->l_ast_data = data;
        lock->l_blocking_ast = blocking;
        lock->l_completion_ast = completion;
ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                               struct ldlm_lock **lockp,
                               void *cookie, int cookie_len, int *flags)
        struct ldlm_lock *lock = *lockp;
        struct ldlm_resource *res = lock->l_resource;
        int local = res->lr_namespace->ns_client;
        ldlm_processing_policy policy;
        ldlm_error_t rc = ELDLM_OK;

        if (res->lr_type != LDLM_PLAIN)
                memcpy(&lock->l_policy_data, cookie, cookie_len);

        /* policies are not executed on the client or during replay */
        if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
            && !local && ldlm_intent_policy_func) {
                rc = ldlm_intent_policy_func(ns, lockp, cookie,
                                             lock->l_req_mode, *flags, NULL);
                if (rc == ELDLM_LOCK_REPLACED) {
                        /* The lock that was returned has already been granted,
                         * and placed into lockp.  Destroy the old one and our
                         * work here is done. */
                        ldlm_lock_destroy(lock);

                        *flags |= LDLM_FL_LOCK_CHANGED;

                } else if (rc == ELDLM_LOCK_ABORTED ||
                           (rc == 0 && (*flags & LDLM_FL_INTENT_ONLY))) {
                        ldlm_lock_destroy(lock);

                LASSERT(rc == ELDLM_OK);

        l_lock(&ns->ns_lock);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted before
                 * we got a chance to actually enqueue it.  We don't need to do
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                            LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);

        /* Some flags from the enqueue want to make it into the AST, via the
        lock->l_flags |= (*flags & LDLM_AST_DISCARD_DATA);

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below).  In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags'.
         * We do exactly the same thing during recovery, when the server is
         * more or less trusting the clients not to lie.
         * FIXME (bug 268): Detect obvious lies by checking compatibility in
         * granted/converting queues. */
        ldlm_resource_unlink_lock(lock);

        if (*flags & LDLM_FL_BLOCK_CONV)
                ldlm_resource_add_lock(res, &res->lr_converting, lock);
        else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                ldlm_resource_add_lock(res, &res->lr_waiting, lock);

        ldlm_grant_lock(lock, NULL, 0, 0);

        } else if (*flags & LDLM_FL_REPLAY) {
                if (*flags & LDLM_FL_BLOCK_CONV) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                } else if (*flags & LDLM_FL_BLOCK_WAIT) {
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
                        ldlm_grant_lock(lock, NULL, 0, 0);

                /* If no flags, fall through to normal enqueue path. */

        policy = ldlm_processing_policy_table[res->lr_type];
        policy(lock, flags, 1, &rc);

        l_unlock(&ns->ns_lock);
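
/*
 * Illustrative sketch (editor's addition): how a caller drives
 * ldlm_lock_enqueue().  A lock is created first with ldlm_lock_create()
 * (some of its arguments are elided in this listing), then enqueued with an
 * in/out 'flags' word; on return the lock may have been granted directly or
 * queued on the converting/waiting list, as indicated by the LDLM_FL_BLOCK_*
 * bits.  example_enqueue() and its error handling are hypothetical; for a
 * plain lock no policy data needs to be passed (cookie NULL, length 0).
 */
#if 0   /* usage sketch only, not compiled */
static int example_enqueue(struct ldlm_namespace *ns, struct ldlm_lock *lock)
{
        int flags = 0;                          /* in/out blocking flags */
        ldlm_error_t err;

        err = ldlm_lock_enqueue(ns, &lock, NULL, 0, &flags);
        if (err != ELDLM_OK)
                return -1;                      /* enqueue-level failure */

        if (flags & (LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV |
                     LDLM_FL_BLOCK_WAIT))
                /* not granted yet: wait for the completion AST */
                ;

        return 0;
}
#endif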
/* Must be called with the namespace lock taken; 'queue' is the waiting or
 * converting list. */
int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue)
        struct list_head *tmp, *pos;
        ldlm_processing_policy policy;

        int rc = LDLM_ITER_CONTINUE;

        policy = ldlm_processing_policy_table[res->lr_type];

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;
                pending = list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                rc = policy(pending, &flags, 0, &err);
                if (rc != LDLM_ITER_CONTINUE)
int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list)
        struct list_head *tmp, *pos;

        l_check_no_ns_lock(ns);

        list_for_each_safe(tmp, pos, rpc_list) {
                struct ldlm_ast_work *w =
                        list_entry(tmp, struct ldlm_ast_work, w_list);

                /* It's possible to receive a completion AST before we've set
                 * the l_completion_ast pointer: either because the AST arrived
                 * before the reply, or simply because there's a small race
                 * window between receiving the reply and finishing the local
                 *
                 * This can't happen with the blocking_ast, however, because we
                 * will never call the local blocking_ast until we drop our
                 * reader/writer reference, which we won't do until we get the
                 * reply and finish enqueueing. */
                LASSERT(w->w_lock != NULL);

                        LASSERT(w->w_lock->l_blocking_ast != NULL);
                        rc = w->w_lock->l_blocking_ast
                                (w->w_lock, &w->w_desc, w->w_data,
                } else if (w->w_lock->l_completion_ast != NULL) {
                        LASSERT(w->w_lock->l_completion_ast != NULL);
                        rc = w->w_lock->l_completion_ast(w->w_lock, w->w_flags,

                        CDEBUG(D_DLMTRACE, "Failed AST - should clean & "
                               "disconnect client\n");

                LDLM_LOCK_PUT(w->w_lock);
                list_del(&w->w_list);
                OBD_FREE(w, sizeof(*w));
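
/*
 * Illustrative sketch (editor's addition): the work-list pattern used by the
 * reprocessing code.  While the namespace lock is held, res->lr_tmp points at
 * a local list and ldlm_add_ast_work_item()/ldlm_grant_lock() queue AST work
 * on it; after the lock is dropped, ldlm_run_ast_work() sends the ASTs.  This
 * mirrors ldlm_reprocess_all() below; example_collect_and_run() is a
 * hypothetical name.
 */
#if 0   /* usage sketch only, not compiled */
static void example_collect_and_run(struct ldlm_resource *res)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);

        l_lock(&res->lr_namespace->ns_lock);
        res->lr_tmp = &rpc_list;        /* AST work items accumulate here */

        /* ... grant or reprocess locks on 'res'; each grant with run_ast set
         * adds a work item to rpc_list via ldlm_add_ast_work_item() ... */

        res->lr_tmp = NULL;
        l_unlock(&res->lr_namespace->ns_lock);

        /* send the queued completion/blocking ASTs outside the lock */
        ldlm_run_ast_work(res->lr_namespace, &rpc_list);
}
#endif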
static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
        ldlm_reprocess_all(res);
        return LDLM_ITER_CONTINUE;

void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
        (void)ldlm_namespace_foreach_res(ns, reprocess_one_queue, NULL);

void ldlm_reprocess_all(struct ldlm_resource *res)
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);

        /* Local lock trees don't get reprocessed. */
        if (res->lr_namespace->ns_client) {

        l_lock(&res->lr_namespace->ns_lock);
        res->lr_tmp = &rpc_list;

        rc = ldlm_reprocess_queue(res, &res->lr_converting);
        if (rc == LDLM_ITER_CONTINUE)
                ldlm_reprocess_queue(res, &res->lr_waiting);

        l_unlock(&res->lr_namespace->ns_lock);

        rc = ldlm_run_ast_work(res->lr_namespace, &rpc_list);
        if (rc == -ERESTART) {
                LASSERT(list_empty(&rpc_list));
void ldlm_cancel_callback(struct ldlm_lock *lock)
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (!(lock->l_flags & LDLM_FL_CANCEL)) {
                lock->l_flags |= LDLM_FL_CANCEL;
                if (lock->l_blocking_ast) {
                        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                        // l_check_no_ns_lock(lock->l_resource->lr_namespace);
                        lock->l_blocking_ast(lock, NULL, lock->l_ast_data,

                        LDLM_DEBUG(lock, "no blocking ast");

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

void ldlm_lock_cancel(struct ldlm_lock *lock)
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;

        /* There's no race between calling this and taking the ns lock below;
         * a lock can only be put on the waiting list once, because it can only
         * issue a blocking AST once. */
        ldlm_del_waiting_lock(lock);

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);
        /* Please do not, no matter how tempting, remove this LBUG without
         * talking to me first. -phik */
        if (lock->l_readers || lock->l_writers) {
                LDLM_DEBUG(lock, "lock still has references");
                ldlm_lock_dump(D_OTHER, lock, 0);

        ldlm_cancel_callback(lock);

        ldlm_resource_unlink_lock(lock);
        ldlm_lock_destroy(lock);
        l_unlock(&ns->ns_lock);
int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);

        lock->l_ast_data = data;

void ldlm_cancel_locks_for_export(struct obd_export *exp)
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;

        l_lock(&ns->ns_lock);
        while (!list_empty(&exp->exp_ldlm_data.led_held_locks)) {
                lock = list_entry(exp->exp_ldlm_data.led_held_locks.next,
                                  struct ldlm_lock, l_export_chain);
                res = ldlm_resource_getref(lock->l_resource);
                LDLM_DEBUG(lock, "export %p", exp);
                ldlm_lock_cancel(lock);
                l_unlock(&ns->ns_lock);
                ldlm_reprocess_all(res);
                ldlm_resource_putref(res);
                l_lock(&ns->ns_lock);

        l_unlock(&ns->ns_lock);
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);

        lock->l_req_mode = new_mode;
        ldlm_resource_unlink_lock(lock);

        /* If this is a local resource, put it on the appropriate list. */
        if (res->lr_namespace->ns_client) {
                if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);

                        /* This should never happen, because of the way the
                         * server handles conversions. */

                        res->lr_tmp = &rpc_list;
                        ldlm_grant_lock(lock, NULL, 0, 0);

                        /* FIXME: completion handling not with ns_lock held ! */
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0, NULL);

        /* FIXME: We should try the conversion right away and possibly
         * return success without the need for an extra AST */
        ldlm_resource_add_lock(res, &res->lr_converting, lock);
        *flags |= LDLM_FL_BLOCK_CONV;

        l_unlock(&ns->ns_lock);

        ldlm_run_ast_work(ns, &rpc_list);
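
/*
 * Illustrative sketch (editor's addition): a mode conversion using
 * ldlm_lock_convert(); its trailing 'int *flags' parameter is inferred from
 * the body above, where *flags is read and set.  On a client namespace the
 * lock is queued on lr_converting and LDLM_FL_BLOCK_CONV is set, so the
 * caller waits for the completion AST.  example_downconvert() is a
 * hypothetical name.
 */
#if 0   /* usage sketch only, not compiled */
static void example_downconvert(struct lustre_handle *lockh)
{
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);
        int flags = 0;

        if (lock == NULL)
                return;

        ldlm_lock_convert(lock, LCK_PR, &flags);        /* e.g. PW -> PR */
        if (flags & LDLM_FL_BLOCK_CONV)
                /* conversion is queued; the completion AST will signal it */
                ;

        LDLM_LOCK_PUT(lock);
}
#endif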
void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
        char str[PTL_NALFMT_SIZE];
        struct obd_device *obd = NULL;

        if (!((portal_debug | D_ERROR) & level))

        if (RES_VERSION_SIZE != 4)

        CDEBUG(level, " NULL LDLM lock\n");

        snprintf(ver, sizeof(ver), "%x %x %x %x",
                 lock->l_version[0], lock->l_version[1],
                 lock->l_version[2], lock->l_version[3]);

        CDEBUG(level, " -- Lock dump: %p/"LPX64" (%s) (rc: %d) (pos: %d)\n",
               lock, lock->l_handle.h_cookie, ver, atomic_read(&lock->l_refc),

        if (lock->l_conn_export != NULL)
                obd = lock->l_conn_export->exp_obd;
        if (lock->l_export && lock->l_export->exp_connection) {
                CDEBUG(level, " Node: NID "LPX64" (%s) on %s (rhandle: "LPX64")\n",
                       lock->l_export->exp_connection->c_peer.peer_nid,
                       portals_nid2str(lock->l_export->exp_connection->c_peer.peer_ni->pni_number,
                                       lock->l_export->exp_connection->c_peer.peer_nid, str),
                       lock->l_export->exp_connection->c_peer.peer_ni->pni_name,
                       lock->l_remote_handle.cookie);
        } else if (obd == NULL) {
                CDEBUG(level, " Node: local\n");

                struct obd_import *imp = obd->u.cli.cl_import;
                CDEBUG(level, " Node: NID "LPX64" (%s) on %s (rhandle: "LPX64")\n",
                       imp->imp_connection->c_peer.peer_nid,
                       portals_nid2str(imp->imp_connection->c_peer.peer_ni->pni_number,
                                       imp->imp_connection->c_peer.peer_nid, str),
                       imp->imp_connection->c_peer.peer_ni->pni_name,
                       lock->l_remote_handle.cookie);

        CDEBUG(level, " Resource: %p ("LPU64"/"LPU64")\n", lock->l_resource,
               lock->l_resource->lr_name.name[0],
               lock->l_resource->lr_name.name[1]);
        CDEBUG(level, " Req mode: %d, grant mode: %d, rc: %u, read: %d, "
               "write: %d\n", (int)lock->l_req_mode, (int)lock->l_granted_mode,
               atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers);
        if (lock->l_resource->lr_type == LDLM_EXTENT)
                CDEBUG(level, " Extent: "LPU64" -> "LPU64"\n",
                       lock->l_policy_data.l_extent.start,
                       lock->l_policy_data.l_extent.end);
        else if (lock->l_resource->lr_type == LDLM_FLOCK)
                CDEBUG(level, " Pid: %d Extent: "LPU64" -> "LPU64"\n",
                       lock->l_policy_data.l_flock.pid,
                       lock->l_policy_data.l_flock.start,
                       lock->l_policy_data.l_flock.end);

void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);

        ldlm_lock_dump(D_OTHER, lock, 0);

        LDLM_LOCK_PUT(lock);
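
/*
 * Illustrative sketch (editor's addition): dumping a lock for debugging when
 * only its handle is at hand.  ldlm_lock_dump_handle() resolves the handle
 * and prints the lock via ldlm_lock_dump(); 'level' is a debug mask such as
 * D_OTHER or D_ERROR.  example_dump_by_handle() is a hypothetical name.
 */
#if 0   /* usage sketch only, not compiled */
static void example_dump_by_handle(struct ldlm_lock *lock)
{
        struct lustre_handle lockh;

        ldlm_lock2handle(lock, &lockh);
        ldlm_lock_dump_handle(D_OTHER, &lockh);
}
#endif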