/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <linux/slab.h>
# include <linux/module.h>
# include <linux/lustre_dlm.h>
#else
# include <liblustre.h>
# include <linux/kp30.h>
#endif

#include <linux/obd_class.h>
#include "ldlm_internal.h"
//struct lustre_lock ldlm_everything_lock;

char *ldlm_lockname[] = {
        [0] "--",
        [LCK_EX] "EX",
        [LCK_PW] "PW",
        [LCK_PR] "PR",
        [LCK_CW] "CW",
        [LCK_CR] "CR",
        [LCK_NL] "NL",
        [LCK_GROUP] "GROUP"
};

char *ldlm_typename[] = {
        [LDLM_PLAIN] "PLN",
        [LDLM_EXTENT] "EXT",
        [LDLM_FLOCK] "FLK",
        [LDLM_IBITS] "IBT",
};
char *ldlm_it2str(int it)
        case (IT_OPEN | IT_CREAT):
                CERROR("Unknown intent %d\n", it);
extern kmem_cache_t *ldlm_lock_slab;
struct lustre_lock ldlm_handle_lock;
static ldlm_processing_policy ldlm_processing_policy_table[] = {
        [LDLM_PLAIN] ldlm_process_plain_lock,
        [LDLM_EXTENT] ldlm_process_extent_lock,
        [LDLM_FLOCK] ldlm_process_flock_lock,
        [LDLM_IBITS] ldlm_process_inodebits_lock,
};

ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
        return ldlm_processing_policy_table[res->lr_type];
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)

/*
 * REFCOUNTED LOCK OBJECTS
 */

/*
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */
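
/*
 * A minimal usage sketch (illustration only, not part of the original file):
 * every LDLM_LOCK_GET()/ldlm_lock_get() must be balanced by a matching
 * LDLM_LOCK_PUT()/ldlm_lock_put(), and the final put frees the lock only
 * after ldlm_lock_destroy() has marked it destroyed and unhashed it:
 *
 *      LDLM_LOCK_GET(lock);            // temporary reference for this caller
 *      ... use lock ...
 *      LDLM_LOCK_PUT(lock);            // drop it; the lock is freed here only
 *                                      //  if it was already destroyed and this
 *                                      //  was the last reference
 */
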
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
        atomic_inc(&lock->l_refc);
void ldlm_lock_put(struct ldlm_lock *lock)
        if (atomic_dec_and_test(&lock->l_refc)) {
                struct ldlm_namespace *ns = lock->l_resource->lr_namespace;

                l_lock(&ns->ns_lock);
                LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing");
                LASSERT(lock->l_destroyed);
                LASSERT(list_empty(&lock->l_res_link));

                spin_lock(&ns->ns_counter_lock);
                ns->ns_locks--;
                spin_unlock(&ns->ns_counter_lock);

                ldlm_resource_putref(lock->l_resource);
                lock->l_resource = NULL;
                if (lock->l_export)
                        class_export_put(lock->l_export);
                if (lock->l_parent)
                        LDLM_LOCK_PUT(lock->l_parent);

                if (lock->l_lvb_data != NULL)
                        OBD_FREE(lock->l_lvb_data, lock->l_lvb_len);

                OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
                l_unlock(&ns->ns_lock);
        }
void ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (!list_empty(&lock->l_lru)) {
                list_del_init(&lock->l_lru);
                lock->l_resource->lr_namespace->ns_nr_unused--;
                LASSERT(lock->l_resource->lr_namespace->ns_nr_unused >= 0);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
/* This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock.
 * Because it's not in the hash table anymore. -phil */
void ldlm_lock_destroy(struct ldlm_lock *lock)
        l_lock(&lock->l_resource->lr_namespace->ns_lock);

        if (!list_empty(&lock->l_children)) {
                LDLM_ERROR(lock, "still has children (%p)!",
                           lock->l_children.next);
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (!list_empty(&lock->l_res_link)) {
                LDLM_ERROR(lock, "lock still on resource");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (lock->l_destroyed) {
                LASSERT(list_empty(&lock->l_lru));
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                return;
        }

        lock->l_destroyed = 1;

        list_del_init(&lock->l_export_chain);
        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

#if 0
        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        if (lock->l_export)
                class_export_put(lock->l_export);
        lock->l_export = NULL;
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0);
#endif

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
/* this is called by portals_handle2object with the handle lock taken */
static void lock_handle_addref(void *lock)
        LDLM_LOCK_GET((struct ldlm_lock *)lock);
/*
 * usage: pass in a resource on which you have done ldlm_resource_get
 *        pass in a parent lock on which you have done an ldlm_lock_get
 *        after return, ldlm_*_put the resource and parent
 * returns: lock with refcount 2 - one for current caller and one for remote
 */
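
/*
 * Calling-convention sketch (illustration only; error handling elided and the
 * surrounding variables are assumed):
 *
 *      res = ldlm_resource_get(ns, NULL, res_id, type, 1);  // our resource ref
 *      lock = ldlm_lock_new(NULL, res);                     // lock takes its own refs
 *      ldlm_resource_putref(res);                           // drop our ref again
 *
 * This is the same pattern ldlm_lock_create() below follows.
 */
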
static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
                                       struct ldlm_resource *resource)
        struct ldlm_lock *lock;

        if (resource == NULL)
                LBUG();

        OBD_SLAB_ALLOC(lock, ldlm_lock_slab, SLAB_NOFS, sizeof(*lock));
        if (lock == NULL)
                RETURN(NULL);

        lock->l_resource = ldlm_resource_getref(resource);

        atomic_set(&lock->l_refc, 2);
        INIT_LIST_HEAD(&lock->l_children);
        INIT_LIST_HEAD(&lock->l_res_link);
        INIT_LIST_HEAD(&lock->l_lru);
        INIT_LIST_HEAD(&lock->l_export_chain);
        INIT_LIST_HEAD(&lock->l_pending_chain);
        init_waitqueue_head(&lock->l_waitq);

        spin_lock(&resource->lr_namespace->ns_counter_lock);
        resource->lr_namespace->ns_locks++;
        spin_unlock(&resource->lr_namespace->ns_counter_lock);

        if (parent != NULL) {
                l_lock(&parent->l_resource->lr_namespace->ns_lock);
                lock->l_parent = LDLM_LOCK_GET(parent);
                list_add(&lock->l_childof, &parent->l_children);
                l_unlock(&parent->l_resource->lr_namespace->ns_lock);
        }

        INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, lock_handle_addref);
int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              struct ldlm_res_id new_resid)
        struct ldlm_resource *oldres = lock->l_resource;

        l_lock(&ns->ns_lock);
        if (memcmp(&new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                l_unlock(&ns->ns_lock);
                RETURN(0);
        }

        LASSERT(new_resid.name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(list_empty(&lock->l_res_link));

        lock->l_resource = ldlm_resource_get(ns, NULL, new_resid,
                                             lock->l_resource->lr_type, 1);
        if (lock->l_resource == NULL) {
                LBUG();
                RETURN(-ENOMEM);
        }

        /* ...and the flowers are still standing! */
        ldlm_resource_putref(oldres);

        l_unlock(&ns->ns_lock);
void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
        lockh->cookie = lock->l_handle.h_cookie;
/* if flags: atomically get the lock and set the flags.
 *           Return NULL if flag already set
 */
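
/*
 * Example (hypothetical, for illustration only): a caller that wants
 * cancel-once semantics could use the flag form, e.g.
 *
 *      lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
 *      if (lock == NULL)
 *              return;         // someone else is already cancelling it
 *
 * while passing flags == 0 is a plain handle-to-lock translation.
 */
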
struct ldlm_lock *__ldlm_handle2lock(struct lustre_handle *handle, int flags)
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock = NULL, *retval = NULL;

        lock = class_handle2object(handle->cookie);

        LASSERT(lock->l_resource != NULL);
        ns = lock->l_resource->lr_namespace;

        l_lock(&ns->ns_lock);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if (lock->l_destroyed) {
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);

        if (flags && (lock->l_flags & flags)) {

        lock->l_flags |= flags;

        l_unlock(&ns->ns_lock);
struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *ns,
                                      struct lustre_handle *handle)
        struct ldlm_lock *retval = NULL;

        l_lock(&ns->ns_lock);
        retval = __ldlm_handle2lock(handle, 0);
        l_unlock(&ns->ns_lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
        ldlm_res2desc(lock->l_resource, &desc->l_resource);
        desc->l_req_mode = lock->l_req_mode;
        desc->l_granted_mode = lock->l_granted_mode;
        memcpy(&desc->l_policy_data, &lock->l_policy_data,
               sizeof(desc->l_policy_data));
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            void *data, int datalen)
        struct ldlm_ast_work *w;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (new && (lock->l_flags & LDLM_FL_AST_SENT))

        CDEBUG(D_OTHER, "lock %p incompatible; sending blocking AST.\n", lock);

        OBD_ALLOC(w, sizeof(*w));

        w->w_datalen = datalen;

        LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
        lock->l_flags |= LDLM_FL_AST_SENT;
        /* If the enqueuing client said so, tell the AST recipient to
         * discard dirty data, rather than writing back. */
        if (new->l_flags & LDLM_AST_DISCARD_DATA)
                lock->l_flags |= LDLM_FL_DISCARD_DATA;

        ldlm_lock2desc(new, &w->w_desc);

        w->w_lock = LDLM_LOCK_GET(lock);
        list_add(&w->w_list, lock->l_resource->lr_tmp);

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        ldlm_lock_addref_internal(lock, mode);
/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_lock_remove_from_lru(lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR))
                lock->l_readers++;
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP))
                lock->l_writers++;
        lock->l_last_used = jiffies;
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
        struct ldlm_namespace *ns;

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        ns = lock->l_resource->lr_namespace;
        l_lock(&ns->ns_lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                LASSERT(lock->l_readers > 0);
                lock->l_readers--;
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP)) {
                LASSERT(lock->l_writers > 0);
                lock->l_writers--;
        }

        if (lock->l_flags & LDLM_FL_LOCAL &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                lock->l_flags |= LDLM_FL_CBPENDING;
        }

        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                /* If we received a blocked AST and this was the last reference,
                 * run the callback. */
                if (ns->ns_client == LDLM_NAMESPACE_SERVER && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                l_unlock(&ns->ns_lock);
                if (ldlm_bl_to_thread(ns, NULL, lock) != 0)
                        ldlm_handle_bl_callback(ns, NULL, lock);
        } else if (ns->ns_client == LDLM_NAMESPACE_CLIENT &&
                   !lock->l_readers && !lock->l_writers) {
                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                LASSERT(list_empty(&lock->l_lru));
                LASSERT(ns->ns_nr_unused >= 0);
                list_add_tail(&lock->l_lru, &ns->ns_unused_list);
                ns->ns_nr_unused++;
                l_unlock(&ns->ns_lock);
                ldlm_cancel_lru(ns, LDLM_ASYNC);
        } else {
                l_unlock(&ns->ns_lock);
        }

        LDLM_LOCK_PUT(lock); /* matches the ldlm_lock_get in addref */
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        LASSERT(lock != NULL);
        ldlm_lock_decref_internal(lock, mode);
/* This will drop a lock reference and mark it for destruction, but will not
 * necessarily cancel the lock before returning. */
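
/*
 * Rough equivalence sketch (illustration only): calling
 *
 *      ldlm_lock_decref_and_cancel(&lockh, LCK_EX);
 *
 * behaves like setting LDLM_FL_CBPENDING on the lock and then calling
 * ldlm_lock_decref(&lockh, LCK_EX), so the final-decref path in
 * ldlm_lock_decref_internal() treats it as a pending blocking callback.
 */
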
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        lock->l_flags |= LDLM_FL_CBPENDING;
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_lock_decref_internal(lock, mode);
/* NOTE: called by
 *  - ldlm_lock_enqueue
 *  - ldlm_reprocess_queue
 *  - ldlm_lock_convert
 */
void ldlm_grant_lock(struct ldlm_lock *lock, void *data, int datalen,
                     int run_ast)
        struct ldlm_resource *res = lock->l_resource;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        lock->l_granted_mode = lock->l_req_mode;
        ldlm_resource_add_lock(res, &res->lr_granted, lock);

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (run_ast && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, data, datalen);

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
/* returns a referenced lock or NULL.  See the flag descriptions below, in the
 * comment above ldlm_lock_match */
static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode,
                                      ldlm_policy_data_t *policy,
                                      struct ldlm_lock *old_lock, int flags)
        struct ldlm_lock *lock;
        struct list_head *tmp;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock == old_lock)

                /* llite sometimes wants to match locks that will be
                 * canceled when their users drop, but we allow it to match
                 * if it passes in CBPENDING and the lock still has users.
                 * this is generally only going to be used by children
                 * whose parents already hold a lock so forward progress
                 * can still happen. */
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    !(flags & LDLM_FL_CBPENDING))
                        continue;
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    lock->l_readers == 0 && lock->l_writers == 0)
                        continue;

                if (!(lock->l_req_mode & mode))
                        continue;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_policy_data.l_extent.start >
                     policy->l_extent.start ||
                     lock->l_policy_data.l_extent.end < policy->l_extent.end))
                        continue;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
                        continue;

                /* We match if we have existing lock with same or wider set
                 * of bits. */
                if (lock->l_resource->lr_type == LDLM_IBITS &&
                    ((lock->l_policy_data.l_inodebits.bits &
                      policy->l_inodebits.bits) !=
                     policy->l_inodebits.bits))
                        continue;

                if (lock->l_destroyed)
                        continue;

                if ((flags & LDLM_FL_LOCAL_ONLY) &&
                    !(lock->l_flags & LDLM_FL_LOCAL))
                        continue;

                if (flags & LDLM_FL_TEST_LOCK)
                        LDLM_LOCK_GET(lock);
                else
                        ldlm_lock_addref_internal(lock, mode);
                return lock;
        }

        return NULL;
void ldlm_lock_allow_match(struct ldlm_lock *lock)
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        lock->l_flags |= LDLM_FL_CAN_MATCH;
        wake_up(&lock->l_waitq);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
/* Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (ie, connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 *     to be canceled can still be matched as long as they still have reader
 *     or writer references
 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
 *     just tell us if we would have matched.
 *
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock
 */
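
/*
 * Typical use, sketched for illustration (the resource id, extent range and
 * mode below are made up, not taken from a real caller):
 *
 *      struct lustre_handle lockh;
 *      ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
 *
 *      if (ldlm_lock_match(ns, LDLM_FL_BLOCK_GRANTED, &res_id, LDLM_EXTENT,
 *                          &policy, LCK_PR, &lockh)) {
 *              // matched: lockh references an addref()ed lock; release it
 *              // with ldlm_lock_decref(&lockh, LCK_PR) when finished
 *      }
 */
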
int ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                    struct ldlm_res_id *res_id, __u32 type,
                    ldlm_policy_data_t *policy, ldlm_mode_t mode,
                    struct lustre_handle *lockh)
        struct ldlm_resource *res;
        struct ldlm_lock *lock, *old_lock = NULL;
        int rc = 0;

        if (ns == NULL) {
                old_lock = ldlm_handle2lock(lockh);

                ns = old_lock->l_resource->lr_namespace;
                res_id = &old_lock->l_resource->lr_name;
                type = old_lock->l_resource->lr_type;
                mode = old_lock->l_req_mode;
        }

        res = ldlm_resource_get(ns, NULL, *res_id, type, 0);
        if (res == NULL) {
                LASSERT(old_lock == NULL);
                RETURN(0);
        }

        l_lock(&ns->ns_lock);

        lock = search_queue(&res->lr_granted, mode, policy, old_lock, flags);

        if (flags & LDLM_FL_BLOCK_GRANTED)

        lock = search_queue(&res->lr_converting, mode, policy, old_lock, flags);

        lock = search_queue(&res->lr_waiting, mode, policy, old_lock, flags);

        ldlm_resource_putref(res);
        l_unlock(&ns->ns_lock);

        ldlm_lock2handle(lock, lockh);
        if (!(lock->l_flags & LDLM_FL_CAN_MATCH)) {
                struct l_wait_info lwi;
                if (lock->l_completion_ast)
                        lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC,
                                               NULL);

                lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ, NULL, NULL, NULL);

                /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
                l_wait_event(lock->l_waitq,
                             (lock->l_flags & LDLM_FL_CAN_MATCH), &lwi);
        }

        if (lock)
                LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")",
                           type == LDLM_PLAIN ? res_id->name[2] :
                                policy->l_extent.start,
                           type == LDLM_PLAIN ? res_id->name[3] :
                                policy->l_extent.end);
        else if (!(flags & LDLM_FL_TEST_LOCK)) /* less verbose for test-only */
                LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
                                  LPU64"/"LPU64" ("LPU64" "LPU64")", ns,
                                  type, mode, res_id->name[0], res_id->name[1],
                                  type == LDLM_PLAIN ? res_id->name[2] :
                                        policy->l_extent.start,
                                  type == LDLM_PLAIN ? res_id->name[3] :
                                        policy->l_extent.end);

        if (old_lock)
                LDLM_LOCK_PUT(old_lock);
        if (flags & LDLM_FL_TEST_LOCK && rc)
                LDLM_LOCK_PUT(lock);
/* Returns a referenced lock */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   struct lustre_handle *parent_lock_handle,
                                   struct ldlm_res_id res_id, __u32 type,
                                   ldlm_mode_t mode,
                                   ldlm_blocking_callback blocking,
                                   ldlm_completion_callback completion,
                                   ldlm_glimpse_callback glimpse,
                                   void *data, __u32 lvb_len)
        struct ldlm_resource *res, *parent_res = NULL;
        struct ldlm_lock *lock, *parent_lock = NULL;

        if (parent_lock_handle) {
                parent_lock = ldlm_handle2lock(parent_lock_handle);
                parent_res = parent_lock->l_resource;
        }

        res = ldlm_resource_get(ns, parent_res, res_id, type, 1);

        lock = ldlm_lock_new(parent_lock, res);
        ldlm_resource_putref(res);
        if (parent_lock != NULL)
                LDLM_LOCK_PUT(parent_lock);

        lock->l_req_mode = mode;
        lock->l_ast_data = data;
        lock->l_blocking_ast = blocking;
        lock->l_completion_ast = completion;
        lock->l_glimpse_ast = glimpse;

        lock->l_lvb_len = lvb_len;
        OBD_ALLOC(lock->l_lvb_data, lvb_len);
        if (lock->l_lvb_data == NULL) {
                OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
                RETURN(NULL);
        }
ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                               struct ldlm_lock **lockp,
                               void *cookie, int *flags)
        struct ldlm_lock *lock = *lockp;
        struct ldlm_resource *res = lock->l_resource;
        int local = res->lr_namespace->ns_client;
        ldlm_processing_policy policy;
        ldlm_error_t rc = ELDLM_OK;

        /* policies are not executed on the client or during replay */
        if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
            && !local && ns->ns_policy) {
                rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
                                   NULL);
                if (rc == ELDLM_LOCK_REPLACED) {
                        /* The lock that was returned has already been granted,
                         * and placed into lockp.  If it's not the same as the
                         * one we passed in, then destroy the old one and our
                         * work here is done. */
                        if (lock != *lockp) {
                                ldlm_lock_destroy(lock);
                                LDLM_LOCK_PUT(lock);
                        }
                        *flags |= LDLM_FL_LOCK_CHANGED;
                        RETURN(0);
                } else if (rc == ELDLM_LOCK_ABORTED ||
                           (rc == 0 && (*flags & LDLM_FL_INTENT_ONLY))) {
                        ldlm_lock_destroy(lock);
                        RETURN(rc);
                }
                LASSERT(rc == ELDLM_OK);
        }

        l_lock(&ns->ns_lock);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted before
                 * we got a chance to actually enqueue it.  We don't need to do
                 * anything. */
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                            LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
        }

        /* Some flags from the enqueue want to make it into the AST, via the
         * lock's l_flags. */
        lock->l_flags |= (*flags & LDLM_AST_DISCARD_DATA);

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below).  In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags'.
         *
         * We do exactly the same thing during recovery, when the server is
         * more or less trusting the clients not to lie.
         *
         * FIXME (bug 268): Detect obvious lies by checking compatibility in
         * granted/converting queues. */
        ldlm_resource_unlink_lock(lock);
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                else
                        ldlm_grant_lock(lock, NULL, 0, 0);

        } else if (*flags & LDLM_FL_REPLAY) {
                if (*flags & LDLM_FL_BLOCK_CONV) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                } else if (*flags & LDLM_FL_BLOCK_WAIT) {
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
                        ldlm_grant_lock(lock, NULL, 0, 0);
                }
                /* If no flags, fall through to normal enqueue path. */
        }

        policy = ldlm_processing_policy_table[res->lr_type];
        policy(lock, flags, 1, &rc);

        l_unlock(&ns->ns_lock);
/* Must be called with the namespace lock taken: queue is waiting or converting. */
int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue)
        struct list_head *tmp, *pos;
        ldlm_processing_policy policy;
        int flags;
        int rc = LDLM_ITER_CONTINUE;
        ldlm_error_t err;

        policy = ldlm_processing_policy_table[res->lr_type];

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;
                pending = list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                flags = 0;
                rc = policy(pending, &flags, 0, &err);
                if (rc != LDLM_ITER_CONTINUE)
                        break;
        }
int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list)
        struct list_head *tmp, *pos;
        int rc, retval = 0;

        l_check_no_ns_lock(ns);

        list_for_each_safe(tmp, pos, rpc_list) {
                struct ldlm_ast_work *w =
                        list_entry(tmp, struct ldlm_ast_work, w_list);

                /* It's possible to receive a completion AST before we've set
                 * the l_completion_ast pointer: either because the AST arrived
                 * before the reply, or simply because there's a small race
                 * window between receiving the reply and finishing the local
                 * enqueue.
                 *
                 * This can't happen with the blocking_ast, however, because we
                 * will never call the local blocking_ast until we drop our
                 * reader/writer reference, which we won't do until we get the
                 * reply and finish enqueueing. */
                LASSERT(w->w_lock != NULL);
                if (w->w_blocking) {
                        LASSERT(w->w_lock->l_blocking_ast != NULL);
                        rc = w->w_lock->l_blocking_ast
                                (w->w_lock, &w->w_desc, w->w_data,
                                 w->w_datalen);
                } else if (w->w_lock->l_completion_ast != NULL) {
                        LASSERT(w->w_lock->l_completion_ast != NULL);
                        rc = w->w_lock->l_completion_ast(w->w_lock, w->w_flags,
                                                         NULL);
                } else {
                        rc = 0;
                }
                if (rc == -ERESTART)
                        retval = -ERESTART;
                else if (rc)
                        CDEBUG(D_DLMTRACE, "Failed AST - should clean & "
                               "disconnect client\n");
                LDLM_LOCK_PUT(w->w_lock);
                list_del(&w->w_list);
                OBD_FREE(w, sizeof(*w));
        }
        RETURN(retval);
static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
        ldlm_reprocess_all(res);
        return LDLM_ITER_CONTINUE;
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
        int i, rc;

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *next;
                list_for_each_safe(tmp, next, &(ns->ns_hash[i])) {
                        struct ldlm_resource *res =
                                list_entry(tmp, struct ldlm_resource, lr_hash);

                        ldlm_resource_getref(res);
                        l_unlock(&ns->ns_lock);
                        rc = reprocess_one_queue(res, NULL);
                        l_lock(&ns->ns_lock);
                        ldlm_resource_putref(res);
                        if (rc == LDLM_ITER_STOP)

        l_unlock(&ns->ns_lock);
void ldlm_reprocess_all(struct ldlm_resource *res)
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        int rc;

        /* Local lock trees don't get reprocessed. */
        if (res->lr_namespace->ns_client) {
                return;
        }

        l_lock(&res->lr_namespace->ns_lock);
        res->lr_tmp = &rpc_list;

        rc = ldlm_reprocess_queue(res, &res->lr_converting);
        if (rc == LDLM_ITER_CONTINUE)
                ldlm_reprocess_queue(res, &res->lr_waiting);

        l_unlock(&res->lr_namespace->ns_lock);

        rc = ldlm_run_ast_work(res->lr_namespace, &rpc_list);
        if (rc == -ERESTART) {
                LASSERT(list_empty(&rpc_list));
void ldlm_cancel_callback(struct ldlm_lock *lock)
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (!(lock->l_flags & LDLM_FL_CANCEL)) {
                lock->l_flags |= LDLM_FL_CANCEL;
                if (lock->l_blocking_ast) {
                        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                        // l_check_no_ns_lock(lock->l_resource->lr_namespace);
                        lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
                                             LDLM_CB_CANCELING);
                        return;
                } else {
                        LDLM_DEBUG(lock, "no blocking ast");
                }
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
void ldlm_lock_cancel(struct ldlm_lock *lock)
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;

        /* There's no race between calling this and taking the ns lock below;
         * a lock can only be put on the waiting list once, because it can only
         * issue a blocking AST once. */
        ldlm_del_waiting_lock(lock);

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);
        /* Please do not, no matter how tempting, remove this LBUG without
         * talking to me first. -phik */
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                LBUG();
        }

        ldlm_cancel_callback(lock);

        ldlm_resource_unlink_lock(lock);
        ldlm_lock_destroy(lock);
        l_unlock(&ns->ns_lock);
int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);

        lock->l_ast_data = data;
        LDLM_LOCK_PUT(lock);
void ldlm_cancel_locks_for_export(struct obd_export *exp)
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;

        l_lock(&ns->ns_lock);
        while (!list_empty(&exp->exp_ldlm_data.led_held_locks)) {
                lock = list_entry(exp->exp_ldlm_data.led_held_locks.next,
                                  struct ldlm_lock, l_export_chain);
                res = ldlm_resource_getref(lock->l_resource);
                LDLM_DEBUG(lock, "export %p", exp);
                ldlm_lock_cancel(lock);
                l_unlock(&ns->ns_lock);
                ldlm_reprocess_all(res);
                ldlm_resource_putref(res);
                l_lock(&ns->ns_lock);
        }
        l_unlock(&ns->ns_lock);
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        int *flags)
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        int old_mode, rc;
        int pflags = 0;
        ldlm_error_t err;

        if (new_mode == lock->l_granted_mode) { // No changes? Just return.
                *flags |= LDLM_FL_BLOCK_GRANTED;
                RETURN(lock->l_resource);
        }

        LASSERTF(new_mode == LCK_PW && lock->l_granted_mode == LCK_PR,
                 "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);

        old_mode = lock->l_req_mode;
        lock->l_req_mode = new_mode;
        ldlm_resource_unlink_lock(lock);

        /* If this is a local resource, put it on the appropriate list. */
        if (res->lr_namespace->ns_client) {
                if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                } else {
                        /* This should never happen, because of the way the
                         * server handles conversions. */
                        LDLM_ERROR(lock, "Erroneous flags %d on local lock\n",
                                   *flags);

                        res->lr_tmp = &rpc_list;
                        ldlm_grant_lock(lock, NULL, 0, 0);

                        /* FIXME: completion handling not with ns_lock held! */
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0, NULL);

                ldlm_processing_policy policy;
                policy = ldlm_processing_policy_table[res->lr_type];
                res->lr_tmp = &rpc_list;
                rc = policy(lock, &pflags, 0, &err);

                if (rc == LDLM_ITER_STOP) {
                        lock->l_req_mode = old_mode;
                        ldlm_resource_add_lock(res, &res->lr_granted, lock);

                        *flags |= LDLM_FL_BLOCK_GRANTED;

        l_unlock(&ns->ns_lock);

        ldlm_run_ast_work(ns, &rpc_list);
void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
        char str[PTL_NALFMT_SIZE];
        struct obd_device *obd = NULL;

        if (!((portal_debug | D_ERROR) & level))
                return;

        if (lock == NULL) {
                CDEBUG(level, "  NULL LDLM lock\n");
                return;
        }

        CDEBUG(level, "  -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d)\n",
               lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
               pos);
        if (lock->l_conn_export != NULL)
                obd = lock->l_conn_export->exp_obd;
        if (lock->l_export && lock->l_export->exp_connection) {
                CDEBUG(level, "  Node: NID %s on %s (rhandle: "LPX64")\n",
                       ptlrpc_peernid2str(&lock->l_export->exp_connection->c_peer, str),
                       lock->l_export->exp_connection->c_peer.peer_ni->pni_name,
                       lock->l_remote_handle.cookie);
        } else if (obd == NULL) {
                CDEBUG(level, "  Node: local\n");
        } else {
                struct obd_import *imp = obd->u.cli.cl_import;
                CDEBUG(level, "  Node: NID %s on %s (rhandle: "LPX64")\n",
                       ptlrpc_peernid2str(&imp->imp_connection->c_peer, str),
                       imp->imp_connection->c_peer.peer_ni->pni_name,
                       lock->l_remote_handle.cookie);
        }
        CDEBUG(level, "  Resource: %p ("LPU64"/"LPU64")\n", lock->l_resource,
               lock->l_resource->lr_name.name[0],
               lock->l_resource->lr_name.name[1]);
        CDEBUG(level, "  Req mode: %s, grant mode: %s, rc: %u, read: %d, "
               "write: %d\n", ldlm_lockname[lock->l_req_mode],
               ldlm_lockname[lock->l_granted_mode],
               atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers);
        if (lock->l_resource->lr_type == LDLM_EXTENT)
                CDEBUG(level, "  Extent: "LPU64" -> "LPU64
                       " (req "LPU64"-"LPU64")\n",
                       lock->l_policy_data.l_extent.start,
                       lock->l_policy_data.l_extent.end,
                       lock->l_req_extent.start, lock->l_req_extent.end);
        else if (lock->l_resource->lr_type == LDLM_FLOCK)
                CDEBUG(level, "  Pid: "LPU64" Extent: "LPU64" -> "LPU64"\n",
                       lock->l_policy_data.l_flock.pid,
                       lock->l_policy_data.l_flock.start,
                       lock->l_policy_data.l_flock.end);
        else if (lock->l_resource->lr_type == LDLM_IBITS)
                CDEBUG(level, "  Bits: "LPX64"\n",
                       lock->l_policy_data.l_inodebits.bits);
void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        if (lock == NULL)
                return;

        ldlm_lock_dump(D_OTHER, lock, 0);

        LDLM_LOCK_PUT(lock);