/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <linux/slab.h>
# include <linux/dcache.h>
# include <linux/namei.h>
# include <linux/module.h>
# include <linux/lustre_dlm.h>
#else
# include <liblustre.h>
# include <libcfs/kp30.h>
#endif

#include <linux/obd_class.h>
#include "ldlm_internal.h"

//struct lustre_lock ldlm_everything_lock;

char *ldlm_lockname[] = {
        [0] "--",
        [LCK_EX] "EX",
        [LCK_PW] "PW",
        [LCK_PR] "PR",
        [LCK_CW] "CW",
        [LCK_CR] "CR",
        [LCK_NL] "NL",
        [LCK_GROUP] "GROUP"
};

char *ldlm_typename[] = {
        [LDLM_PLAIN] "PLN",
        [LDLM_EXTENT] "EXT",
        [LDLM_FLOCK] "FLK",
        [LDLM_IBITS] "IBT",
};

char *ldlm_it2str(int it)
{
        switch (it) {
        case IT_OPEN:
                return "open";
        case IT_CREAT:
                return "creat";
        case (IT_OPEN | IT_CREAT):
                return "open|creat";
        case IT_READDIR:
                return "readdir";
        case IT_GETATTR:
                return "getattr";
        case IT_LOOKUP:
                return "lookup";
        case IT_UNLINK:
                return "unlink";
        case IT_GETXATTR:
                return "getxattr";
        default:
                CERROR("Unknown intent %d\n", it);
                return "UNKNOWN";
        }
}

extern kmem_cache_t *ldlm_lock_slab;

static ldlm_processing_policy ldlm_processing_policy_table[] = {
        [LDLM_PLAIN] ldlm_process_plain_lock,
        [LDLM_EXTENT] ldlm_process_extent_lock,
#ifdef __KERNEL__
        [LDLM_FLOCK] ldlm_process_flock_lock,
#endif
        [LDLM_IBITS] ldlm_process_inodebits_lock,
};

ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
{
        return ldlm_processing_policy_table[res->lr_type];
}

void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
{
        ns->ns_policy = arg;
}

/*
 * REFCOUNTED LOCK OBJECTS
 */

/*
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        atomic_inc(&lock->l_refc);
        return lock;
}

void ldlm_lock_put(struct ldlm_lock *lock)
{
        ENTRY;

        LASSERT(lock->l_resource != LP_POISON);
        LASSERT(atomic_read(&lock->l_refc) > 0);
        if (atomic_dec_and_test(&lock->l_refc)) {
                struct ldlm_resource *res = lock->l_resource;
                struct ldlm_namespace *ns = res->lr_namespace;

                LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing");

                LASSERT(lock->l_resource != LP_POISON);
                LASSERT(lock->l_destroyed);
                LASSERT(list_empty(&lock->l_res_link));

                if (lock->l_parent)
                        LDLM_LOCK_PUT(lock->l_parent);

                ldlm_resource_putref(lock->l_resource);
                lock->l_resource = NULL;
                if (lock->l_export)
                        class_export_put(lock->l_export);
                atomic_dec(&ns->ns_locks);

                if (lock->l_lvb_data != NULL)
                        OBD_FREE(lock->l_lvb_data, lock->l_lvb_len);

                OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
        }

        EXIT;
}

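/*
 * Editor's illustrative sketch (not from the original source): the
 * refcounting contract described above.  Any code that stashes a
 * struct ldlm_lock pointer must hold its own reference; ldlm_lock_put()
 * may free the lock the moment the last reference is dropped.
 */
#if 0
static void example_lock_ref_usage(struct ldlm_lock *lock)
{
        LDLM_LOCK_GET(lock);    /* take a private reference */
        /* ... safe to dereference 'lock' here ... */
        LDLM_LOCK_PUT(lock);    /* drop it; the last put frees the lock */
}
#endif
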
void ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
        ENTRY;
        spin_lock(&lock->l_resource->lr_namespace->ns_unused_lock);
        if (!list_empty(&lock->l_lru)) {
                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
                list_del_init(&lock->l_lru);
                lock->l_resource->lr_namespace->ns_nr_unused--;
                LASSERT(lock->l_resource->lr_namespace->ns_nr_unused >= 0);
        }
        spin_unlock(&lock->l_resource->lr_namespace->ns_unused_lock);
        EXIT;
}

/* This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock.
 * Because it's not in the hash table anymore. -phil */
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        ENTRY;

        lock_res(lock->l_resource);

        if (!list_empty(&lock->l_children)) {
                LDLM_ERROR(lock, "still has children (%p)!",
                           lock->l_children.next);
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (!list_empty(&lock->l_res_link)) {
                LDLM_ERROR(lock, "lock still on resource");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (lock->l_destroyed) {
                LASSERT(list_empty(&lock->l_lru));
                unlock_res(lock->l_resource);
                EXIT;
                return;
        }
        lock->l_destroyed = 1;

        if (lock->l_export) {
                spin_lock(&lock->l_export->exp_ldlm_data.led_lock);
                if (!list_empty(&lock->l_export_chain))
                        list_del_init(&lock->l_export_chain);
                spin_unlock(&lock->l_export->exp_ldlm_data.led_lock);
        }
        LASSERT(list_empty(&lock->l_export_chain));

        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

#if 0
        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        if (lock->l_export)
                class_export_put(lock->l_export);
        lock->l_export = NULL;
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0);
#endif

        unlock_res(lock->l_resource);
        LDLM_LOCK_PUT(lock);
        EXIT;
}

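/*
 * Editor's illustrative sketch (not from the original source):
 * ldlm_lock_destroy() unhashes the lock and drops the allocation
 * reference, but a caller that took its own reference must still PUT it;
 * per the comment above, the lock can never be looked up again once
 * destroyed.
 */
#if 0
static void example_destroy_usage(struct ldlm_lock *lock)
{
        /* 'lock' was obtained with a reference, e.g. via ldlm_handle2lock() */
        ldlm_lock_cancel(lock);         /* cancel calls ldlm_lock_destroy() */
        LDLM_LOCK_PUT(lock);            /* drop the caller's own reference */
}
#endif
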
/* this is called by portals_handle2object with the handle lock taken */
static void lock_handle_addref(void *lock)
{
        LDLM_LOCK_GET((struct ldlm_lock *)lock);
}

/*
 * usage: pass in a resource on which you have done ldlm_resource_get
 *        pass in a parent lock on which you have done a ldlm_lock_get
 *        after return, ldlm_*_put the resource and parent
 * returns: lock with refcount 2 - one for current caller and one for remote
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
                                       struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;
        ENTRY;

        if (resource == NULL)
                LBUG();

        OBD_SLAB_ALLOC(lock, ldlm_lock_slab, SLAB_NOFS, sizeof(*lock));
        if (lock == NULL)
                RETURN(NULL);

        lock->l_resource = ldlm_resource_getref(resource);

        atomic_set(&lock->l_refc, 2);
        INIT_LIST_HEAD(&lock->l_children);
        INIT_LIST_HEAD(&lock->l_res_link);
        INIT_LIST_HEAD(&lock->l_lru);
        INIT_LIST_HEAD(&lock->l_export_chain);
        INIT_LIST_HEAD(&lock->l_pending_chain);
        INIT_LIST_HEAD(&lock->l_tmp);
        INIT_LIST_HEAD(&lock->l_bl_ast);
        INIT_LIST_HEAD(&lock->l_cp_ast);
        init_waitqueue_head(&lock->l_waitq);
        lock->l_blocking_lock = NULL;

        atomic_inc(&resource->lr_namespace->ns_locks);

        if (parent != NULL) {
                spin_lock(&resource->lr_namespace->ns_hash_lock);
                lock->l_parent = LDLM_LOCK_GET(parent);
                list_add(&lock->l_childof, &parent->l_children);
                spin_unlock(&resource->lr_namespace->ns_hash_lock);
        }

        INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, lock_handle_addref);

        RETURN(lock);
}

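/*
 * Editor's illustrative sketch (not from the original source) of the
 * calling convention in the comment above ldlm_lock_new(): the caller
 * holds its own references on the resource (and parent, if any) across
 * the call and drops them afterwards; the new lock holds its own.
 */
#if 0
static struct ldlm_lock *example_lock_new_usage(struct ldlm_resource *res)
{
        struct ldlm_lock *lock;

        ldlm_resource_getref(res);              /* caller's ref for the call */
        lock = ldlm_lock_new(NULL, res);        /* no parent in this sketch */
        ldlm_resource_putref(res);              /* lock holds its own ref now */
        return lock;                            /* refcount 2, per the comment */
}
#endif
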
int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              struct ldlm_res_id new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;
        ENTRY;

        if (memcmp(&new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                RETURN(0);
        }

        LASSERT(new_resid.name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(list_empty(&lock->l_res_link));

        lock->l_resource = ldlm_resource_get(ns, NULL, new_resid,
                                             lock->l_resource->lr_type, 1);
        if (lock->l_resource == NULL) {
                LBUG();
                RETURN(-ENOMEM);
        }

        /* ...and the flowers are still standing! */
        ldlm_resource_putref(oldres);

        RETURN(0);
}

void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->cookie = lock->l_handle.h_cookie;
}

/* if flags: atomically get the lock and set the flags.
 *           Return NULL if flag already set
 */
struct ldlm_lock *__ldlm_handle2lock(struct lustre_handle *handle, int flags)
{
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock = NULL, *retval = NULL;
        ENTRY;

        LASSERT(handle);

        lock = class_handle2object(handle->cookie);
        if (lock == NULL)
                RETURN(NULL);

        LASSERT(lock->l_resource != NULL);
        ns = lock->l_resource->lr_namespace;
        LASSERT(ns != NULL);

        lock_res(lock->l_resource);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if (lock->l_destroyed) {
                unlock_res(lock->l_resource);
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags && (lock->l_flags & flags)) {
                unlock_res(lock->l_resource);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags)
                lock->l_flags |= flags;

        unlock_res(lock->l_resource);
        retval = lock;
        EXIT;
 out:
        return retval;
}

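/*
 * Editor's illustrative sketch (not from the original source): handles are
 * how lock pointers cross API and network boundaries.  Converting back
 * through __ldlm_handle2lock() both validates the cookie and takes a
 * reference; passing a flag makes the lookup double as an atomic
 * test-and-set, so two racing threads cannot both "win" the flag.
 * LDLM_FL_CANCELING is assumed here purely as an example flag.
 */
#if 0
static void example_handle_usage(struct ldlm_lock *lock)
{
        struct lustre_handle lockh;
        struct ldlm_lock *l;

        ldlm_lock2handle(lock, &lockh);

        /* NULL means the lock was destroyed, or somebody else already
         * set the flag and owns the operation */
        l = __ldlm_handle2lock(&lockh, LDLM_FL_CANCELING);
        if (l != NULL)
                LDLM_LOCK_PUT(l);
}
#endif
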
struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *ns,
                                      struct lustre_handle *handle)
{
        struct ldlm_lock *retval = NULL;
        retval = __ldlm_handle2lock(handle, 0);
        return retval;
}

void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        ldlm_res2desc(lock->l_resource, &desc->l_resource);
        desc->l_req_mode = lock->l_req_mode;
        desc->l_granted_mode = lock->l_granted_mode;
        memcpy(&desc->l_policy_data, &lock->l_policy_data,
               sizeof(desc->l_policy_data));
}

void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                           struct list_head *work_list)
{
        if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
                lock->l_flags |= LDLM_FL_AST_SENT;
                /* If the enqueuing client said so, tell the AST recipient to
                 * discard dirty data, rather than writing back. */
                if (new->l_flags & LDLM_AST_DISCARD_DATA)
                        lock->l_flags |= LDLM_FL_DISCARD_DATA;
                LASSERT(list_empty(&lock->l_bl_ast));
                list_add(&lock->l_bl_ast, work_list);
                LDLM_LOCK_GET(lock);
                LASSERT(lock->l_blocking_lock == NULL);
                lock->l_blocking_lock = LDLM_LOCK_GET(new);
        }
}

void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
{
        if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
                LDLM_DEBUG(lock, "lock granted; sending completion AST.");
                lock->l_flags |= LDLM_FL_CP_REQD;
                LASSERT(list_empty(&lock->l_cp_ast));
                list_add(&lock->l_cp_ast, work_list);
                LDLM_LOCK_GET(lock);
        }
}

/* must be called with lr_lock held */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            struct list_head *work_list)
{
        ENTRY;
        check_res_locked(lock->l_resource);
        if (new)
                ldlm_add_bl_work_item(lock, new, work_list);
        else
                ldlm_add_cp_work_item(lock, work_list);
        EXIT;
}

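/*
 * Editor's illustrative sketch (not from the original source) of the
 * work-item protocol: AST candidates are collected on a private list while
 * lr_lock is held, and the RPCs are only sent after the resource is
 * unlocked (see ldlm_run_bl_ast_work()/ldlm_run_cp_ast_work() below).
 */
#if 0
static void example_ast_work_usage(struct ldlm_lock *lock,
                                   struct ldlm_lock *conflicting)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);

        lock_res(lock->l_resource);
        ldlm_add_ast_work_item(lock, conflicting, &rpc_list); /* blocking AST */
        unlock_res(lock->l_resource);

        ldlm_run_bl_ast_work(&rpc_list);        /* send RPCs without lr_lock */
}
#endif
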
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        ldlm_lock_remove_from_lru(lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR))
                lock->l_readers++;
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP))
                lock->l_writers++;
        lock->l_last_used = jiffies;
        LDLM_LOCK_GET(lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}

/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        lock_res(lock->l_resource);
        ldlm_lock_addref_internal_nolock(lock, mode);
        unlock_res(lock->l_resource);
}

void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
        struct ldlm_namespace *ns;
        ENTRY;

        ns = lock->l_resource->lr_namespace;

        lock_res(lock->l_resource);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                LASSERT(lock->l_readers > 0);
                lock->l_readers--;
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP)) {
                LASSERT(lock->l_writers > 0);
                lock->l_writers--;
        }

        if (lock->l_flags & LDLM_FL_LOCAL &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                lock->l_flags |= LDLM_FL_CBPENDING;
        }

        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                /* If we received a blocked AST and this was the last reference,
                 * run the callback. */
                if (ns->ns_client == LDLM_NAMESPACE_SERVER && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                unlock_res(lock->l_resource);
                if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
                    ldlm_bl_to_thread(ns, NULL, lock) != 0)
                        ldlm_handle_bl_callback(ns, NULL, lock);
        } else if (ns->ns_client == LDLM_NAMESPACE_CLIENT &&
                   !lock->l_readers && !lock->l_writers) {
                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                LASSERT(list_empty(&lock->l_lru));
                LASSERT(ns->ns_nr_unused >= 0);
                spin_lock(&ns->ns_unused_lock);
                list_add_tail(&lock->l_lru, &ns->ns_unused_list);
                ns->ns_nr_unused++;
                spin_unlock(&ns->ns_unused_lock);
                unlock_res(lock->l_resource);
                ldlm_cancel_lru(ns, LDLM_ASYNC);
        } else {
                unlock_res(lock->l_resource);
        }

        LDLM_LOCK_PUT(lock);    /* matches the ldlm_lock_get in addref */

        EXIT;
}

void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        LASSERT(lock != NULL);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

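/*
 * Editor's illustrative sketch (not from the original source): user-level
 * reader/writer references are taken and dropped by handle and must balance
 * per mode; the final decref is what may trigger LRU insertion or a pending
 * cancel, as implemented in ldlm_lock_decref_internal() above.
 */
#if 0
static void example_ref_by_handle(struct lustre_handle *lockh)
{
        ldlm_lock_addref(lockh, LCK_PR);        /* start a read */
        /* ... read under the lock ... */
        ldlm_lock_decref(lockh, LCK_PR);        /* done; may go to the LRU */
}
#endif
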
/* This will drop a lock reference and mark it for destruction, but will not
 * necessarily cancel the lock before returning. */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        ENTRY;

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        lock_res(lock->l_resource);
        lock->l_flags |= LDLM_FL_CBPENDING;
        unlock_res(lock->l_resource);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

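/*
 * Editor's illustrative sketch (not from the original source): the
 * difference from plain ldlm_lock_decref() is only the CBPENDING marking --
 * the lock is doomed, but the actual cancel may happen later, from the
 * last decref.
 */
#if 0
static void example_decref_and_cancel(struct lustre_handle *lockh)
{
        /* drop our write reference AND tell the LDLM we no longer want the
         * lock cached; do not wait for the cancel to complete */
        ldlm_lock_decref_and_cancel(lockh, LCK_PW);
}
#endif
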
/* Called by:
 *  - ldlm_lock_enqueue
 *  - ldlm_reprocess_queue
 *  - ldlm_lock_convert
 *
 * must be called with lr_lock held
 */
void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        lock->l_granted_mode = lock->l_req_mode;
        ldlm_resource_add_lock(res, &res->lr_granted, lock);

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (work_list && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, work_list);

        EXIT;
}

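/*
 * Editor's illustrative sketch (not from the original source) of the
 * locking rule in the comment above: grant while holding lr_lock, collect
 * completion ASTs on a work list, and send them only after unlocking.
 */
#if 0
static void example_grant_usage(struct ldlm_lock *lock)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);

        lock_res(res);
        ldlm_grant_lock(lock, &rpc_list);       /* queues the completion AST */
        unlock_res(res);

        ldlm_run_cp_ast_work(&rpc_list);
}
#endif
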
/* returns a referenced lock or NULL.  See the flag descriptions below, in the
 * comment above ldlm_lock_match */
static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode,
                                      ldlm_policy_data_t *policy,
                                      struct ldlm_lock *old_lock, int flags)
{
        struct ldlm_lock *lock;
        struct list_head *tmp;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock == old_lock)
                        break;

                /* llite sometimes wants to match locks that will be
                 * canceled when their users drop, but we allow it to match
                 * if it passes in CBPENDING and the lock still has users.
                 * this is generally only going to be used by children
                 * whose parents already hold a lock so forward progress
                 * can still happen. */
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    !(flags & LDLM_FL_CBPENDING))
                        continue;
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    lock->l_readers == 0 && lock->l_writers == 0)
                        continue;

                if (!(lock->l_req_mode & mode))
                        continue;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_policy_data.l_extent.start >
                     policy->l_extent.start ||
                     lock->l_policy_data.l_extent.end < policy->l_extent.end))
                        continue;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    mode == LCK_GROUP &&
                    lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
                        continue;

                /* We match if we have existing lock with same or wider set
                 * of bits. */
                if (lock->l_resource->lr_type == LDLM_IBITS &&
                    ((lock->l_policy_data.l_inodebits.bits &
                      policy->l_inodebits.bits) !=
                     policy->l_inodebits.bits))
                        continue;

                if (lock->l_destroyed || (lock->l_flags & LDLM_FL_FAILED))
                        continue;

                if ((flags & LDLM_FL_LOCAL_ONLY) &&
                    !(lock->l_flags & LDLM_FL_LOCAL))
                        continue;

                if (flags & LDLM_FL_TEST_LOCK)
                        LDLM_LOCK_GET(lock);
                else
                        ldlm_lock_addref_internal_nolock(lock, mode);
                return lock;
        }

        return NULL;
}

void ldlm_lock_allow_match(struct ldlm_lock *lock)
{
        lock_res(lock->l_resource);
        lock->l_flags |= LDLM_FL_CAN_MATCH;
        wake_up(&lock->l_waitq);
        unlock_res(lock->l_resource);
}

/* Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (i.e., connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 *     to be canceled can still be matched as long as they still have reader
 *     or writer references
 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
 *     just tell us if we would have matched.
 *
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock
 */
int ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                    struct ldlm_res_id *res_id, __u32 type,
                    ldlm_policy_data_t *policy, ldlm_mode_t mode,
                    struct lustre_handle *lockh)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock, *old_lock = NULL;
        int rc = 0;
        ENTRY;

        if (ns == NULL) {
                old_lock = ldlm_handle2lock(lockh);
                LASSERT(old_lock);

                ns = old_lock->l_resource->lr_namespace;
                res_id = &old_lock->l_resource->lr_name;
                type = old_lock->l_resource->lr_type;
                mode = old_lock->l_req_mode;
        }

        res = ldlm_resource_get(ns, NULL, *res_id, type, 0);
        if (res == NULL) {
                LASSERT(old_lock == NULL);
                RETURN(0);
        }

        lock_res(res);

        lock = search_queue(&res->lr_granted, mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);
        if (flags & LDLM_FL_BLOCK_GRANTED)
                GOTO(out, rc = 0);
        lock = search_queue(&res->lr_converting, mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);
        lock = search_queue(&res->lr_waiting, mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);

        EXIT;
 out:
        unlock_res(res);
        ldlm_resource_putref(res);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                if (!(lock->l_flags & LDLM_FL_CAN_MATCH)) {
                        struct l_wait_info lwi;
                        if (lock->l_completion_ast) {
                                int err = lock->l_completion_ast(lock,
                                                          LDLM_FL_WAIT_NOREPROC,
                                                                 NULL);
                                if (err) {
                                        rc = 0;
                                        goto out2;
                                }
                        }

                        lwi = LWI_TIMEOUT_INTR(obd_timeout*HZ, NULL,NULL,NULL);

                        /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
                        l_wait_event(lock->l_waitq,
                                     (lock->l_flags & LDLM_FL_CAN_MATCH), &lwi);
                }
        }

 out2:
        if (rc) {
                LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")",
                           type == LDLM_PLAIN ? res_id->name[2] :
                                policy->l_extent.start,
                           type == LDLM_PLAIN ? res_id->name[3] :
                                policy->l_extent.end);
        } else if (!(flags & LDLM_FL_TEST_LOCK)) {/* less verbose for test-only */
                LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
                                  LPU64"/"LPU64" ("LPU64" "LPU64")", ns,
                                  type, mode, res_id->name[0], res_id->name[1],
                                  type == LDLM_PLAIN ? res_id->name[2] :
                                        policy->l_extent.start,
                                  type == LDLM_PLAIN ? res_id->name[3] :
                                        policy->l_extent.end);
        }

        if (old_lock)
                LDLM_LOCK_PUT(old_lock);
        if (flags & LDLM_FL_TEST_LOCK && rc)
                LDLM_LOCK_PUT(lock);

        return rc;
}

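/*
 * Editor's illustrative sketch (not from the original source): a
 * client-side extent match with hypothetical values.  On success the
 * matched lock has been addref'ed in the requested mode and must
 * eventually be dropped with ldlm_lock_decref().
 */
#if 0
static int example_match_usage(struct ldlm_namespace *ns,
                               struct ldlm_res_id *res_id)
{
        ldlm_policy_data_t policy = { .l_extent = { .start = 0,
                                                    .end = ~0ULL } };
        struct lustre_handle lockh;

        if (ldlm_lock_match(ns, LDLM_FL_BLOCK_GRANTED, res_id, LDLM_EXTENT,
                            &policy, LCK_PR, &lockh)) {
                /* ... use the cached lock ... */
                ldlm_lock_decref(&lockh, LCK_PR);
                return 1;
        }
        return 0;
}
#endif
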
/* Returns a referenced lock */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   struct lustre_handle *parent_lock_handle,
                                   struct ldlm_res_id res_id, __u32 type,
                                   ldlm_mode_t mode,
                                   ldlm_blocking_callback blocking,
                                   ldlm_completion_callback completion,
                                   ldlm_glimpse_callback glimpse,
                                   void *data, __u32 lvb_len)
{
        struct ldlm_resource *res, *parent_res = NULL;
        struct ldlm_lock *lock, *parent_lock = NULL;
        ENTRY;

        if (parent_lock_handle) {
                parent_lock = ldlm_handle2lock(parent_lock_handle);
                if (parent_lock)
                        parent_res = parent_lock->l_resource;
        }

        res = ldlm_resource_get(ns, parent_res, res_id,
                                type, 1);
        if (res == NULL)
                RETURN(NULL);

        lock = ldlm_lock_new(parent_lock, res);
        ldlm_resource_putref(res);
        if (parent_lock != NULL)
                LDLM_LOCK_PUT(parent_lock);

        if (lock == NULL)
                RETURN(NULL);

        lock->l_req_mode = mode;
        lock->l_ast_data = data;
        lock->l_blocking_ast = blocking;
        lock->l_completion_ast = completion;
        lock->l_glimpse_ast = glimpse;
        lock->l_pid = current->pid;

        if (lvb_len) {
                lock->l_lvb_len = lvb_len;
                OBD_ALLOC(lock->l_lvb_data, lvb_len);
                if (lock->l_lvb_data == NULL) {
                        OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
                        RETURN(NULL);
                }
        }

        RETURN(lock);
}

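/*
 * Editor's illustrative sketch (not from the original source): creating
 * (but not yet enqueueing) a plain lock with caller-supplied callbacks.
 * The returned lock is referenced; an abandoned lock must be destroyed
 * and put.
 */
#if 0
static struct ldlm_lock *
example_create_usage(struct ldlm_namespace *ns, struct ldlm_res_id res_id,
                     ldlm_blocking_callback blocking,
                     ldlm_completion_callback completion)
{
        /* no parent lock, no glimpse callback, no LVB in this sketch */
        return ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_PR,
                                blocking, completion, NULL, NULL, 0);
}
#endif
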
ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                               struct ldlm_lock **lockp,
                               void *cookie, int *flags)
{
        struct ldlm_lock *lock = *lockp;
        struct ldlm_resource *res = lock->l_resource;
        int local = res->lr_namespace->ns_client;
        ldlm_processing_policy policy;
        ldlm_error_t rc = ELDLM_OK;
        ENTRY;

        /* policies are not executed on the client or during replay */
        if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
            && !local && ns->ns_policy) {
                rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
                                   NULL);
                if (rc == ELDLM_LOCK_REPLACED) {
                        /* The lock that was returned has already been granted,
                         * and placed into lockp.  If it's not the same as the
                         * one we passed in, then destroy the old one and our
                         * work here is done. */
                        if (lock != *lockp) {
                                ldlm_lock_destroy(lock);
                                LDLM_LOCK_PUT(lock);
                        }
                        *flags |= LDLM_FL_LOCK_CHANGED;
                        RETURN(0);
                } else if (rc == ELDLM_LOCK_ABORTED ||
                           (rc == 0 && (*flags & LDLM_FL_INTENT_ONLY))) {
                        ldlm_lock_destroy(lock);
                        RETURN(rc);
                }
                LASSERT(rc == ELDLM_OK);
        }

        lock_res(lock->l_resource);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted before
                 * we got a chance to actually enqueue it.  We don't need to do
                 * anything else. */
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                            LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
                GOTO(out, ELDLM_OK);
        }

        /* Some flags from the enqueue want to make it into the AST, via the
         * lock's l_flags. */
        lock->l_flags |= (*flags & LDLM_AST_DISCARD_DATA);

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below).  In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags'.
         *
         * We do exactly the same thing during recovery, when the server is
         * more or less trusting the clients not to lie.
         *
         * FIXME (bug 268): Detect obvious lies by checking compatibility in
         * granted/converting queues. */
        ldlm_resource_unlink_lock(lock);
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                else
                        ldlm_grant_lock(lock, NULL);
                GOTO(out, ELDLM_OK);
        } else if (*flags & LDLM_FL_REPLAY) {
                if (*flags & LDLM_FL_BLOCK_CONV) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_WAIT) {
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
                        ldlm_grant_lock(lock, NULL);
                        GOTO(out, ELDLM_OK);
                }
                /* If no flags, fall through to normal enqueue path. */
        }

        policy = ldlm_processing_policy_table[res->lr_type];
        policy(lock, flags, 1, &rc, NULL);
        EXIT;
 out:
        unlock_res(lock->l_resource);
        return rc;
}

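/*
 * Editor's illustrative sketch (not from the original source) of the local
 * (client) enqueue path described above: with no blocking flags set by the
 * server the lock is granted immediately, otherwise it parks on the
 * converting/waiting queue until the server says so.
 */
#if 0
static ldlm_error_t example_enqueue_usage(struct ldlm_namespace *ns,
                                          struct ldlm_lock *lock)
{
        int flags = 0;          /* would be filled from the server reply */

        return ldlm_lock_enqueue(ns, &lock, NULL /* cookie */, &flags);
}
#endif
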
/* Must be called with namespace taken: queue is waiting or converting. */
int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
                         struct list_head *work_list)
{
        struct list_head *tmp, *pos;
        ldlm_processing_policy policy;
        int flags;
        int rc = LDLM_ITER_CONTINUE;
        ldlm_error_t err;
        ENTRY;

        check_res_locked(res);

        policy = ldlm_processing_policy_table[res->lr_type];
        LASSERT(policy);

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;
                pending = list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                flags = 0;
                rc = policy(pending, &flags, 0, &err, work_list);
                if (rc != LDLM_ITER_CONTINUE)
                        break;
        }

        RETURN(rc);
}

int ldlm_run_bl_ast_work(struct list_head *rpc_list)
{
        struct list_head *tmp, *pos;
        struct ldlm_lock_desc d;
        int rc = 0, retval = 0;
        ENTRY;

        list_for_each_safe(tmp, pos, rpc_list) {
                struct ldlm_lock *lock =
                        list_entry(tmp, struct ldlm_lock, l_bl_ast);

                /* nobody should touch l_bl_ast */
                lock_res(lock->l_resource);
                list_del_init(&lock->l_bl_ast);

                LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
                LASSERT(lock->l_bl_ast_run == 0);
                LASSERT(lock->l_blocking_lock);
                lock->l_bl_ast_run++;
                unlock_res(lock->l_resource);

                ldlm_lock2desc(lock->l_blocking_lock, &d);

                LDLM_LOCK_PUT(lock->l_blocking_lock);
                lock->l_blocking_lock = NULL;
                rc = lock->l_blocking_ast(lock, &d, NULL, LDLM_CB_BLOCKING);

                if (rc == -ERESTART)
                        retval = -ERESTART;
                else if (rc)
                        CDEBUG(D_DLMTRACE, "Failed AST - should clean & "
                               "disconnect client\n");
                LDLM_LOCK_PUT(lock);
        }
        RETURN(retval);
}

int ldlm_run_cp_ast_work(struct list_head *rpc_list)
{
        struct list_head *tmp, *pos;
        int rc = 0, retval = 0;
        ENTRY;

        /* It's possible to receive a completion AST before we've set
         * the l_completion_ast pointer: either because the AST arrived
         * before the reply, or simply because there's a small race
         * window between receiving the reply and finishing the local
         * enqueue.
         *
         * This can't happen with the blocking_ast, however, because we
         * will never call the local blocking_ast until we drop our
         * reader/writer reference, which we won't do until we get the
         * reply and finish enqueueing. */
        list_for_each_safe(tmp, pos, rpc_list) {
                struct ldlm_lock *lock =
                        list_entry(tmp, struct ldlm_lock, l_cp_ast);

                /* nobody should touch l_cp_ast */
                lock_res(lock->l_resource);
                list_del_init(&lock->l_cp_ast);
                LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
                lock->l_flags &= ~LDLM_FL_CP_REQD;
                unlock_res(lock->l_resource);

                if (lock->l_completion_ast != NULL)
                        rc = lock->l_completion_ast(lock, 0, 0);

                if (rc == -ERESTART)
                        retval = -ERESTART;
                else if (rc)
                        CDEBUG(D_DLMTRACE, "Failed AST - should clean & "
                               "disconnect client\n");
                LDLM_LOCK_PUT(lock);
        }
        RETURN(retval);
}

static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
{
        ldlm_reprocess_all(res);
        return LDLM_ITER_CONTINUE;
}

void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
{
        struct list_head *tmp;
        int i, rc;
        ENTRY;

        spin_lock(&ns->ns_hash_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                tmp = ns->ns_hash[i].next;
                while (tmp != &(ns->ns_hash[i])) {
                        struct ldlm_resource *res =
                                list_entry(tmp, struct ldlm_resource, lr_hash);

                        ldlm_resource_getref(res);
                        spin_unlock(&ns->ns_hash_lock);

                        rc = reprocess_one_queue(res, NULL);

                        spin_lock(&ns->ns_hash_lock);
                        tmp = tmp->next;
                        ldlm_resource_putref_locked(res);

                        if (rc == LDLM_ITER_STOP)
                                GOTO(out, rc);
                }
        }
 out:
        spin_unlock(&ns->ns_hash_lock);
        EXIT;
}

void ldlm_reprocess_all(struct ldlm_resource *res)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        int rc;
        ENTRY;

        /* Local lock trees don't get reprocessed. */
        if (res->lr_namespace->ns_client) {
                EXIT;
                return;
        }

 restart:
        lock_res(res);
        rc = ldlm_reprocess_queue(res, &res->lr_converting, &rpc_list);
        if (rc == LDLM_ITER_CONTINUE)
                ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
        unlock_res(res);

        rc = ldlm_run_cp_ast_work(&rpc_list);
        if (rc == -ERESTART) {
                LASSERT(list_empty(&rpc_list));
                goto restart;
        }
        EXIT;
}

void ldlm_cancel_callback(struct ldlm_lock *lock)
{
        check_res_locked(lock->l_resource);

        if (!(lock->l_flags & LDLM_FL_CANCEL)) {
                lock->l_flags |= LDLM_FL_CANCEL;
                if (lock->l_blocking_ast) {
                        unlock_res(lock->l_resource);
                        lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
                                             LDLM_CB_CANCELING);
                        lock_res(lock->l_resource);
                } else {
                        LDLM_DEBUG(lock, "no blocking ast");
                }
        }
}

void ldlm_lock_cancel(struct ldlm_lock *lock)
{
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        ENTRY;

        res = lock->l_resource;
        ns = res->lr_namespace;

        ldlm_del_waiting_lock(lock);
        lock_res(res);

        /* Please do not, no matter how tempting, remove this LBUG without
         * talking to me first. -phik */
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                LBUG();
        }

        ldlm_cancel_callback(lock);

        ldlm_resource_unlink_lock(lock);
        unlock_res(res);

        ldlm_lock_destroy(lock);

        EXIT;
}

int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
{
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);
        ENTRY;

        if (lock == NULL)
                RETURN(-EINVAL);

        lock->l_ast_data = data;
        LDLM_LOCK_PUT(lock);
        RETURN(0);
}

void ldlm_cancel_locks_for_export(struct obd_export *exp)
{
        struct ldlm_lock *lock;
        struct ldlm_resource *res;

        spin_lock(&exp->exp_ldlm_data.led_lock);
        while(!list_empty(&exp->exp_ldlm_data.led_held_locks)) {
                lock = list_entry(exp->exp_ldlm_data.led_held_locks.next,
                                  struct ldlm_lock, l_export_chain);
                res = ldlm_resource_getref(lock->l_resource);
                LDLM_LOCK_GET(lock);
                spin_unlock(&exp->exp_ldlm_data.led_lock);

                LDLM_DEBUG(lock, "export %p", exp);
                ldlm_lock_cancel(lock);
                ldlm_reprocess_all(res);

                ldlm_resource_putref(res);
                LDLM_LOCK_PUT(lock);
                spin_lock(&exp->exp_ldlm_data.led_lock);
        }
        spin_unlock(&exp->exp_ldlm_data.led_lock);
}

struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        int *flags)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        int granted = 0;
        int old_mode, rc;
        ldlm_error_t err;
        ENTRY;

        if (new_mode == lock->l_granted_mode) { // No changes? Just return.
                *flags |= LDLM_FL_BLOCK_GRANTED;
                RETURN(lock->l_resource);
        }

        LASSERTF(new_mode == LCK_PW && lock->l_granted_mode == LCK_PR,
                 "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);

        res = lock->l_resource;
        ns = res->lr_namespace;

        lock_res(res);

        old_mode = lock->l_req_mode;
        lock->l_req_mode = new_mode;
        ldlm_resource_unlink_lock(lock);

        /* If this is a local resource, put it on the appropriate list. */
        if (res->lr_namespace->ns_client) {
                if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                } else {
                        /* This should never happen, because of the way the
                         * server handles conversions. */
                        LDLM_ERROR(lock, "Erroneous flags %d on local lock\n",
                                   *flags);
                        LBUG();

                        ldlm_grant_lock(lock, &rpc_list);
                        granted = 1;
                        /* FIXME: completion handling not with ns_lock held ! */
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0, NULL);
                }
        } else {
                int pflags = 0;
                ldlm_processing_policy policy;
                policy = ldlm_processing_policy_table[res->lr_type];
                rc = policy(lock, &pflags, 0, &err, &rpc_list);
                if (rc == LDLM_ITER_STOP) {
                        lock->l_req_mode = old_mode;
                        ldlm_resource_add_lock(res, &res->lr_granted, lock);
                        res = NULL;
                } else {
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        granted = 1;
                }
        }

        unlock_res(lock->l_resource);

        if (granted)
                ldlm_run_cp_ast_work(&rpc_list);

        RETURN(res);
}

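/*
 * Editor's illustrative sketch (not from the original source): the only
 * conversion this code accepts (see the LASSERTF above) is PR -> PW; a
 * NULL return means the convert was blocked and the lock was re-queued in
 * its old mode.
 */
#if 0
static int example_convert_usage(struct ldlm_lock *lock)
{
        int flags = 0;

        return ldlm_lock_convert(lock, LCK_PW, &flags) != NULL;
}
#endif
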
void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
{
        char str[PTL_NALFMT_SIZE];
        struct obd_device *obd = NULL;

        if (!((portal_debug | D_ERROR) & level))
                return;

        if (lock == NULL) {
                CDEBUG(level, "  NULL LDLM lock\n");
                return;
        }

        CDEBUG(level, "  -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
               lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
               pos, lock->l_pid);
        if (lock->l_conn_export != NULL)
                obd = lock->l_conn_export->exp_obd;
        if (lock->l_export && lock->l_export->exp_connection) {
                CDEBUG(level, "  Node: NID %s on %s (rhandle: "LPX64")\n",
                       ptlrpc_peernid2str(&lock->l_export->exp_connection->c_peer, str),
                       lock->l_export->exp_connection->c_peer.peer_ni->pni_name,
                       lock->l_remote_handle.cookie);
        } else if (obd == NULL) {
                CDEBUG(level, "  Node: local\n");
        } else {
                struct obd_import *imp = obd->u.cli.cl_import;
                CDEBUG(level, "  Node: NID %s on %s (rhandle: "LPX64")\n",
                       ptlrpc_peernid2str(&imp->imp_connection->c_peer, str),
                       imp->imp_connection->c_peer.peer_ni->pni_name,
                       lock->l_remote_handle.cookie);
        }
        CDEBUG(level, "  Resource: %p ("LPU64"/"LPU64")\n", lock->l_resource,
               lock->l_resource->lr_name.name[0],
               lock->l_resource->lr_name.name[1]);
        CDEBUG(level, "  Req mode: %s, grant mode: %s, rc: %u, read: %d, "
               "write: %d\n", ldlm_lockname[lock->l_req_mode],
               ldlm_lockname[lock->l_granted_mode],
               atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers);
        if (lock->l_resource->lr_type == LDLM_EXTENT)
                CDEBUG(level, "  Extent: "LPU64" -> "LPU64
                       " (req "LPU64"-"LPU64")\n",
                       lock->l_policy_data.l_extent.start,
                       lock->l_policy_data.l_extent.end,
                       lock->l_req_extent.start, lock->l_req_extent.end);
        else if (lock->l_resource->lr_type == LDLM_FLOCK)
                CDEBUG(level, "  Pid: "LPU64" Extent: "LPU64" -> "LPU64"\n",
                       lock->l_policy_data.l_flock.pid,
                       lock->l_policy_data.l_flock.start,
                       lock->l_policy_data.l_flock.end);
        else if (lock->l_resource->lr_type == LDLM_IBITS)
                CDEBUG(level, "  Bits: "LPX64"\n",
                       lock->l_policy_data.l_inodebits.bits);
}

void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        if (lock == NULL)
                return;

        ldlm_lock_dump(D_OTHER, lock, 0);

        LDLM_LOCK_PUT(lock);
}