/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software. If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you. See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else
# include <liblustre.h>
# include <libcfs/kp30.h>
#endif

#include <obd_class.h>
#include "ldlm_internal.h"

//struct lustre_lock ldlm_everything_lock;

char *ldlm_lockname[] = {
        [0] "--",
        [LCK_EX] "EX",
        [LCK_PW] "PW",
        [LCK_PR] "PR",
        [LCK_CW] "CW",
        [LCK_CR] "CR",
        [LCK_NL] "NL",
        [LCK_GROUP] "GROUP"
};

char *ldlm_typename[] = {
        [LDLM_PLAIN] "PLN",
        [LDLM_EXTENT] "EXT",
        [LDLM_FLOCK] "FLK",
        [LDLM_IBITS] "IBT",
};

char *ldlm_it2str(int it)
{
        switch (it) {
        case IT_OPEN:
                return "open";
        case IT_CREAT:
                return "creat";
        case (IT_OPEN | IT_CREAT):
                return "open|creat";
        case IT_READDIR:
                return "readdir";
        case IT_GETATTR:
                return "getattr";
        case IT_LOOKUP:
                return "lookup";
        case IT_UNLINK:
                return "unlink";
        case IT_GETXATTR:
                return "getxattr";
        default:
                CERROR("Unknown intent %d\n", it);
                return "UNKNOWN";
        }
}

extern cfs_mem_cache_t *ldlm_lock_slab;
struct lustre_lock ldlm_handle_lock;

static ldlm_processing_policy ldlm_processing_policy_table[] = {
        [LDLM_PLAIN] ldlm_process_plain_lock,
        [LDLM_EXTENT] ldlm_process_extent_lock,
#ifdef __KERNEL__
        [LDLM_FLOCK] ldlm_process_flock_lock,
#endif
        [LDLM_IBITS] ldlm_process_inodebits_lock,
};

ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
{
        return ldlm_processing_policy_table[res->lr_type];
}
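
/* Illustrative sketch (not in the original source): how callers are expected
 * to dispatch through the table above.  ldlm_lock_enqueue() and
 * ldlm_reprocess_queue() below follow this pattern; the third argument
 * distinguishes a first enqueue (1) from reprocessing (0):
 *
 *      ldlm_processing_policy policy;
 *      int flags = 0;
 *      ldlm_error_t err;
 *
 *      policy = ldlm_get_processing_policy(res);
 *      policy(lock, &flags, 1, &err);
 */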

void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
{
        ns->ns_policy = arg;
}

/*
 * REFCOUNTED LOCK OBJECTS
 */

/*
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        atomic_inc(&lock->l_refc);
        return lock;
}

void ldlm_lock_put(struct ldlm_lock *lock)
{
        ENTRY;

        if (atomic_dec_and_test(&lock->l_refc)) {
                struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
                struct obd_export *export = NULL;

                l_lock(&ns->ns_lock);
                LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing it.");
                LASSERT(lock->l_destroyed);
                LASSERT(list_empty(&lock->l_res_link));

                spin_lock(&ns->ns_counter_lock);
                ns->ns_locks--;
                spin_unlock(&ns->ns_counter_lock);

                ldlm_resource_putref(lock->l_resource);
                lock->l_resource = NULL;
                export = lock->l_export;

                if (lock->l_parent)
                        LDLM_LOCK_PUT(lock->l_parent);

                if (lock->l_lvb_data != NULL)
                        OBD_FREE(lock->l_lvb_data, lock->l_lvb_len);

                OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
                l_unlock(&ns->ns_lock);
                if (export)
                        class_export_put(export);
        }

        EXIT;
}
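
/* Illustrative sketch (not in the original source) of the refcounting
 * contract described above.  The LDLM_LOCK_GET()/LDLM_LOCK_PUT() macros
 * wrap ldlm_lock_get()/ldlm_lock_put():
 *
 *      lock = LDLM_LOCK_GET(lock);     // pin: lock cannot be freed under us
 *      ... use lock ...
 *      LDLM_LOCK_PUT(lock);            // last put frees a destroyed lock
 *
 * Note the LASSERT(lock->l_destroyed) above: the final reference can only
 * be dropped after ldlm_lock_destroy() has unhashed the lock.
 */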

void ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
        ENTRY;
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (!list_empty(&lock->l_lru)) {
                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
                list_del_init(&lock->l_lru);
                lock->l_resource->lr_namespace->ns_nr_unused--;
                LASSERT(lock->l_resource->lr_namespace->ns_nr_unused >= 0);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        EXIT;
}

/* This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock.
 * Because it's not in the hash table anymore. -phil */
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        ENTRY;
        l_lock(&lock->l_resource->lr_namespace->ns_lock);

        if (!list_empty(&lock->l_children)) {
                LDLM_ERROR(lock, "still has children (%p)!",
                           lock->l_children.next);
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (!list_empty(&lock->l_res_link)) {
                LDLM_ERROR(lock, "lock still on resource");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (lock->l_destroyed) {
                LASSERT(list_empty(&lock->l_lru));
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                EXIT;
                return;
        }
        lock->l_destroyed = 1;

        list_del_init(&lock->l_export_chain);
        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

#if 0
        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        if (lock->l_export)
                class_export_put(lock->l_export);
        lock->l_export = NULL;
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0);
#endif

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        LDLM_LOCK_PUT(lock);
        EXIT;
}
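
/* Illustrative note (not in the original source): given the hash-table
 * behaviour described above, teardown is always "destroy, then drop your
 * own references", e.g. as the ELDLM_LOCK_REPLACED path of
 * ldlm_lock_enqueue() does below:
 *
 *      ldlm_lock_destroy(lock);   // unhash; handle lookups now fail
 *      LDLM_LOCK_PUT(lock);       // drop the caller's remaining reference
 */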

/* this is called by portals_handle2object with the handle lock taken */
static void lock_handle_addref(void *lock)
{
        LDLM_LOCK_GET((struct ldlm_lock *)lock);
}

/*
 * usage: pass in a resource on which you have done ldlm_resource_get
 *        pass in a parent lock on which you have done a ldlm_lock_get
 *        after return, ldlm_*_put the resource and parent
 * returns: lock with refcount 2 - one for current caller and one for remote
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
                                       struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;
        ENTRY;

        if (resource == NULL)
                LBUG();

        OBD_SLAB_ALLOC(lock, ldlm_lock_slab, CFS_ALLOC_IO, sizeof(*lock));
        if (lock == NULL)
                RETURN(NULL);

        lock->l_resource = ldlm_resource_getref(resource);

        atomic_set(&lock->l_refc, 2);
        CFS_INIT_LIST_HEAD(&lock->l_children);
        CFS_INIT_LIST_HEAD(&lock->l_childof);
        CFS_INIT_LIST_HEAD(&lock->l_res_link);
        CFS_INIT_LIST_HEAD(&lock->l_lru);
        CFS_INIT_LIST_HEAD(&lock->l_export_chain);
        CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
        cfs_waitq_init(&lock->l_waitq);

        spin_lock(&resource->lr_namespace->ns_counter_lock);
        resource->lr_namespace->ns_locks++;
        spin_unlock(&resource->lr_namespace->ns_counter_lock);

        if (parent != NULL) {
                l_lock(&parent->l_resource->lr_namespace->ns_lock);
                lock->l_parent = LDLM_LOCK_GET(parent);
                list_add(&lock->l_childof, &parent->l_children);
                l_unlock(&parent->l_resource->lr_namespace->ns_lock);
        }

        CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, lock_handle_addref);

        RETURN(lock);
}
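
/* Illustrative sketch (hypothetical caller) of the usage contract in the
 * comment above, mirroring what ldlm_lock_create() does below:
 *
 *      res = ldlm_resource_get(ns, NULL, res_id, type, 1);
 *      lock = ldlm_lock_new(NULL, res);        // refcount 2 on success
 *      ldlm_resource_putref(res);              // lock holds its own res ref
 */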

int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              struct ldlm_res_id new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;
        ENTRY;

        l_lock(&ns->ns_lock);
        if (memcmp(&new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                l_unlock(&ns->ns_lock);
                RETURN(0);
        }

        LASSERT(new_resid.name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(list_empty(&lock->l_res_link));

        lock->l_resource = ldlm_resource_get(ns, NULL, new_resid,
                                             lock->l_resource->lr_type, 1);
        if (lock->l_resource == NULL) {
                l_unlock(&ns->ns_lock);
                RETURN(-ENOMEM);
        }

        /* ...and the flowers are still standing! */
        ldlm_resource_putref(oldres);

        l_unlock(&ns->ns_lock);
        RETURN(0);
}

void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->cookie = lock->l_handle.h_cookie;
}

/* if flags: atomically get the lock and set the flags.
 * Return NULL if flag already set
 */
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
                                     int flags)
{
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock = NULL, *retval = NULL;
        ENTRY;

        LASSERT(handle);

        lock = class_handle2object(handle->cookie);
        if (lock == NULL)
                RETURN(NULL);

        LASSERT(lock->l_resource != NULL);
        ns = lock->l_resource->lr_namespace;
        LASSERT(ns != NULL);

        l_lock(&ns->ns_lock);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if (lock->l_destroyed) {
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags && (lock->l_flags & flags)) {
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags)
                lock->l_flags |= flags;

        retval = lock;
        EXIT;
 out:
        l_unlock(&ns->ns_lock);
        return retval;
}
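
/* Illustrative round trip (hypothetical caller): a lustre_handle is just the
 * cookie of the hashed l_handle, so it can cross the wire and be turned back
 * into a referenced lock later:
 *
 *      struct lustre_handle lockh;
 *      ldlm_lock2handle(lock, &lockh);   // cheap; takes no reference
 *      ...
 *      lock = ldlm_handle2lock(&lockh);  // NULL if destroyed in the interim
 *      if (lock != NULL) {
 *              ... use lock ...
 *              LDLM_LOCK_PUT(lock);      // drop the lookup reference
 *      }
 */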

struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *ns,
                                      const struct lustre_handle *handle)
{
        struct ldlm_lock *retval = NULL;

        l_lock(&ns->ns_lock);
        retval = __ldlm_handle2lock(handle, 0);
        l_unlock(&ns->ns_lock);
        return retval;
}

void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        struct obd_export *exp = lock->l_export ?: lock->l_conn_export;
        /* INODEBITS_INTEROP: If the other side does not support
         * inodebits, reply with a plain lock descriptor.
         */
        if ((lock->l_resource->lr_type == LDLM_IBITS) &&
            (exp && !(exp->exp_connect_flags & OBD_CONNECT_IBITS))) {
                struct ldlm_resource res = *lock->l_resource;

                /* Make sure all the right bits are set in this lock we
                   are going to pass to client */
                LASSERTF(lock->l_policy_data.l_inodebits.bits ==
                         (MDS_INODELOCK_LOOKUP|MDS_INODELOCK_UPDATE),
                         "Inappropriate inode lock bits during "
                         "conversion " LPU64 "\n",
                         lock->l_policy_data.l_inodebits.bits);
                res.lr_type = LDLM_PLAIN;
                ldlm_res2desc(&res, &desc->l_resource);
                /* Convert "new" lock mode to something old client can
                   understand */
                if ((lock->l_req_mode == LCK_CR) ||
                    (lock->l_req_mode == LCK_CW))
                        desc->l_req_mode = LCK_PR;
                else
                        desc->l_req_mode = lock->l_req_mode;
                if ((lock->l_granted_mode == LCK_CR) ||
                    (lock->l_granted_mode == LCK_CW)) {
                        desc->l_granted_mode = LCK_PR;
                } else {
                        /* We never grant PW/EX locks to clients */
                        LASSERT((lock->l_granted_mode != LCK_PW) &&
                                (lock->l_granted_mode != LCK_EX));
                        desc->l_granted_mode = lock->l_granted_mode;
                }

                /* We do not copy policy here, because there is no
                   policy for plain locks */
        } else {
                ldlm_res2desc(lock->l_resource, &desc->l_resource);
                desc->l_req_mode = lock->l_req_mode;
                desc->l_granted_mode = lock->l_granted_mode;
                desc->l_policy_data = lock->l_policy_data;
        }
}

void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            void *data, int datalen)
{
        struct ldlm_ast_work *w;
        ENTRY;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (new && (lock->l_flags & LDLM_FL_AST_SENT))
                GOTO(out, 0);

        CDEBUG(D_OTHER, "lock %p incompatible; sending blocking AST.\n", lock);

        OBD_ALLOC(w, sizeof(*w));
        if (w == NULL)
                GOTO(out, 0);

        w->w_data = data;
        w->w_datalen = datalen;
        if (new) {
                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
                lock->l_flags |= LDLM_FL_AST_SENT;
                /* If the enqueuing client said so, tell the AST recipient to
                 * discard dirty data, rather than writing back. */
                if (new->l_flags & LDLM_AST_DISCARD_DATA)
                        lock->l_flags |= LDLM_FL_DISCARD_DATA;
                w->w_blocking = 1;
                ldlm_lock2desc(new, &w->w_desc);
        }

        w->w_lock = LDLM_LOCK_GET(lock);
        list_add(&w->w_list, lock->l_resource->lr_tmp);
        EXIT;
 out:
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}

void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_lock_remove_from_lru(lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR))
                lock->l_readers++;
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP))
                lock->l_writers++;
        lock->l_last_used = cfs_time_current();
        LDLM_LOCK_GET(lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}

void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
        struct ldlm_namespace *ns;
        ENTRY;

        ns = lock->l_resource->lr_namespace;
        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                LASSERT(lock->l_readers > 0);
                lock->l_readers--;
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP)) {
                LASSERT(lock->l_writers > 0);
                lock->l_writers--;
        }

        if (lock->l_flags & LDLM_FL_LOCAL &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                lock->l_flags |= LDLM_FL_CBPENDING;
        }

        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                /* If we received a blocked AST and this was the last reference,
                 * run the callback. */
                if (ns->ns_client == LDLM_NAMESPACE_SERVER && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                l_unlock(&ns->ns_lock);
                if (ldlm_bl_to_thread(ns, NULL, lock) != 0)
                        ldlm_handle_bl_callback(ns, NULL, lock);
        } else if (ns->ns_client == LDLM_NAMESPACE_CLIENT &&
                   !lock->l_readers && !lock->l_writers &&
                   !(lock->l_flags & LDLM_FL_NO_LRU)) {
                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                LASSERT(list_empty(&lock->l_lru));
                LASSERT(ns->ns_nr_unused >= 0);
                list_add_tail(&lock->l_lru, &ns->ns_unused_list);
                ns->ns_nr_unused++;
                l_unlock(&ns->ns_lock);
                ldlm_cancel_lru(ns, LDLM_ASYNC);
        } else {
                l_unlock(&ns->ns_lock);
        }

        LDLM_LOCK_PUT(lock); /* matches the ldlm_lock_get in addref */

        EXIT;
}

void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

        LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

/* This will drop a lock reference and mark it for destruction, but will not
 * necessarily cancel the lock before returning. */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        ENTRY;

        LASSERT(lock != NULL);

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        lock->l_flags |= LDLM_FL_CBPENDING;
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
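
/* Illustrative reader/writer reference lifecycle (hypothetical caller):
 *
 *      ldlm_lock_addref(&lockh, LCK_PR);   // pin the lock for a read user
 *      ... perform the read under the lock ...
 *      ldlm_lock_decref(&lockh, LCK_PR);   // last user may trigger the bl
 *                                          // callback or LRU insertion above
 *
 * ldlm_lock_decref_and_cancel() is the variant for callers that know the
 * lock should be torn down, not parked in the LRU, after its last user.
 */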

/* NOTE: called by
 * - ldlm_lock_enqueue
 * - ldlm_reprocess_queue
 * - ldlm_lock_convert
 */
void ldlm_grant_lock(struct ldlm_lock *lock, void *data, int datalen,
                     int run_ast)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        lock->l_granted_mode = lock->l_req_mode;
        ldlm_resource_add_lock(res, &res->lr_granted, lock);

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (run_ast && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, data, datalen);

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        EXIT;
}

/* returns a referenced lock or NULL.  See the flag descriptions below, in the
 * comment above ldlm_lock_match */
static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode,
                                      ldlm_policy_data_t *policy,
                                      struct ldlm_lock *old_lock, int flags)
{
        struct ldlm_lock *lock;
        struct list_head *tmp;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock == old_lock)
                        continue;

                /* llite sometimes wants to match locks that will be
                 * canceled when their users drop, but we allow it to match
                 * if it passes in CBPENDING and the lock still has users.
                 * this is generally only going to be used by children
                 * whose parents already hold a lock so forward progress
                 * can still happen. */
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    !(flags & LDLM_FL_CBPENDING))
                        continue;
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    lock->l_readers == 0 && lock->l_writers == 0)
                        continue;

                if (!(lock->l_req_mode & mode))
                        continue;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_policy_data.l_extent.start >
                     policy->l_extent.start ||
                     lock->l_policy_data.l_extent.end < policy->l_extent.end))
                        continue;

                if (unlikely(mode == LCK_GROUP) &&
                    lock->l_resource->lr_type == LDLM_EXTENT &&
                    lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
                        continue;

                /* We match if we have existing lock with same or wider set
                   of bits. */
                if (lock->l_resource->lr_type == LDLM_IBITS &&
                    ((lock->l_policy_data.l_inodebits.bits &
                      policy->l_inodebits.bits) !=
                     policy->l_inodebits.bits))
                        continue;

                if (lock->l_destroyed || (lock->l_flags & LDLM_FL_FAILED))
                        continue;

                if ((flags & LDLM_FL_LOCAL_ONLY) &&
                    !(lock->l_flags & LDLM_FL_LOCAL))
                        continue;

                if (flags & LDLM_FL_TEST_LOCK)
                        LDLM_LOCK_GET(lock);
                else
                        ldlm_lock_addref_internal(lock, mode);
                return lock;
        }

        return NULL;
}

void ldlm_lock_allow_match(struct ldlm_lock *lock)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        lock->l_flags |= LDLM_FL_CAN_MATCH;
        cfs_waitq_signal(&lock->l_waitq);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}

/* Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (ie, connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 *     to be canceled can still be matched as long as they still have reader
 *     or writer references
 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
 *     just tell us if we would have matched.
 *
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with a addref()ed lock
 */
int ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                    struct ldlm_res_id *res_id, ldlm_type_t type,
                    ldlm_policy_data_t *policy, ldlm_mode_t mode,
                    struct lustre_handle *lockh)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock, *old_lock = NULL;
        int rc = 0;
        ENTRY;

        if (ns == NULL) {
                old_lock = ldlm_handle2lock(lockh);
                LASSERT(old_lock);

                ns = old_lock->l_resource->lr_namespace;
                res_id = &old_lock->l_resource->lr_name;
                type = old_lock->l_resource->lr_type;
                mode = old_lock->l_req_mode;
        }

        res = ldlm_resource_get(ns, NULL, *res_id, type, 0);
        if (res == NULL) {
                LASSERT(old_lock == NULL);
                RETURN(0);
        }

        l_lock(&ns->ns_lock);

        lock = search_queue(&res->lr_granted, mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);
        if (flags & LDLM_FL_BLOCK_GRANTED)
                GOTO(out, rc = 0);
        lock = search_queue(&res->lr_converting, mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);
        lock = search_queue(&res->lr_waiting, mode, policy, old_lock, flags);
        if (lock != NULL)
                GOTO(out, rc = 1);

        EXIT;
 out:
        ldlm_resource_putref(res);
        l_unlock(&ns->ns_lock);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                if (!(lock->l_flags & LDLM_FL_CAN_MATCH)) {
                        struct l_wait_info lwi;
                        if (lock->l_completion_ast) {
                                int err = lock->l_completion_ast(lock,
                                                          LDLM_FL_WAIT_NOREPROC,
                                                                 NULL);
                                if (err) {
                                        if (flags & LDLM_FL_TEST_LOCK)
                                                LDLM_LOCK_PUT(lock);
                                        else
                                                ldlm_lock_decref_internal(lock,
                                                                          mode);
                                        rc = 0;
                                        goto out2;
                                }
                        }

                        lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
                                               NULL, NULL, NULL);

                        /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
                        l_wait_event(lock->l_waitq,
                                     (lock->l_flags & LDLM_FL_CAN_MATCH), &lwi);
                }
        }
 out2:
        if (rc) {
                l_lock(&ns->ns_lock);
                LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")",
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[2] : policy->l_extent.start,
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[3] : policy->l_extent.end);
                l_unlock(&ns->ns_lock);
        } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
                LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
                                  LPU64"/"LPU64" ("LPU64" "LPU64")", ns,
                                  type, mode, res_id->name[0], res_id->name[1],
                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[2] : policy->l_extent.start,
                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[3] : policy->l_extent.end);
        }

        if (old_lock)
                LDLM_LOCK_PUT(old_lock);
        if (flags & LDLM_FL_TEST_LOCK && rc)
                LDLM_LOCK_PUT(lock);

        RETURN(rc);
}
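
/* Illustrative only (hypothetical caller): probing for an existing extent
 * lock without taking a reference, using the flags documented above:
 *
 *      ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
 *      struct lustre_handle lockh;
 *
 *      if (ldlm_lock_match(ns, LDLM_FL_TEST_LOCK, &res_id, LDLM_EXTENT,
 *                          &policy, LCK_PR, &lockh))
 *              ... a compatible PR lock exists; nothing was referenced ...
 *
 * Without LDLM_FL_TEST_LOCK, a successful match returns with an addref'ed
 * lock in lockh that the caller must eventually ldlm_lock_decref().
 */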

/* Returns a referenced lock */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   const struct lustre_handle *parent_lock_handle,
                                   const struct ldlm_res_id res_id,
                                   ldlm_type_t type,
                                   ldlm_mode_t mode,
                                   ldlm_blocking_callback blocking,
                                   ldlm_completion_callback completion,
                                   ldlm_glimpse_callback glimpse,
                                   void *data, __u32 lvb_len)
{
        struct ldlm_resource *res, *parent_res = NULL;
        struct ldlm_lock *lock, *parent_lock = NULL;
        ENTRY;

        if (parent_lock_handle) {
                parent_lock = ldlm_handle2lock(parent_lock_handle);
                if (parent_lock)
                        parent_res = parent_lock->l_resource;
        }

        res = ldlm_resource_get(ns, parent_res, res_id, type, 1);
        if (res == NULL)
                RETURN(NULL);

        lock = ldlm_lock_new(parent_lock, res);
        ldlm_resource_putref(res);
        if (parent_lock != NULL)
                LDLM_LOCK_PUT(parent_lock);

        if (lock == NULL)
                RETURN(NULL);

        lock->l_req_mode = mode;
        lock->l_ast_data = data;
        lock->l_blocking_ast = blocking;
        lock->l_completion_ast = completion;
        lock->l_glimpse_ast = glimpse;
        lock->l_pid = cfs_curproc_pid();

        if (lvb_len) {
                lock->l_lvb_len = lvb_len;
                OBD_ALLOC(lock->l_lvb_data, lvb_len);
                if (lock->l_lvb_data == NULL) {
                        OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
                        RETURN(NULL);
                }
        }

        RETURN(lock);
}
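
/* Illustrative only (hypothetical server-side caller): create a lock and
 * feed it to ldlm_lock_enqueue() below, supplying the ASTs that the
 * processing policies will invoke; my_blocking_ast/my_completion_ast are
 * assumed to be defined by the caller:
 *
 *      int flags = 0;
 *      lock = ldlm_lock_create(ns, NULL, res_id, LDLM_PLAIN, LCK_EX,
 *                              my_blocking_ast, my_completion_ast,
 *                              NULL, NULL, 0);
 *      if (lock != NULL)
 *              err = ldlm_lock_enqueue(ns, &lock, NULL, &flags);
 */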

ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                               struct ldlm_lock **lockp,
                               void *cookie, int *flags)
{
        struct ldlm_lock *lock = *lockp;
        struct ldlm_resource *res = lock->l_resource;
        int local = res->lr_namespace->ns_client;
        ldlm_processing_policy policy;
        ldlm_error_t rc = ELDLM_OK;
        ENTRY;

        do_gettimeofday(&lock->l_enqueued_time);
        /* policies are not executed on the client or during replay */
        if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
            && !local && ns->ns_policy) {
                rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
                                   NULL);
                if (rc == ELDLM_LOCK_REPLACED) {
                        /* The lock that was returned has already been granted,
                         * and placed into lockp.  If it's not the same as the
                         * one we passed in, then destroy the old one and our
                         * work here is done. */
                        if (lock != *lockp) {
                                ldlm_lock_destroy(lock);
                                LDLM_LOCK_PUT(lock);
                        }
                        *flags |= LDLM_FL_LOCK_CHANGED;
                        RETURN(0);
                } else if (rc != ELDLM_OK ||
                           (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
                        ldlm_lock_destroy(lock);
                        RETURN(rc);
                }
        }

        l_lock(&ns->ns_lock);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted before
                 * we got a chance to actually enqueue it.  We don't need to do
                 * anything else. */
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                            LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
                GOTO(out, ELDLM_OK);
        }

        /* Some flags from the enqueue want to make it into the AST, via the
         * lock's l_flags. */
        lock->l_flags |= *flags & LDLM_AST_DISCARD_DATA;

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below).  In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags'.
         *
         * We do exactly the same thing during recovery, when the server is
         * more or less trusting the clients not to lie.
         *
         * FIXME (bug 268): Detect obvious lies by checking compatibility in
         * granted/converting queues. */
        ldlm_resource_unlink_lock(lock);
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                else
                        ldlm_grant_lock(lock, NULL, 0, 0);
                GOTO(out, ELDLM_OK);
        } else if (*flags & LDLM_FL_REPLAY) {
                if (*flags & LDLM_FL_BLOCK_CONV) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_WAIT) {
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
                        ldlm_grant_lock(lock, NULL, 0, 0);
                        GOTO(out, ELDLM_OK);
                }
                /* If no flags, fall through to normal enqueue path. */
        }

        policy = ldlm_processing_policy_table[res->lr_type];
        policy(lock, flags, 1, &rc);
        EXIT;
 out:
        l_unlock(&ns->ns_lock);
        return rc;
}

/* Must be called with namespace taken: queue is waiting or converting. */
int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue)
{
        struct list_head *tmp, *pos;
        ldlm_processing_policy policy;
        int flags;
        int rc = LDLM_ITER_CONTINUE;
        ldlm_error_t err;
        ENTRY;

        policy = ldlm_processing_policy_table[res->lr_type];
        LASSERT(policy);

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;
                pending = list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                flags = 0;
                rc = policy(pending, &flags, 0, &err);
                if (rc != LDLM_ITER_CONTINUE)
                        break;
        }

        RETURN(rc);
}

int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list)
{
        struct list_head *tmp, *pos;
        int rc, retval = 0;
        ENTRY;

        l_check_no_ns_lock(ns);

        list_for_each_safe(tmp, pos, rpc_list) {
                struct ldlm_ast_work *w =
                        list_entry(tmp, struct ldlm_ast_work, w_list);

                /* It's possible to receive a completion AST before we've set
                 * the l_completion_ast pointer: either because the AST arrived
                 * before the reply, or simply because there's a small race
                 * window between receiving the reply and finishing the local
                 * enqueue.
                 *
                 * This can't happen with the blocking_ast, however, because we
                 * will never call the local blocking_ast until we drop our
                 * reader/writer reference, which we won't do until we get the
                 * reply and finish enqueueing. */
                LASSERT(w->w_lock != NULL);
                if (w->w_blocking) {
                        LASSERT(w->w_lock->l_blocking_ast != NULL);
                        rc = w->w_lock->l_blocking_ast
                                (w->w_lock, &w->w_desc, w->w_data,
                                 LDLM_CB_BLOCKING);
                } else if (w->w_lock->l_completion_ast != NULL) {
                        rc = w->w_lock->l_completion_ast(w->w_lock, w->w_flags,
                                                         NULL);
                } else {
                        rc = 0;
                }

                if (rc == -ERESTART)
                        retval = -ERESTART;
                else if (rc)
                        CDEBUG(D_DLMTRACE, "Failed AST - should clean & "
                               "disconnect client\n");
                LDLM_LOCK_PUT(w->w_lock);
                list_del(&w->w_list);
                OBD_FREE(w, sizeof(*w));
        }

        RETURN(retval);
}

static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
{
        ldlm_reprocess_all(res);
        return LDLM_ITER_CONTINUE;
}

void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
{
        int i, rc;
        ENTRY;

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *next;
                list_for_each_safe(tmp, next, &(ns->ns_hash[i])) {
                        struct ldlm_resource *res =
                                list_entry(tmp, struct ldlm_resource, lr_hash);

                        ldlm_resource_getref(res);
                        l_unlock(&ns->ns_lock);
                        rc = reprocess_one_queue(res, NULL);
                        l_lock(&ns->ns_lock);
                        next = tmp->next;
                        ldlm_resource_putref(res);
                        if (rc == LDLM_ITER_STOP)
                                GOTO(out, rc);
                }
        }
 out:
        l_unlock(&ns->ns_lock);
        EXIT;
}

void ldlm_reprocess_all(struct ldlm_resource *res)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        int rc;
        ENTRY;

        /* Local lock trees don't get reprocessed. */
        if (res->lr_namespace->ns_client) {
                EXIT;
                return;
        }

 restart:
        l_lock(&res->lr_namespace->ns_lock);
        res->lr_tmp = &rpc_list;

        rc = ldlm_reprocess_queue(res, &res->lr_converting);
        if (rc == LDLM_ITER_CONTINUE)
                ldlm_reprocess_queue(res, &res->lr_waiting);

        res->lr_tmp = NULL;
        l_unlock(&res->lr_namespace->ns_lock);

        rc = ldlm_run_ast_work(res->lr_namespace, &rpc_list);
        if (rc == -ERESTART) {
                LASSERT(list_empty(&rpc_list));
                goto restart;
        }
        EXIT;
}

void ldlm_cancel_callback(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns;

        ns = lock->l_resource->lr_namespace;
        l_lock(&ns->ns_lock);
        if (!(lock->l_flags & LDLM_FL_CANCEL)) {
                lock->l_flags |= LDLM_FL_CANCEL;
                if (lock->l_blocking_ast) {
                        l_unlock(&ns->ns_lock);
                        // l_check_no_ns_lock(ns);
                        lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
                                             LDLM_CB_CANCELING);
                        return;
                } else {
                        LDLM_DEBUG(lock, "no blocking ast");
                }
        }
        l_unlock(&ns->ns_lock);
}

void ldlm_lock_cancel(struct ldlm_lock *lock)
{
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        ENTRY;

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);
        ldlm_del_waiting_lock(lock);

        /* Please do not, no matter how tempting, remove this LBUG without
         * talking to me first. -phik */
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                LBUG();
        }

        ldlm_cancel_callback(lock);

        ldlm_resource_unlink_lock(lock);
        ldlm_lock_destroy(lock);
        l_unlock(&ns->ns_lock);
        EXIT;
}

int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
{
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);
        ENTRY;

        if (lock == NULL)
                RETURN(-EINVAL);

        lock->l_ast_data = data;
        LDLM_LOCK_PUT(lock);
        RETURN(0);
}

void ldlm_cancel_locks_for_export(struct obd_export *exp)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;

        l_lock(&ns->ns_lock);
        while (!list_empty(&exp->exp_ldlm_data.led_held_locks)) {
                lock = list_entry(exp->exp_ldlm_data.led_held_locks.next,
                                  struct ldlm_lock, l_export_chain);
                res = ldlm_resource_getref(lock->l_resource);
                LDLM_DEBUG(lock, "export %p", exp);
                ldlm_lock_cancel(lock);
                l_unlock(&ns->ns_lock);
                ldlm_reprocess_all(res);
                ldlm_resource_putref(res);
                l_lock(&ns->ns_lock);
        }
        l_unlock(&ns->ns_lock);
}

struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        int *flags)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        int granted = 0;
        int old_mode, rc;
        ldlm_error_t err;
        ENTRY;

        if (new_mode == lock->l_granted_mode) { // No changes? Just return.
                *flags |= LDLM_FL_BLOCK_GRANTED;
                RETURN(lock->l_resource);
        }

        LASSERTF(new_mode == LCK_PW && lock->l_granted_mode == LCK_PR,
                 "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);

        old_mode = lock->l_req_mode;
        lock->l_req_mode = new_mode;
        ldlm_resource_unlink_lock(lock);

        /* If this is a local resource, put it on the appropriate list. */
        if (res->lr_namespace->ns_client) {
                if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                } else {
                        /* This should never happen, because of the way the
                         * server handles conversions. */
                        LDLM_ERROR(lock, "Erroneous flags %d on local lock\n",
                                   *flags);
                        LBUG();

                        res->lr_tmp = &rpc_list;
                        ldlm_grant_lock(lock, NULL, 0, 0);
                        res->lr_tmp = NULL;
                        granted = 1;
                        /* FIXME: completion handling not with ns_lock held ! */
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0, NULL);
                }
        } else {
                int pflags = 0;
                ldlm_processing_policy policy;
                policy = ldlm_processing_policy_table[res->lr_type];
                res->lr_tmp = &rpc_list;
                rc = policy(lock, &pflags, 0, &err);
                res->lr_tmp = NULL;
                if (rc == LDLM_ITER_STOP) {
                        lock->l_req_mode = old_mode;
                        ldlm_resource_add_lock(res, &res->lr_granted, lock);
                        res = NULL;
                } else {
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        granted = 1;
                }
        }

        l_unlock(&ns->ns_lock);

        if (granted)
                ldlm_run_ast_work(ns, &rpc_list);
        RETURN(res);
}
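
/* Illustrative only (hypothetical caller): converting a granted PR lock up
 * to PW, the only transition the LASSERTF above permits:
 *
 *      int flags = 0;
 *      res = ldlm_lock_convert(lock, LCK_PW, &flags);
 *      if (res != NULL)
 *              ldlm_reprocess_all(res);
 *
 * A NULL result means the conversion was blocked and the lock was re-granted
 * with its old mode.
 */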

void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
{
        struct obd_device *obd = NULL;

        if (!((libcfs_debug | D_ERROR) & level))
                return;

        if (lock == NULL) {
                CDEBUG_EX(level, " NULL LDLM lock\n");
                return;
        }

        CDEBUG_EX(level, " -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
                  lock, lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
                  pos, lock->l_pid);
        if (lock->l_conn_export != NULL)
                obd = lock->l_conn_export->exp_obd;
        if (lock->l_export && lock->l_export->exp_connection) {
                CDEBUG_EX(level, " Node: NID %s (rhandle: "LPX64")\n",
                          libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid),
                          lock->l_remote_handle.cookie);
        } else if (obd == NULL) {
                CDEBUG_EX(level, " Node: local\n");
        } else {
                struct obd_import *imp = obd->u.cli.cl_import;
                CDEBUG_EX(level, " Node: NID %s (rhandle: "LPX64")\n",
                          libcfs_nid2str(imp->imp_connection->c_peer.nid),
                          lock->l_remote_handle.cookie);
        }
        CDEBUG_EX(level, " Resource: %p ("LPU64"/"LPU64"/"LPU64")\n",
                  lock->l_resource,
                  lock->l_resource->lr_name.name[0],
                  lock->l_resource->lr_name.name[1],
                  lock->l_resource->lr_name.name[2]);
        CDEBUG_EX(level, " Req mode: %s, grant mode: %s, rc: %u, read: %d, "
                  "write: %d flags: %#x\n", ldlm_lockname[lock->l_req_mode],
                  ldlm_lockname[lock->l_granted_mode],
                  atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
                  lock->l_flags);
        if (lock->l_resource->lr_type == LDLM_EXTENT)
                CDEBUG_EX(level, " Extent: "LPU64" -> "LPU64
                          " (req "LPU64"-"LPU64")\n",
                          lock->l_policy_data.l_extent.start,
                          lock->l_policy_data.l_extent.end,
                          lock->l_req_extent.start, lock->l_req_extent.end);
        else if (lock->l_resource->lr_type == LDLM_FLOCK)
                CDEBUG_EX(level, " Pid: %d Extent: "LPU64" -> "LPU64"\n",
                          lock->l_policy_data.l_flock.pid,
                          lock->l_policy_data.l_flock.start,
                          lock->l_policy_data.l_flock.end);
        else if (lock->l_resource->lr_type == LDLM_IBITS)
                CDEBUG_EX(level, " Bits: "LPX64"\n",
                          lock->l_policy_data.l_inodebits.bits);
}

void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        if (lock == NULL)
                return;

        ldlm_lock_dump(D_OTHER, lock, 0);

        LDLM_LOCK_PUT(lock);
}