/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/lustre_dlm.h>
#include <linux/lustre_mds.h>
#include <linux/obd_class.h>

/* this lock protects ldlm_handle2lock's integrity */
static spinlock_t ldlm_handle_lock = SPIN_LOCK_UNLOCKED;

char *ldlm_lockname[] = {
        [LCK_EX] "EX",
        [LCK_PW] "PW",
        [LCK_PR] "PR",
        [LCK_CW] "CW",
        [LCK_CR] "CR",
        [LCK_NL] "NL"
};

char *ldlm_typename[] = {
        [LDLM_PLAIN] "PLN",
        [LDLM_EXTENT] "EXT",
        [LDLM_MDSINTENT] "INT"
};

char *ldlm_it2str(int it)
{
        switch (it) {
        case (IT_OPEN | IT_CREAT):
                return "open|creat";
        default:
                CERROR("Unknown intent %d\n", it);
                return "UNKNOWN";
        }
}

extern kmem_cache_t *ldlm_lock_slab;

static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b);

ldlm_res_compat ldlm_res_compat_table[] = {
        [LDLM_PLAIN] ldlm_plain_compat,
        [LDLM_EXTENT] ldlm_extent_compat,
        [LDLM_MDSINTENT] ldlm_plain_compat
};

ldlm_res_policy ldlm_res_policy_table[] = {
        [LDLM_PLAIN] NULL,
        [LDLM_EXTENT] ldlm_extent_policy,
        [LDLM_MDSINTENT] NULL
};

void ldlm_register_intent(int (*arg) (struct ldlm_lock *lock, void *req_cookie,
                                      ldlm_mode_t mode, void *data))
{
        ldlm_res_policy_table[LDLM_MDSINTENT] = arg;
}

void ldlm_unregister_intent(void)
{
        ldlm_res_policy_table[LDLM_MDSINTENT] = NULL;
}

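/*
 * Sketch of how a server-side module might install an intent policy through
 * the two hooks above.  The handler name and its body are illustrative
 * assumptions, not part of this file; only ldlm_register_intent() and
 * ldlm_unregister_intent() are the real entry points.
 */
#if 0
static int example_intent_policy(struct ldlm_lock *lock, void *req_cookie,
                                 ldlm_mode_t mode, void *data)
{
        /* inspect req_cookie, possibly switch the lock to another resource,
         * and return ELDLM_OK, ELDLM_LOCK_CHANGED or ELDLM_LOCK_ABORTED */
        return ELDLM_OK;
}

static void example_intent_setup(void)
{
        ldlm_register_intent(example_intent_policy);    /* on module init */
        ldlm_unregister_intent();                       /* on module cleanup */
}
#endif
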
/*
 *  REFCOUNTED LOCK OBJECTS
 */

/*
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */

struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        lock->l_refc++;
        ldlm_resource_getref(lock->l_resource);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        return lock;
}

void ldlm_lock_put(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;

        l_lock(&ns->ns_lock);
        lock->l_refc--;
        //LDLM_DEBUG(lock, "after refc--");
        if (lock->l_refc < 0)
                LBUG();

        ldlm_resource_put(lock->l_resource);
        if (lock->l_parent)
                LDLM_LOCK_PUT(lock->l_parent);

        if (lock->l_refc == 0 && (lock->l_flags & LDLM_FL_DESTROYED)) {
                l_unlock(&ns->ns_lock);
                LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing");

                //spin_lock(&ldlm_handle_lock);
                spin_lock(&ns->ns_counter_lock);
                ns->ns_locks--;
                spin_unlock(&ns->ns_counter_lock);

                lock->l_resource = NULL;
                if (lock->l_export && lock->l_export->exp_connection)
                        ptlrpc_put_connection(lock->l_export->exp_connection);
                kmem_cache_free(ldlm_lock_slab, lock);
                //spin_unlock(&ldlm_handle_lock);
                CDEBUG(D_MALLOC, "kfreed 'lock': %d at %p (tot 0).\n",
                       sizeof(*lock), lock);
        } else
                l_unlock(&ns->ns_lock);
}

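/*
 * Illustration of the get/put discipline described above; a sketch only.
 * The helper name is made up, and it assumes "lock" is already referenced
 * by the caller.
 */
#if 0
static void example_lock_ref_usage(struct ldlm_lock *lock)
{
        struct ldlm_lock *extra;

        extra = LDLM_LOCK_GET(lock);    /* pin the lock (and its resource) */
        /* ... safe to use the lock here ... */
        LDLM_LOCK_PUT(extra);           /* every get is paired with a put;
                                         * the final put of a destroyed lock
                                         * frees it */
}
#endif
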
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);

        if (!list_empty(&lock->l_children)) {
                LDLM_DEBUG(lock, "still has children (%p)!",
                           lock->l_children.next);
                ldlm_lock_dump(lock);
                LBUG();
        }
        if (lock->l_readers || lock->l_writers) {
                LDLM_DEBUG(lock, "lock still has references");
                ldlm_lock_dump(lock);
                LBUG();
        }
        if (!list_empty(&lock->l_res_link)) {
                ldlm_lock_dump(lock);
                LBUG();
        }

        if (lock->l_flags & LDLM_FL_DESTROYED) {
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                return;
        }

        list_del(&lock->l_export_chain);
        lock->l_export = NULL;
        lock->l_flags |= LDLM_FL_DESTROYED;

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        /* drop the reference taken for the lock's allocation */
        LDLM_LOCK_PUT(lock);
}

/*
 * usage: pass in a resource on which you have done a get
 *        pass in a parent lock on which you have done a get
 *        do not put the resource or the parent
 * returns: lock with refcount 1
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
                                       struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;

        if (resource == NULL)
                LBUG();

        lock = kmem_cache_alloc(ldlm_lock_slab, SLAB_KERNEL);
        if (lock == NULL)
                return NULL;

        memset(lock, 0, sizeof(*lock));
        get_random_bytes(&lock->l_random, sizeof(__u64));

        lock->l_resource = resource;
        /* this refcount matches the one of the resource passed
           in which is not being put away */
        lock->l_refc = 1;
        INIT_LIST_HEAD(&lock->l_children);
        INIT_LIST_HEAD(&lock->l_res_link);
        INIT_LIST_HEAD(&lock->l_export_chain);
        INIT_LIST_HEAD(&lock->l_pending_chain);
        init_waitqueue_head(&lock->l_waitq);

        spin_lock(&resource->lr_namespace->ns_counter_lock);
        resource->lr_namespace->ns_locks++;
        spin_unlock(&resource->lr_namespace->ns_counter_lock);

        if (parent != NULL) {
                l_lock(&parent->l_resource->lr_namespace->ns_lock);
                lock->l_parent = parent;
                list_add(&lock->l_childof, &parent->l_children);
                l_unlock(&parent->l_resource->lr_namespace->ns_lock);
        }

        CDEBUG(D_MALLOC, "kmalloced 'lock': %d at "
               "%p (tot %d).\n", sizeof(*lock), lock, 1);
        /* this is the extra refcount, to prevent the lock from evaporating */
        LDLM_LOCK_GET(lock);
        return lock;
}

int ldlm_lock_change_resource(struct ldlm_lock *lock, __u64 new_resid[3])
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        struct ldlm_resource *oldres = lock->l_resource;
        int type, i, rc;

        l_lock(&ns->ns_lock);
        if (memcmp(new_resid, lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* nothing to do */
                l_unlock(&ns->ns_lock);
                return 0;
        }

        type = lock->l_resource->lr_type;
        if (new_resid[0] == 0)
                LBUG();
        lock->l_resource = ldlm_resource_get(ns, NULL, new_resid, type, 1);
        if (lock->l_resource == NULL) {
                LBUG();
                return -ENOMEM;
        }

        /* move references over */
        for (i = 0; i < lock->l_refc; i++) {
                ldlm_resource_getref(lock->l_resource);
                rc = ldlm_resource_put(oldres);
                if (rc == 1 && i != lock->l_refc - 1)
                        LBUG();
        }
        /* compensate for the initial get above.. */
        ldlm_resource_put(lock->l_resource);

        l_unlock(&ns->ns_lock);
        return 0;
}

void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->addr = (__u64) (unsigned long)lock;
        lockh->cookie = lock->l_random;
}

struct ldlm_lock *ldlm_handle2lock(struct lustre_handle *handle)
{
        struct ldlm_lock *lock = NULL, *retval = NULL;

        if (!handle || !handle->addr) {
                CERROR("bogus handle %p->"LPX64"\n", handle,
                       handle ? handle->addr : -1);
                return NULL;
        }

        //spin_lock(&ldlm_handle_lock);
        lock = (struct ldlm_lock *)(unsigned long)(handle->addr);
        if (!kmem_cache_validate(ldlm_lock_slab, (void *)lock)) {
                CERROR("bogus lock %p\n", lock);
                goto out2;
        }

        if (!lock->l_resource) {
                CERROR("trying to lock bogus resource: lock %p\n", lock);
                LDLM_DEBUG(lock, "ldlm_handle2lock(%p)", lock);
                goto out2;
        }

        if (!lock->l_resource->lr_namespace) {
                CERROR("trying to lock bogus namespace: lock %p\n", lock);
                LDLM_DEBUG(lock, "ldlm_handle2lock(%p)", lock);
                goto out2;
        }

        if (lock->l_random != handle->cookie) {
                CERROR("bogus cookie: lock "LPX64", handle "LPX64"\n",
                       lock->l_random, handle->cookie);
                goto out2;
        }

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_DESTROYED) {
                CERROR("lock already destroyed: lock %p\n", lock);
                LDLM_DEBUG(lock, "ldlm_handle2lock(%p)", lock);
                goto out;
        }

        retval = LDLM_LOCK_GET(lock);
        if (!retval)
                CERROR("lock disappeared below us!!! %p\n", lock);
 out:
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
 out2:
        //spin_unlock(&ldlm_handle_lock);
        return retval;
}

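/*
 * Sketch of the handle round trip: a lock is flattened into a lustre_handle
 * and later looked up again.  The caller name is an illustrative assumption;
 * ldlm_lock2handle()/ldlm_handle2lock() above are the real converters.
 */
#if 0
static void example_handle_roundtrip(struct ldlm_lock *lock)
{
        struct lustre_handle lockh;
        struct ldlm_lock *found;

        ldlm_lock2handle(lock, &lockh);         /* addr + random cookie */
        found = ldlm_handle2lock(&lockh);       /* validates, takes a ref */
        if (found != NULL)
                LDLM_LOCK_PUT(found);           /* drop the ref we just took */
}
#endif
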
static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b)
{
        return lockmode_compat(a->l_req_mode, b->l_req_mode);
}

void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        ldlm_res2desc(lock->l_resource, &desc->l_resource);
        desc->l_req_mode = lock->l_req_mode;
        desc->l_granted_mode = lock->l_granted_mode;
        memcpy(&desc->l_extent, &lock->l_extent, sizeof(desc->l_extent));
        memcpy(desc->l_version, lock->l_version, sizeof(desc->l_version));
}

static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
                                   struct ldlm_lock *new)
{
        struct ldlm_ast_work *w;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (new && (lock->l_flags & LDLM_FL_AST_SENT))
                goto out;

        OBD_ALLOC(w, sizeof(*w));
        if (w == NULL)
                goto out;

        if (new) {
                lock->l_flags |= LDLM_FL_AST_SENT;
                w->w_blocking = 1;
                ldlm_lock2desc(new, &w->w_desc);
        }

        w->w_lock = LDLM_LOCK_GET(lock);
        list_add(&w->w_list, lock->l_resource->lr_tmp);
 out:
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}

void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock); /* matches the get in handle2lock */
}

/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR)
                lock->l_readers++;
        else
                lock->l_writers++;
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        LDLM_LOCK_GET(lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}

/* Args: unlocked lock */
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);

        if (lock == NULL)
                LBUG();

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR)
                lock->l_readers--;
        else
                lock->l_writers--;

        /* If we received a blocked AST and this was the last reference,
         * run the callback. */
        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                if (!lock->l_resource->lr_namespace->ns_client &&
                    lock->l_export != NULL)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

                /* FIXME: need a real 'desc' here */
                lock->l_blocking_ast(lock, NULL, lock->l_data,
                                     lock->l_data_len, LDLM_CB_BLOCKING);
        } else
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        LDLM_LOCK_PUT(lock); /* matches the ldlm_lock_get in addref */
        LDLM_LOCK_PUT(lock); /* matches the handle2lock above */
}

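/*
 * Sketch of how a caller typically pins and releases a lock by handle.
 * The function and the LCK_PR choice are illustrative assumptions; only
 * ldlm_lock_addref()/ldlm_lock_decref() above are real.
 */
#if 0
static void example_pin_and_release(struct lustre_handle *lockh)
{
        ldlm_lock_addref(lockh, LCK_PR);   /* take a reader reference */
        /* ... read data covered by the lock ... */
        ldlm_lock_decref(lockh, LCK_PR);   /* may fire the blocking AST if
                                            * CBPENDING and last reference */
}
#endif
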
static int ldlm_lock_compat_list(struct ldlm_lock *lock, int send_cbs,
                                 struct list_head *queue)
{
        struct list_head *tmp, *pos;
        int rc = 1;

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *child;
                ldlm_res_compat compat;

                child = list_entry(tmp, struct ldlm_lock, l_res_link);
                if (child == lock)
                        continue;

                compat = ldlm_res_compat_table[child->l_resource->lr_type];
                if (compat && compat(child, lock)) {
                        CDEBUG(D_OTHER, "compat function succeeded, next.\n");
                        continue;
                }
                if (lockmode_compat(child->l_granted_mode, lock->l_req_mode)) {
                        CDEBUG(D_OTHER, "lock modes are compatible, next.\n");
                        continue;
                }

                rc = 0;

                if (send_cbs && child->l_blocking_ast != NULL) {
                        CDEBUG(D_OTHER, "lock %p incompatible; sending "
                               "blocking AST.\n", child);
                        ldlm_add_ast_work_item(child, lock);
                }
        }

        return rc;
}

static int ldlm_lock_compat(struct ldlm_lock *lock, int send_cbs)
{
        int rc;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        rc = ldlm_lock_compat_list(lock, send_cbs,
                                   &lock->l_resource->lr_granted);
        /* FIXME: should we be sending ASTs to converting? */
        if (rc)
                rc = ldlm_lock_compat_list(lock, send_cbs,
                                           &lock->l_resource->lr_converting);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        return rc;
}

/*
 *  - ldlm_handle_enqueue - resource
 */
void ldlm_grant_lock(struct ldlm_lock *lock)
{
        struct ldlm_resource *res = lock->l_resource;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_resource_add_lock(res, &res->lr_granted, lock);
        lock->l_granted_mode = lock->l_req_mode;

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (lock->l_completion_ast) {
                ldlm_add_ast_work_item(lock, NULL);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}

/* returns a referenced lock or NULL */
static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode,
                                      struct ldlm_extent *extent)
{
        struct ldlm_lock *lock;
        struct list_head *tmp;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock->l_flags & (LDLM_FL_CBPENDING | LDLM_FL_DESTROYED))
                        continue;

                /* lock_convert() takes the resource lock, so we're sure that
                 * req_mode, lr_type, and l_cookie won't change beneath us */
                if (lock->l_req_mode != mode)
                        continue;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_extent.start > extent->start ||
                     lock->l_extent.end < extent->end))
                        continue;

                ldlm_lock_addref_internal(lock, mode);
                return lock;
        }

        return NULL;
}

/* Must be called with no resource or lock locks held.
 *
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock.
 */
int ldlm_lock_match(struct ldlm_namespace *ns, __u64 *res_id, __u32 type,
                    void *cookie, int cookielen, ldlm_mode_t mode,
                    struct lustre_handle *lockh)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock = NULL;
        int rc = 0;

        res = ldlm_resource_get(ns, NULL, res_id, type, 0);
        if (res == NULL)
                return 0;

        ns = res->lr_namespace;
        l_lock(&ns->ns_lock);

        if ((lock = search_queue(&res->lr_granted, mode, cookie)))
                rc = 1;
        else if ((lock = search_queue(&res->lr_converting, mode, cookie)))
                rc = 1;
        else if ((lock = search_queue(&res->lr_waiting, mode, cookie)))
                rc = 1;

        ldlm_resource_put(res);
        l_unlock(&ns->ns_lock);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                if (lock->l_completion_ast)
                        lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC);
        }

        if (rc)
                LDLM_DEBUG(lock, "matched");
        else
                LDLM_DEBUG_NOLOCK("not matched");

        return rc;
}

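/*
 * Sketch of a client-side match before enqueueing a new lock.  The resource
 * id values and the extent are illustrative assumptions; ldlm_lock_match()
 * and ldlm_lock_decref() above are the real entry points.
 */
#if 0
static int example_match_extent(struct ldlm_namespace *ns)
{
        __u64 res_id[3] = { 12345, 0, 0 };      /* hypothetical resource  */
        struct ldlm_extent extent = { 0, 4095 };/* byte range we need     */
        struct lustre_handle lockh;

        if (ldlm_lock_match(ns, res_id, LDLM_EXTENT, &extent, sizeof(extent),
                            LCK_PR, &lockh)) {
                /* an existing PR lock covers the extent; lockh is addref'd */
                ldlm_lock_decref(&lockh, LCK_PR);
                return 1;
        }
        return 0;       /* no match; caller would enqueue a new lock */
}
#endif
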
/* Returns a referenced lock */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   struct lustre_handle *parent_lock_handle,
                                   __u64 *res_id, __u32 type,
                                   ldlm_mode_t mode, void *data, __u32 data_len)
{
        struct ldlm_resource *res, *parent_res = NULL;
        struct ldlm_lock *lock, *parent_lock;

        parent_lock = ldlm_handle2lock(parent_lock_handle);
        if (parent_lock)
                parent_res = parent_lock->l_resource;

        res = ldlm_resource_get(ns, parent_res, res_id, type, 1);
        if (res == NULL)
                return NULL;

        lock = ldlm_lock_new(parent_lock, res);
        if (lock == NULL) {
                ldlm_resource_put(res);
                return NULL;
        }

        lock->l_req_mode = mode;
        lock->l_data = data;
        lock->l_data_len = data_len;

        return lock;
}

/* Must be called with lock->l_lock and lock->l_resource->lr_lock not held */
ldlm_error_t ldlm_lock_enqueue(struct ldlm_lock *lock,
                               void *cookie, int cookie_len, int *flags,
                               ldlm_completion_callback completion,
                               ldlm_blocking_callback blocking)
{
        struct ldlm_resource *res;
        int local;
        ldlm_res_policy policy;

        res = lock->l_resource;
        lock->l_blocking_ast = blocking;

        if (res->lr_type == LDLM_EXTENT)
                memcpy(&lock->l_extent, cookie, sizeof(lock->l_extent));

        /* policies are not executed on the client */
        local = res->lr_namespace->ns_client;
        if (!local && (policy = ldlm_res_policy_table[res->lr_type])) {
                int rc = policy(lock, cookie, lock->l_req_mode, NULL);

                if (rc == ELDLM_LOCK_CHANGED) {
                        res = lock->l_resource;
                        *flags |= LDLM_FL_LOCK_CHANGED;
                } else if (rc == ELDLM_LOCK_ABORTED) {
                        ldlm_lock_destroy(lock);
                        return rc;
                }
        }

        lock->l_cookie = cookie;
        lock->l_cookie_len = cookie_len;

        l_lock(&res->lr_namespace->ns_lock);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted before
                 * we got a chance to actually enqueue it.  We don't need to do
                 * anything else. */
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                            LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
                goto out;
        }

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below).  In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags'. */
        ldlm_resource_unlink_lock(lock);
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, res->lr_converting.prev,
                                               lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
                else
                        ldlm_grant_lock(lock);
                goto out;
        }

        /* FIXME: We may want to optimize by checking lr_most_restr */
        if (!list_empty(&res->lr_converting)) {
                ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
                *flags |= LDLM_FL_BLOCK_CONV;
                goto out;
        }
        if (!list_empty(&res->lr_waiting)) {
                ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
                *flags |= LDLM_FL_BLOCK_WAIT;
                goto out;
        }
        if (!ldlm_lock_compat(lock, 0)) {
                ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
                *flags |= LDLM_FL_BLOCK_GRANTED;
                goto out;
        }

        ldlm_grant_lock(lock);
 out:
        l_unlock(&res->lr_namespace->ns_lock);
        /* Don't set 'completion_ast' until here so that if the lock is granted
         * immediately we don't do an unnecessary completion call. */
        lock->l_completion_ast = completion;
        return ELDLM_OK;
}

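/*
 * Sketch of a local create + enqueue sequence.  The resource id, the
 * whole-object extent and the callback arguments are illustrative
 * assumptions; ldlm_lock_create() and ldlm_lock_enqueue() above are the
 * real calls.
 */
#if 0
static int example_local_enqueue(struct ldlm_namespace *ns,
                                 ldlm_completion_callback completion,
                                 ldlm_blocking_callback blocking)
{
        __u64 res_id[3] = { 12345, 0, 0 };      /* hypothetical resource */
        struct ldlm_extent extent = { 0, ~0ULL };
        struct ldlm_lock *lock;
        int flags = 0;
        ldlm_error_t err;

        lock = ldlm_lock_create(ns, NULL, res_id, LDLM_EXTENT, LCK_PW,
                                NULL, 0);
        if (lock == NULL)
                return -ENOMEM;

        err = ldlm_lock_enqueue(lock, &extent, sizeof(extent), &flags,
                                completion, blocking);
        /* on ELDLM_OK the flags say whether the lock was granted or is
         * blocked waiting (LDLM_FL_BLOCK_*) */
        return err;
}
#endif
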
/* Must be called with namespace taken: queue is waiting or converting. */
static int ldlm_reprocess_queue(struct ldlm_resource *res,
                                struct list_head *queue)
{
        struct list_head *tmp, *pos;
        int rc = 0;

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;
                pending = list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                if (!ldlm_lock_compat(pending, 1))
                        break;

                list_del_init(&pending->l_res_link);
                ldlm_grant_lock(pending);
        }

        return rc;
}

void ldlm_run_ast_work(struct list_head *rpc_list)
{
        struct list_head *tmp, *pos;
        int rc;

        list_for_each_safe(tmp, pos, rpc_list) {
                struct ldlm_ast_work *w =
                        list_entry(tmp, struct ldlm_ast_work, w_list);

                if (w->w_blocking)
                        rc = w->w_lock->l_blocking_ast
                                (w->w_lock, &w->w_desc, w->w_data,
                                 w->w_datalen, LDLM_CB_BLOCKING);
                else
                        rc = w->w_lock->l_completion_ast(w->w_lock, w->w_flags);
                if (rc)
                        CERROR("Failed AST - should clean & disconnect "
                               "client\n");
                LDLM_LOCK_PUT(w->w_lock);
                list_del(&w->w_list);
                OBD_FREE(w, sizeof(*w));
        }
}

/* Must be called with resource->lr_lock not taken. */
void ldlm_reprocess_all(struct ldlm_resource *res)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);

        /* Local lock trees don't get reprocessed. */
        if (res->lr_namespace->ns_client) {
                return;
        }

        l_lock(&res->lr_namespace->ns_lock);
        res->lr_tmp = &rpc_list;

        ldlm_reprocess_queue(res, &res->lr_converting);
        if (list_empty(&res->lr_converting))
                ldlm_reprocess_queue(res, &res->lr_waiting);

        res->lr_tmp = NULL;
        l_unlock(&res->lr_namespace->ns_lock);

        ldlm_run_ast_work(&rpc_list);
}

void ldlm_cancel_callback(struct ldlm_lock *lock)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (!(lock->l_flags & LDLM_FL_CANCEL)) {
                lock->l_flags |= LDLM_FL_CANCEL;
                lock->l_blocking_ast(lock, NULL, lock->l_data,
                                     lock->l_data_len, LDLM_CB_CANCELING);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}

void ldlm_lock_cancel(struct ldlm_lock *lock)
{
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);
        if (lock->l_readers || lock->l_writers)
                CDEBUG(D_INFO, "lock still has references (%d readers, %d "
                       "writers)\n", lock->l_readers, lock->l_writers);

        ldlm_cancel_callback(lock);

        ldlm_del_waiting_lock(lock);
        ldlm_resource_unlink_lock(lock);
        ldlm_lock_destroy(lock);
        l_unlock(&ns->ns_lock);
}

void ldlm_cancel_locks_for_export(struct obd_export *exp)
{
        struct list_head *iter, *n; /* MUST BE CALLED "n"! */

        list_for_each_safe(iter, n, &exp->exp_ldlm_data.led_held_locks) {
                struct ldlm_lock *lock;
                struct ldlm_resource *res;
                lock = list_entry(iter, struct ldlm_lock, l_export_chain);
                res = ldlm_resource_getref(lock->l_resource);
                LDLM_DEBUG(lock, "cancelling lock for export %p", exp);
                ldlm_lock_cancel(lock);
                ldlm_reprocess_all(res);
                ldlm_resource_put(res);
        }
}

struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        int *flags)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);

        lock->l_req_mode = new_mode;
        ldlm_resource_unlink_lock(lock);

        /* If this is a local resource, put it on the appropriate list. */
        if (res->lr_namespace->ns_client) {
                if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
                        ldlm_resource_add_lock(res, res->lr_converting.prev,
                                               lock);
                } else {
                        /* This should never happen, because of the way the
                         * server handles conversions. */
                        LBUG();

                        res->lr_tmp = &rpc_list;
                        ldlm_grant_lock(lock);
                        res->lr_tmp = NULL;
                        /* FIXME: completion handling not with ns_lock held ! */
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0);
                }
        } else {
                /* FIXME: We should try the conversion right away and possibly
                 * return success without the need for an extra AST */
                ldlm_resource_add_lock(res, res->lr_converting.prev, lock);
                *flags |= LDLM_FL_BLOCK_CONV;
        }

        l_unlock(&ns->ns_lock);

        ldlm_run_ast_work(&rpc_list);
        return res;
}

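/*
 * Sketch of how a caller might ask for a mode conversion.  The mode choice
 * and the follow-up reprocess are illustrative assumptions; on a client the
 * usual outcome is LDLM_FL_BLOCK_CONV until the server-side conversion
 * completes.
 */
#if 0
static void example_convert_to_pr(struct ldlm_lock *lock)
{
        int flags = 0;
        struct ldlm_resource *res;

        res = ldlm_lock_convert(lock, LCK_PR, &flags);
        if (flags & LDLM_FL_BLOCK_CONV) {
                /* wait for the completion AST before using the new mode */
        }
        ldlm_reprocess_all(res);        /* let other waiters re-run */
}
#endif
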
void ldlm_lock_dump(struct ldlm_lock *lock)
{
        char ver[128];

        if (!(portal_debug & D_OTHER))
                return;

        if (RES_VERSION_SIZE != 4)
                LBUG();

        if (!lock) {
                CDEBUG(D_OTHER, "  NULL LDLM lock\n");
                return;
        }

        snprintf(ver, sizeof(ver), "%x %x %x %x",
                 lock->l_version[0], lock->l_version[1],
                 lock->l_version[2], lock->l_version[3]);

        CDEBUG(D_OTHER, "  -- Lock dump: %p (%s)\n", lock, ver);
        if (lock->l_export && lock->l_export->exp_connection)
                CDEBUG(D_OTHER, "  Node: NID %x (rhandle: "LPX64")\n",
                       lock->l_export->exp_connection->c_peer.peer_nid,
                       lock->l_remote_handle.addr);
        else
                CDEBUG(D_OTHER, "  Node: local\n");
        CDEBUG(D_OTHER, "  Parent: %p\n", lock->l_parent);
        CDEBUG(D_OTHER, "  Resource: %p ("LPD64")\n", lock->l_resource,
               lock->l_resource->lr_name[0]);
        CDEBUG(D_OTHER, "  Requested mode: %d, granted mode: %d\n",
               (int)lock->l_req_mode, (int)lock->l_granted_mode);
        CDEBUG(D_OTHER, "  Readers: %u ; Writers: %u\n",
               lock->l_readers, lock->l_writers);
        if (lock->l_resource->lr_type == LDLM_EXTENT)
                CDEBUG(D_OTHER, "  Extent: %Lu -> %Lu\n",
                       (unsigned long long)lock->l_extent.start,
                       (unsigned long long)lock->l_extent.end);
}