/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lock.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
# ifndef HAVE_VFS_INTENT_PATCHES
# include <linux/lustre_intent.h>
# endif
#else
# include <liblustre.h>
#endif

#include <obd_class.h>
#include "ldlm_internal.h"
/* lock types */
char *ldlm_lockname[] = {
        [0] "--",
        [LCK_EX] "EX",
        [LCK_PW] "PW",
        [LCK_PR] "PR",
        [LCK_CW] "CW",
        [LCK_CR] "CR",
        [LCK_NL] "NL",
        [LCK_GROUP] "GROUP",
        [LCK_COS] "COS"
};

char *ldlm_typename[] = {
        [LDLM_PLAIN] "PLN",
        [LDLM_EXTENT] "EXT",
        [LDLM_FLOCK] "FLK",
        [LDLM_IBITS] "IBT",
};

char *ldlm_it2str(int it)
{
        switch (it) {
        case IT_OPEN:
                return "open";
        case IT_CREAT:
                return "creat";
        case (IT_OPEN | IT_CREAT):
                return "open|creat";
        case IT_READDIR:
                return "readdir";
        case IT_GETATTR:
                return "getattr";
        case IT_LOOKUP:
                return "lookup";
        case IT_UNLINK:
                return "unlink";
        case IT_GETXATTR:
                return "getxattr";
        default:
                CERROR("Unknown intent %d\n", it);
                return "UNKNOWN";
        }
}
extern cfs_mem_cache_t *ldlm_lock_slab;

static ldlm_processing_policy ldlm_processing_policy_table[] = {
        [LDLM_PLAIN] ldlm_process_plain_lock,
        [LDLM_EXTENT] ldlm_process_extent_lock,
#ifdef __KERNEL__
        [LDLM_FLOCK] ldlm_process_flock_lock,
#endif
        [LDLM_IBITS] ldlm_process_inodebits_lock,
};

ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
{
        return ldlm_processing_policy_table[res->lr_type];
}

void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
{
        ns->ns_policy = arg;
}
/*
 * REFCOUNTED LOCK OBJECTS
 */

/*
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */
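
/*
 * Illustrative sketch (hypothetical helper, not part of the original source;
 * compiled out): the refcount discipline above means every temporary pin of
 * a lock must pair a get with a put.
 */
#if 0
static void example_pin_lock_briefly(struct ldlm_lock *lock)
{
        LDLM_LOCK_GET(lock);            /* pin: caller now owns a reference */
        LDLM_DEBUG(lock, "inspecting pinned lock");
        LDLM_LOCK_PUT(lock);            /* unpin: may free on the last ref */
}
#endif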
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        cfs_atomic_inc(&lock->l_refc);
        return lock;
}

static void ldlm_lock_free(struct ldlm_lock *lock, size_t size)
{
        LASSERT(size == sizeof(*lock));
        OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
}
void ldlm_lock_put(struct ldlm_lock *lock)
{
        LASSERT(lock->l_resource != LP_POISON);
        LASSERT(cfs_atomic_read(&lock->l_refc) > 0);
        if (cfs_atomic_dec_and_test(&lock->l_refc)) {
                struct ldlm_resource *res;

                LDLM_DEBUG(lock,
                           "final lock_put on destroyed lock, freeing it.");

                res = lock->l_resource;
                LASSERT(lock->l_destroyed);
                LASSERT(cfs_list_empty(&lock->l_res_link));
                LASSERT(cfs_list_empty(&lock->l_pending_chain));

                cfs_atomic_dec(&ldlm_res_to_ns(res)->ns_locks);
                lu_ref_del(&res->lr_reference, "lock", lock);
                ldlm_resource_putref(res);
                lock->l_resource = NULL;
                if (lock->l_export) {
                        class_export_lock_put(lock->l_export, lock);
                        lock->l_export = NULL;
                }

                if (lock->l_lvb_data != NULL)
                        OBD_FREE(lock->l_lvb_data, lock->l_lvb_len);

                ldlm_interval_free(ldlm_interval_detach(lock));
                lu_ref_fini(&lock->l_reference);
                OBD_FREE_RCU_CB(lock, sizeof(*lock), &lock->l_handle,
                                ldlm_lock_free);
        }
}
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
        int rc = 0;

        if (!cfs_list_empty(&lock->l_lru)) {
                struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
                cfs_list_del_init(&lock->l_lru);
                if (lock->l_flags & LDLM_FL_SKIPPED)
                        lock->l_flags &= ~LDLM_FL_SKIPPED;
                LASSERT(ns->ns_nr_unused > 0);
                ns->ns_nr_unused--;
                rc = 1;
        }
        return rc;
}

int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
        int rc;

        cfs_spin_lock(&ns->ns_unused_lock);
        rc = ldlm_lock_remove_from_lru_nolock(lock);
        cfs_spin_unlock(&ns->ns_unused_lock);
        return rc;
}
void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        lock->l_last_used = cfs_time_current();
        LASSERT(cfs_list_empty(&lock->l_lru));
        LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
        cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
        LASSERT(ns->ns_nr_unused >= 0);
        ns->ns_nr_unused++;
}

void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        cfs_spin_lock(&ns->ns_unused_lock);
        ldlm_lock_add_to_lru_nolock(lock);
        cfs_spin_unlock(&ns->ns_unused_lock);
}
void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        cfs_spin_lock(&ns->ns_unused_lock);
        if (!cfs_list_empty(&lock->l_lru)) {
                ldlm_lock_remove_from_lru_nolock(lock);
                ldlm_lock_add_to_lru_nolock(lock);
        }
        cfs_spin_unlock(&ns->ns_unused_lock);
}
/* This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die. Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock,
 * because it's not in the hash table anymore. -phil */
int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
{
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (!cfs_list_empty(&lock->l_res_link)) {
                LDLM_ERROR(lock, "lock still on resource");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (lock->l_destroyed) {
                LASSERT(cfs_list_empty(&lock->l_lru));
                return 0;
        }
        lock->l_destroyed = 1;

        if (lock->l_export && lock->l_export->exp_lock_hash &&
            !cfs_hlist_unhashed(&lock->l_exp_hash))
                cfs_hash_del(lock->l_export->exp_lock_hash,
                             &lock->l_remote_handle, &lock->l_exp_hash);

        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

#if 0
        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        if (lock->l_export)
                class_export_put(lock->l_export);
        lock->l_export = NULL;
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0);
#endif
        return 1;
}
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        int first;

        lock_res_and_lock(lock);
        first = ldlm_lock_destroy_internal(lock);
        unlock_res_and_lock(lock);

        /* drop the hash-table reference only on the first destroy */
        if (first) {
                lu_ref_del(&lock->l_reference, "hash", lock);
                LDLM_LOCK_RELEASE(lock);
        }
}

void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
{
        int first;

        first = ldlm_lock_destroy_internal(lock);
        /* drop the hash-table reference only on the first destroy */
        if (first) {
                lu_ref_del(&lock->l_reference, "hash", lock);
                LDLM_LOCK_RELEASE(lock);
        }
}
/* this is called by portals_handle2object with the handle lock taken */
static void lock_handle_addref(void *lock)
{
        LDLM_LOCK_GET((struct ldlm_lock *)lock);
}
/*
 * usage: pass in a resource on which you have done ldlm_resource_get
 *        pass in a parent lock on which you have done a ldlm_lock_get
 *        after return, ldlm_*_put the resource and parent
 * returns: lock with refcount 2 - one for current caller and one for remote
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;
        ENTRY;

        if (resource == NULL)
                LBUG();

        OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, CFS_ALLOC_IO);
        if (lock == NULL)
                RETURN(NULL);

        cfs_spin_lock_init(&lock->l_lock);
        lock->l_resource = ldlm_resource_getref(resource);
        lu_ref_add(&resource->lr_reference, "lock", lock);

        cfs_atomic_set(&lock->l_refc, 2);
        CFS_INIT_LIST_HEAD(&lock->l_res_link);
        CFS_INIT_LIST_HEAD(&lock->l_lru);
        CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
        CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
        CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
        CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
        cfs_waitq_init(&lock->l_waitq);
        lock->l_blocking_lock = NULL;
        CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
        CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
        CFS_INIT_HLIST_NODE(&lock->l_exp_hash);

        cfs_atomic_inc(&ldlm_res_to_ns(resource)->ns_locks);
        CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, lock_handle_addref);

        lu_ref_init(&lock->l_reference);
        lu_ref_add(&lock->l_reference, "hash", lock);
        lock->l_callback_timeout = 0;

#if LUSTRE_TRACKS_LOCK_EXP_REFS
        CFS_INIT_LIST_HEAD(&lock->l_exp_refs_link);
        lock->l_exp_refs_nr = 0;
        lock->l_exp_refs_target = NULL;
#endif

        RETURN(lock);
}
int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              const struct ldlm_res_id *new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;
        struct ldlm_resource *newres;
        int type;
        ENTRY;

        LASSERT(ns_is_client(ns));

        lock_res_and_lock(lock);
        if (memcmp(new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                unlock_res_and_lock(lock);
                RETURN(0);
        }

        LASSERT(new_resid->name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(cfs_list_empty(&lock->l_res_link));

        type = oldres->lr_type;
        unlock_res_and_lock(lock);

        newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
        lu_ref_add(&newres->lr_reference, "lock", lock);
        if (newres == NULL)
                RETURN(-ENOMEM);

        /*
         * To flip the lock from the old to the new resource, lock, oldres and
         * newres have to be locked. Resource spin-locks are nested within
         * lock->l_lock, and are taken in the memory address order to avoid
         * dead-locks.
         */
        cfs_spin_lock(&lock->l_lock);
        oldres = lock->l_resource;
        if (oldres < newres) {
                lock_res(oldres);
                lock_res_nested(newres, LRT_NEW);
        } else {
                lock_res(newres);
                lock_res_nested(oldres, LRT_NEW);
        }
        LASSERT(memcmp(new_resid, &oldres->lr_name,
                       sizeof oldres->lr_name) != 0);
        lock->l_resource = newres;
        unlock_res(oldres);
        unlock_res_and_lock(lock);

        /* ...and the flowers are still standing! */
        lu_ref_del(&oldres->lr_reference, "lock", lock);
        ldlm_resource_putref(oldres);

        RETURN(0);
}
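
/*
 * Illustrative sketch (hypothetical helper, not in the original source;
 * compiled out) of the address-ordering rule used above: when two resource
 * spin-locks must be held at once, always take the lower-addressed one
 * first so that concurrent callers cannot deadlock.
 */
#if 0
static void example_lock_two_resources(struct ldlm_resource *a,
                                       struct ldlm_resource *b)
{
        struct ldlm_resource *first = (a < b) ? a : b;
        struct ldlm_resource *second = (a < b) ? b : a;

        lock_res(first);
        lock_res_nested(second, LRT_NEW);
        /* ... move state between the two resources ... */
        unlock_res(second);
        unlock_res(first);
}
#endif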
void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->cookie = lock->l_handle.h_cookie;
}
/* if flags: atomically get the lock and set the flags.
 * Return NULL if flag already set
 */
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
                                     int flags)
{
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock, *retval = NULL;
        ENTRY;

        LASSERT(handle);

        lock = class_handle2object(handle->cookie);
        if (lock == NULL)
                RETURN(NULL);

        LASSERT(lock->l_resource != NULL);
        ns = ldlm_lock_to_ns(lock);
        LASSERT(ns != NULL);

        lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current());
        lock_res_and_lock(lock);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if (lock->l_destroyed) {
                unlock_res_and_lock(lock);
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags && (lock->l_flags & flags)) {
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
                GOTO(out, retval);
        }

        if (flags)
                lock->l_flags |= flags;

        retval = lock;
        unlock_res_and_lock(lock);
 out:
        RETURN(retval);
}
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        struct obd_export *exp = lock->l_export ?: lock->l_conn_export;

        /* INODEBITS_INTEROP: If the other side does not support
         * inodebits, reply with a plain lock descriptor. */
        if ((lock->l_resource->lr_type == LDLM_IBITS) &&
            (exp && !(exp->exp_connect_flags & OBD_CONNECT_IBITS))) {
                /* Make sure all the right bits are set in this lock we
                   are going to pass to client */
                LASSERTF(lock->l_policy_data.l_inodebits.bits ==
                         (MDS_INODELOCK_LOOKUP|MDS_INODELOCK_UPDATE),
                         "Inappropriate inode lock bits during "
                         "conversion " LPU64 "\n",
                         lock->l_policy_data.l_inodebits.bits);

                ldlm_res2desc(lock->l_resource, &desc->l_resource);
                desc->l_resource.lr_type = LDLM_PLAIN;

                /* Convert "new" lock mode to something an old client can
                   understand */
                if ((lock->l_req_mode == LCK_CR) ||
                    (lock->l_req_mode == LCK_CW))
                        desc->l_req_mode = LCK_PR;
                else
                        desc->l_req_mode = lock->l_req_mode;
                if ((lock->l_granted_mode == LCK_CR) ||
                    (lock->l_granted_mode == LCK_CW)) {
                        desc->l_granted_mode = LCK_PR;
                } else {
                        /* We never grant PW/EX locks to clients */
                        LASSERT((lock->l_granted_mode != LCK_PW) &&
                                (lock->l_granted_mode != LCK_EX));
                        desc->l_granted_mode = lock->l_granted_mode;
                }

                /* We do not copy policy here, because there is no
                   policy for plain locks */
        } else {
                ldlm_res2desc(lock->l_resource, &desc->l_resource);
                desc->l_req_mode = lock->l_req_mode;
                desc->l_granted_mode = lock->l_granted_mode;
                desc->l_policy_data = lock->l_policy_data;
        }
}
void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                           cfs_list_t *work_list)
{
        if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
                lock->l_flags |= LDLM_FL_AST_SENT;
                /* If the enqueuing client said so, tell the AST recipient to
                 * discard dirty data, rather than writing back. */
                if (new->l_flags & LDLM_AST_DISCARD_DATA)
                        lock->l_flags |= LDLM_FL_DISCARD_DATA;
                LASSERT(cfs_list_empty(&lock->l_bl_ast));
                cfs_list_add(&lock->l_bl_ast, work_list);
                LDLM_LOCK_GET(lock);
                LASSERT(lock->l_blocking_lock == NULL);
                lock->l_blocking_lock = LDLM_LOCK_GET(new);
        }
}

void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
{
        if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
                lock->l_flags |= LDLM_FL_CP_REQD;
                LDLM_DEBUG(lock, "lock granted; sending completion AST.");
                LASSERT(cfs_list_empty(&lock->l_cp_ast));
                cfs_list_add(&lock->l_cp_ast, work_list);
                LDLM_LOCK_GET(lock);
        }
}
/* must be called with lr_lock held */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            cfs_list_t *work_list)
{
        check_res_locked(lock->l_resource);
        if (new)
                ldlm_add_bl_work_item(lock, new, work_list);
        else
                ldlm_add_cp_work_item(lock, work_list);
}
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        LASSERT(lock != NULL);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        ldlm_lock_remove_from_lru(lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                lock->l_readers++;
                lu_ref_add_atomic(&lock->l_reference, "reader", lock);
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
                lock->l_writers++;
                lu_ref_add_atomic(&lock->l_reference, "writer", lock);
        }
        LDLM_LOCK_GET(lock);
        lu_ref_add_atomic(&lock->l_reference, "user", lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}
/**
 * Attempts to addref a lock, and fails if the lock is already marked
 * LDLM_FL_CBPENDING (i.e. being canceled).
 *
 * \retval 0       success, lock was addref-ed
 * \retval -EAGAIN lock is being canceled.
 */
int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;
        int result = -EAGAIN;

        lock = ldlm_handle2lock(lockh);
        if (lock != NULL) {
                lock_res_and_lock(lock);
                if (lock->l_readers != 0 || lock->l_writers != 0 ||
                    !(lock->l_flags & LDLM_FL_CBPENDING)) {
                        ldlm_lock_addref_internal_nolock(lock, mode);
                        result = 0;
                }
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
        }
        return result;
}
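
/*
 * Hypothetical usage sketch for ldlm_lock_addref_try() (illustration only,
 * compiled out): pin the lock only if it is not already being cancelled.
 */
#if 0
static int example_try_pin(struct lustre_handle *lockh)
{
        if (ldlm_lock_addref_try(lockh, LCK_PR) < 0)
                return -EAGAIN;         /* lock is being cancelled */
        /* ... read under the PR reference ... */
        ldlm_lock_decref(lockh, LCK_PR);
        return 0;
}
#endif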
/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        lock_res_and_lock(lock);
        ldlm_lock_addref_internal_nolock(lock, mode);
        unlock_res_and_lock(lock);
}
/* only called in ldlm_flock_destroy and for local locks.
 * For LDLM_FLOCK type locks, l_blocking_ast is null, and
 * ldlm_lock_remove_from_lru() does nothing, so it is safe
 * for ldlm_flock_destroy usage by dropping some code */
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                LASSERT(lock->l_readers > 0);
                lu_ref_del(&lock->l_reference, "reader", lock);
                lock->l_readers--;
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
                LASSERT(lock->l_writers > 0);
                lu_ref_del(&lock->l_reference, "writer", lock);
                lock->l_writers--;
        }

        lu_ref_del(&lock->l_reference, "user", lock);
        LDLM_LOCK_RELEASE(lock); /* matches the LDLM_LOCK_GET() in addref */
}
void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
        struct ldlm_namespace *ns;
        ENTRY;

        lock_res_and_lock(lock);

        ns = ldlm_lock_to_ns(lock);

        ldlm_lock_decref_internal_nolock(lock, mode);

        if (lock->l_flags & LDLM_FL_LOCAL &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                lock->l_flags |= LDLM_FL_CBPENDING;
        }

        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                /* If we received a blocking AST and this was the last
                 * reference, run the callback. */
                if (ns_is_server(ns) && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                unlock_res_and_lock(lock);

                if (lock->l_flags & LDLM_FL_FAIL_LOC)
                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

                if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
                    ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
                        ldlm_handle_bl_callback(ns, NULL, lock);
        } else if (ns_is_client(ns) &&
                   !lock->l_readers && !lock->l_writers &&
                   !(lock->l_flags & LDLM_FL_BL_AST)) {
                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                ldlm_lock_add_to_lru(lock);
                unlock_res_and_lock(lock);

                if (lock->l_flags & LDLM_FL_FAIL_LOC)
                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

                /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
                 * are not supported by the server; otherwise, it is done on
                 * enqueue. */
                if (!exp_connect_cancelset(lock->l_conn_export) &&
                    !ns_connect_lru_resize(ns))
                        ldlm_cancel_lru(ns, 0, LDLM_ASYNC, 0);
        } else {
                unlock_res_and_lock(lock);
        }

        EXIT;
}
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

        LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
/* This will drop a lock reference and mark it for destruction, but will not
 * necessarily cancel the lock before returning. */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_CBPENDING;
        unlock_res_and_lock(lock);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
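
/*
 * Illustration (hypothetical, compiled out) of the difference between the
 * two decref flavours above: plain decref may leave the lock cached on the
 * namespace LRU, while decref_and_cancel marks it CBPENDING so the final
 * reference drop also triggers cancellation.
 */
#if 0
static void example_release(struct lustre_handle *lockh, int keep_cached)
{
        if (keep_cached)
                ldlm_lock_decref(lockh, LCK_PR);            /* may stay in LRU */
        else
                ldlm_lock_decref_and_cancel(lockh, LCK_PR); /* cancel on last ref */
}
#endif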
struct sl_insert_point {
        cfs_list_t *res_link;
        cfs_list_t *mode_link;
        cfs_list_t *policy_link;
};
/*
 * search_granted_lock
 *
 * Description:
 *      Finds a position to insert the new lock.
 * Parameters:
 *      queue [input]:  the granted list where search acts on;
 *      req   [input]:  the lock whose position to be located;
 *      prev  [output]: positions within 3 lists to insert @req to
 * Return Value:
 *      filled @prev
 * NOTE: called by
 *  - ldlm_grant_lock_with_skiplist
 */
static void search_granted_lock(cfs_list_t *queue,
                                struct ldlm_lock *req,
                                struct sl_insert_point *prev)
{
        cfs_list_t *tmp;
        struct ldlm_lock *lock, *mode_end, *policy_end;
        ENTRY;

        cfs_list_for_each(tmp, queue) {
                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);

                mode_end = cfs_list_entry(lock->l_sl_mode.prev,
                                          struct ldlm_lock, l_sl_mode);

                if (lock->l_req_mode != req->l_req_mode) {
                        /* jump to last lock of mode group */
                        tmp = &mode_end->l_res_link;
                        continue;
                }

                /* suitable mode group is found */
                if (lock->l_resource->lr_type == LDLM_PLAIN) {
                        /* insert point is last lock of the mode group */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else if (lock->l_resource->lr_type == LDLM_IBITS) {
                        for (;;) {
                                policy_end =
                                        cfs_list_entry(lock->l_sl_policy.prev,
                                                       struct ldlm_lock,
                                                       l_sl_policy);

                                if (lock->l_policy_data.l_inodebits.bits ==
                                    req->l_policy_data.l_inodebits.bits) {
                                        /* insert point is last lock of
                                         * the policy group */
                                        prev->res_link =
                                                &policy_end->l_res_link;
                                        prev->mode_link =
                                                &policy_end->l_sl_mode;
                                        prev->policy_link =
                                                &policy_end->l_sl_policy;
                                        EXIT;
                                        return;
                                }

                                if (policy_end == mode_end)
                                        /* done with mode group */
                                        break;

                                /* go to next policy group within mode group */
                                tmp = policy_end->l_res_link.next;
                                lock = cfs_list_entry(tmp, struct ldlm_lock,
                                                      l_res_link);
                        } /* loop over policy groups within the mode group */

                        /* insert point is last lock of the mode group,
                         * new policy group is started */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else {
                        LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock");
                        LBUG();
                }
        }

        /* insert point is last lock on the queue,
         * new mode group and new policy group are started */
        prev->res_link = queue->prev;
        prev->mode_link = &req->l_sl_mode;
        prev->policy_link = &req->l_sl_policy;
        EXIT;
}
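
/*
 * Illustration added for clarity (not in the original source): the granted
 * list is organised as mode groups, each subdivided into policy groups,
 * e.g. for an inodebits resource:
 *
 *   lr_granted:  [ PR|LOOKUP  PR|UPDATE ]  [ CW|LOOKUP ]
 *                  mode group for PR         mode group for CW
 *
 * search_granted_lock() hops between groups via l_sl_mode/l_sl_policy, so
 * finding the insertion point costs O(number of groups), not O(locks).
 */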
static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
                                       struct sl_insert_point *prev)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        ldlm_resource_dump(D_INFO, res);
        CDEBUG(D_OTHER, "About to add this lock:\n");
        ldlm_lock_dump(D_OTHER, lock, 0);

        if (lock->l_destroyed) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                return;
        }

        LASSERT(cfs_list_empty(&lock->l_res_link));
        LASSERT(cfs_list_empty(&lock->l_sl_mode));
        LASSERT(cfs_list_empty(&lock->l_sl_policy));

        cfs_list_add(&lock->l_res_link, prev->res_link);
        cfs_list_add(&lock->l_sl_mode, prev->mode_link);
        cfs_list_add(&lock->l_sl_policy, prev->policy_link);

        EXIT;
}
static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
{
        struct sl_insert_point prev;
        ENTRY;

        LASSERT(lock->l_req_mode == lock->l_granted_mode);

        search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
        ldlm_granted_list_add_lock(lock, &prev);
        EXIT;
}
/* NOTE: called by
 *  - ldlm_lock_enqueue
 *  - ldlm_reprocess_queue
 *  - ldlm_lock_convert
 *
 * must be called with lr_lock held
 */
void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        lock->l_granted_mode = lock->l_req_mode;
        if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
                ldlm_grant_lock_with_skiplist(lock);
        else if (res->lr_type == LDLM_EXTENT)
                ldlm_extent_add_lock(res, lock);
        else
                ldlm_resource_add_lock(res, &res->lr_granted, lock);

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (work_list && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, work_list);

        ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
        EXIT;
}
/* returns a referenced lock or NULL. See the flag descriptions in the
 * comment above ldlm_lock_match() */
static struct ldlm_lock *search_queue(cfs_list_t *queue,
                                      ldlm_mode_t *mode,
                                      ldlm_policy_data_t *policy,
                                      struct ldlm_lock *old_lock,
                                      int flags, int unref)
{
        struct ldlm_lock *lock;
        cfs_list_t *tmp;

        cfs_list_for_each(tmp, queue) {
                ldlm_mode_t match;

                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock == old_lock)
                        break;

                /* llite sometimes wants to match locks that will be
                 * canceled when their users drop, but we allow it to match
                 * if it passes in CBPENDING and the lock still has users.
                 * this is generally only going to be used by children
                 * whose parents already hold a lock so forward progress
                 * can still happen. */
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    !(flags & LDLM_FL_CBPENDING))
                        continue;
                if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
                    lock->l_readers == 0 && lock->l_writers == 0)
                        continue;

                if (!(lock->l_req_mode & *mode))
                        continue;
                match = lock->l_req_mode;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_policy_data.l_extent.start >
                     policy->l_extent.start ||
                     lock->l_policy_data.l_extent.end < policy->l_extent.end))
                        continue;

                if (unlikely(match == LCK_GROUP) &&
                    lock->l_resource->lr_type == LDLM_EXTENT &&
                    lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
                        continue;

                /* We match if we have an existing lock with the same or wider
                 * set of bits. */
                if (lock->l_resource->lr_type == LDLM_IBITS &&
                    ((lock->l_policy_data.l_inodebits.bits &
                      policy->l_inodebits.bits) !=
                     policy->l_inodebits.bits))
                        continue;

                if (!unref &&
                    (lock->l_destroyed || (lock->l_flags & LDLM_FL_FAILED)))
                        continue;

                if ((flags & LDLM_FL_LOCAL_ONLY) &&
                    !(lock->l_flags & LDLM_FL_LOCAL))
                        continue;

                if (flags & LDLM_FL_TEST_LOCK) {
                        LDLM_LOCK_GET(lock);
                        ldlm_lock_touch_in_lru(lock);
                } else {
                        ldlm_lock_addref_internal_nolock(lock, match);
                }
                *mode = match;
                return lock;
        }

        return NULL;
}
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
        lock->l_flags |= LDLM_FL_LVB_READY;
        cfs_waitq_signal(&lock->l_waitq);
}

void ldlm_lock_allow_match(struct ldlm_lock *lock)
{
        lock_res_and_lock(lock);
        ldlm_lock_allow_match_locked(lock);
        unlock_res_and_lock(lock);
}
/* Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (ie, connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 *     to be canceled can still be matched as long as they still have reader
 *     or writer references
 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
 *     just tell us if we would have matched.
 *
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock.
 *
 * We also check the security context; if that fails we simply return 0 (to
 * keep caller code unchanged), and the context failure will be discovered by
 * the caller sometime later.
 */
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                            const struct ldlm_res_id *res_id, ldlm_type_t type,
                            ldlm_policy_data_t *policy, ldlm_mode_t mode,
                            struct lustre_handle *lockh, int unref)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock, *old_lock = NULL;
        int rc = 0;
        ENTRY;

        if (ns == NULL) {
                old_lock = ldlm_handle2lock(lockh);
                LASSERT(old_lock);

                ns = ldlm_lock_to_ns(old_lock);
                res_id = &old_lock->l_resource->lr_name;
                type = old_lock->l_resource->lr_type;
                mode = old_lock->l_req_mode;
        }

        res = ldlm_resource_get(ns, NULL, res_id, type, 0);
        if (res == NULL) {
                LASSERT(old_lock == NULL);
                RETURN(0);
        }

        LDLM_RESOURCE_ADDREF(res);
        lock_res(res);

        lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
                            flags, unref);
        if (lock != NULL)
                GOTO(out, rc = 1);
        if (flags & LDLM_FL_BLOCK_GRANTED)
                GOTO(out, rc = 0);
        lock = search_queue(&res->lr_converting, &mode, policy, old_lock,
                            flags, unref);
        if (lock != NULL)
                GOTO(out, rc = 1);
        lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
                            flags, unref);
        if (lock != NULL)
                GOTO(out, rc = 1);

        EXIT;
 out:
        unlock_res(res);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                if ((flags & LDLM_FL_LVB_READY) &&
                    (!(lock->l_flags & LDLM_FL_LVB_READY))) {
                        struct l_wait_info lwi;
                        if (lock->l_completion_ast) {
                                int err = lock->l_completion_ast(lock,
                                                          LDLM_FL_WAIT_NOREPROC,
                                                                 NULL);
                                if (err) {
                                        if (flags & LDLM_FL_TEST_LOCK)
                                                LDLM_LOCK_RELEASE(lock);
                                        else
                                                ldlm_lock_decref_internal(lock,
                                                                          mode);
                                        rc = 0;
                                        goto out2;
                                }
                        }

                        lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
                                               NULL, LWI_ON_SIGNAL_NOOP, NULL);

                        /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
                        l_wait_event(lock->l_waitq,
                                     (lock->l_flags & LDLM_FL_LVB_READY), &lwi);
                }
        }
 out2:
        if (rc) {
                LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")",
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[2] : policy->l_extent.start,
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[3] : policy->l_extent.end);

                /* check user's security context */
                if (lock->l_conn_export &&
                    sptlrpc_import_check_ctx(
                                class_exp2cliimp(lock->l_conn_export))) {
                        if (!(flags & LDLM_FL_TEST_LOCK))
                                ldlm_lock_decref_internal(lock, mode);
                        rc = 0;
                }

                if (flags & LDLM_FL_TEST_LOCK)
                        LDLM_LOCK_RELEASE(lock);

        } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
                LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
                                  LPU64"/"LPU64" ("LPU64" "LPU64")", ns,
                                  type, mode, res_id->name[0], res_id->name[1],
                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[2] : policy->l_extent.start,
                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[3] : policy->l_extent.end);
        }
        if (old_lock)
                LDLM_LOCK_PUT(old_lock);

        return rc ? mode : 0;
}
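
/*
 * Hypothetical usage sketch for ldlm_lock_match() (illustration only,
 * compiled out): probe the cache for a granted PR or PW extent lock
 * covering the whole object, then drop the reference when done.
 */
#if 0
static int example_match(struct ldlm_namespace *ns,
                         const struct ldlm_res_id *res_id)
{
        ldlm_policy_data_t policy = {
                .l_extent = { .start = 0, .end = OBD_OBJECT_EOF }
        };
        struct lustre_handle lockh;
        ldlm_mode_t mode;

        mode = ldlm_lock_match(ns, LDLM_FL_LVB_READY, res_id, LDLM_EXTENT,
                               &policy, LCK_PR | LCK_PW, &lockh, 0);
        if (mode == 0)
                return 0;               /* nothing compatible is cached */
        /* ... use the matched lock ... */
        ldlm_lock_decref(&lockh, mode);
        return 1;
}
#endif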
/* Returns a referenced lock */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   const struct ldlm_res_id *res_id,
                                   ldlm_type_t type,
                                   ldlm_mode_t mode,
                                   const struct ldlm_callback_suite *cbs,
                                   void *data, __u32 lvb_len)
{
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        ENTRY;

        res = ldlm_resource_get(ns, NULL, res_id, type, 1);
        if (res == NULL)
                RETURN(NULL);

        lock = ldlm_lock_new(res);
        ldlm_resource_putref(res);
        if (lock == NULL)
                RETURN(NULL);

        lock->l_req_mode = mode;
        lock->l_ast_data = data;
        lock->l_pid = cfs_curproc_pid();
        if (cbs) {
                lock->l_blocking_ast = cbs->lcs_blocking;
                lock->l_completion_ast = cbs->lcs_completion;
                lock->l_glimpse_ast = cbs->lcs_glimpse;
                lock->l_weigh_ast = cbs->lcs_weigh;
        }

        lock->l_tree_node = NULL;
        /* if this is an extent lock, allocate the interval tree node */
        if (type == LDLM_EXTENT) {
                if (ldlm_interval_alloc(lock) == NULL)
                        GOTO(out, 0);
        }

        if (lvb_len) {
                lock->l_lvb_len = lvb_len;
                OBD_ALLOC(lock->l_lvb_data, lvb_len);
                if (lock->l_lvb_data == NULL)
                        GOTO(out, 0);
        }

        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
                GOTO(out, 0);

        RETURN(lock);

out:
        ldlm_lock_destroy(lock);
        LDLM_LOCK_RELEASE(lock);
        RETURN(NULL);
}
ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
                               struct ldlm_lock **lockp,
                               void *cookie, int *flags)
{
        struct ldlm_lock *lock = *lockp;
        struct ldlm_resource *res = lock->l_resource;
        int local = ns_is_client(ldlm_res_to_ns(res));
        ldlm_processing_policy policy;
        ldlm_error_t rc = ELDLM_OK;
        struct ldlm_interval *node = NULL;
        ENTRY;

        lock->l_last_activity = cfs_time_current_sec();
        /* policies are not executed on the client or during replay */
        if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
            && !local && ns->ns_policy) {
                rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
                                   NULL);
                if (rc == ELDLM_LOCK_REPLACED) {
                        /* The lock that was returned has already been granted,
                         * and placed into lockp. If it's not the same as the
                         * one we passed in, then destroy the old one and our
                         * work here is done. */
                        if (lock != *lockp) {
                                ldlm_lock_destroy(lock);
                                LDLM_LOCK_RELEASE(lock);
                        }
                        *flags |= LDLM_FL_LOCK_CHANGED;
                        RETURN(0);
                } else if (rc != ELDLM_OK ||
                           (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
                        ldlm_lock_destroy(lock);
                        RETURN(rc);
                }
        }

        /* For a replaying lock, it might be already in granted list. So
         * unlinking the lock will cause the interval node to be freed; we
         * have to allocate the interval node early, otherwise we can't regrant
         * this lock in the future. - jay */
        if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
                OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);

        lock_res_and_lock(lock);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted
                 * before we got a chance to actually enqueue it. We don't
                 * need to do anything else. */
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                            LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
                GOTO(out, ELDLM_OK);
        }

        ldlm_resource_unlink_lock(lock);
        if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) {
                if (node == NULL) {
                        ldlm_lock_destroy_nolock(lock);
                        GOTO(out, rc = -ENOMEM);
                }

                CFS_INIT_LIST_HEAD(&node->li_group);
                ldlm_interval_attach(node, lock);
                node = NULL;
        }

        /* Some flags from the enqueue want to make it into the AST, via the
         * lock's l_flags. */
        lock->l_flags |= *flags & LDLM_AST_DISCARD_DATA;

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below). In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags'.
         *
         * We do exactly the same thing during recovery, when the server is
         * more or less trusting the clients not to lie.
         *
         * FIXME (bug 268): Detect obvious lies by checking compatibility in
         * granted/converting queues. */
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                else
                        ldlm_grant_lock(lock, NULL);
                GOTO(out, ELDLM_OK);
        } else if (*flags & LDLM_FL_REPLAY) {
                if (*flags & LDLM_FL_BLOCK_CONV) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_WAIT) {
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                        GOTO(out, ELDLM_OK);
                } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
                        ldlm_grant_lock(lock, NULL);
                        GOTO(out, ELDLM_OK);
                }
                /* If no flags, fall through to normal enqueue path. */
        }

        policy = ldlm_processing_policy_table[res->lr_type];
        policy(lock, flags, 1, &rc, NULL);
        GOTO(out, rc);
out:
        unlock_res_and_lock(lock);
        if (node)
                OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
        return rc;
}
/* Must be called with namespace taken: queue is waiting or converting. */
int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
                         cfs_list_t *work_list)
{
        cfs_list_t *tmp, *pos;
        ldlm_processing_policy policy;
        int flags;
        int rc = LDLM_ITER_CONTINUE;
        ldlm_error_t err;
        ENTRY;

        check_res_locked(res);

        policy = ldlm_processing_policy_table[res->lr_type];
        LASSERT(policy);

        cfs_list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;
                pending = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                flags = 0;
                rc = policy(pending, &flags, 0, &err, work_list);
                if (rc != LDLM_ITER_CONTINUE)
                        break;
        }

        RETURN(rc);
}
/* Helper function for ldlm_run_ast_work().
 *
 * Send an existing rpc set specified by @arg->set and then
 * destroy it. Create a new one if the @do_create flag is set. */
static void
ldlm_send_and_maybe_create_set(struct ldlm_cb_set_arg *arg, int do_create)
{
        ENTRY;

        ptlrpc_set_wait(arg->set);
        if (arg->type == LDLM_BL_CALLBACK)
                OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2);
        ptlrpc_set_destroy(arg->set);

        if (do_create)
                arg->set = ptlrpc_prep_set();

        EXIT;
}
static int
ldlm_work_bl_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
{
        struct ldlm_lock_desc d;
        struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
                                                l_bl_ast);
        ENTRY;

        /* nobody should touch l_bl_ast */
        lock_res_and_lock(lock);
        cfs_list_del_init(&lock->l_bl_ast);

        LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
        LASSERT(lock->l_bl_ast_run == 0);
        LASSERT(lock->l_blocking_lock);
        lock->l_bl_ast_run++;
        unlock_res_and_lock(lock);

        ldlm_lock2desc(lock->l_blocking_lock, &d);

        lock->l_blocking_ast(lock, &d, (void *)arg,
                             LDLM_CB_BLOCKING);
        LDLM_LOCK_RELEASE(lock->l_blocking_lock);
        lock->l_blocking_lock = NULL;
        LDLM_LOCK_RELEASE(lock);

        RETURN(1);
}
static int
ldlm_work_cp_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
{
        struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock, l_cp_ast);
        ldlm_completion_callback completion_callback;
        int rc = 0;
        ENTRY;

        /* It's possible to receive a completion AST before we've set
         * the l_completion_ast pointer: either because the AST arrived
         * before the reply, or simply because there's a small race
         * window between receiving the reply and finishing the local
         * enqueue. (bug 842)
         *
         * This can't happen with the blocking_ast, however, because we
         * will never call the local blocking_ast until we drop our
         * reader/writer reference, which we won't do until we get the
         * reply and finish enqueueing. */

        /* nobody should touch l_cp_ast */
        lock_res_and_lock(lock);
        cfs_list_del_init(&lock->l_cp_ast);
        LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
        /* save l_completion_ast since it can be changed by
         * mds_intent_policy(), see bug 14225 */
        completion_callback = lock->l_completion_ast;
        lock->l_flags &= ~LDLM_FL_CP_REQD;
        unlock_res_and_lock(lock);

        if (completion_callback != NULL) {
                completion_callback(lock, 0, (void *)arg);
                rc = 1;
        }
        LDLM_LOCK_RELEASE(lock);

        RETURN(rc);
}
static int
ldlm_work_revoke_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
{
        struct ldlm_lock_desc desc;
        struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
                                                l_rk_ast);
        ENTRY;

        cfs_list_del_init(&lock->l_rk_ast);

        /* the desc just pretends the lock is exclusive */
        ldlm_lock2desc(lock, &desc);
        desc.l_req_mode = LCK_EX;
        desc.l_granted_mode = 0;

        lock->l_blocking_ast(lock, &desc, (void*)arg, LDLM_CB_BLOCKING);
        LDLM_LOCK_RELEASE(lock);

        RETURN(1);
}
int ldlm_run_ast_work(cfs_list_t *rpc_list, ldlm_desc_ast_t ast_type)
{
        struct ldlm_cb_set_arg arg;
        cfs_list_t *tmp, *pos;
        int (*work_ast_lock)(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg);
        int ast_count;
        ENTRY;

        if (cfs_list_empty(rpc_list))
                RETURN(0);

        arg.set = ptlrpc_prep_set();
        if (NULL == arg.set)
                RETURN(-ERESTART);
        cfs_atomic_set(&arg.restart, 0);
        switch (ast_type) {
        case LDLM_WORK_BL_AST:
                arg.type = LDLM_BL_CALLBACK;
                work_ast_lock = ldlm_work_bl_ast_lock;
                break;
        case LDLM_WORK_CP_AST:
                arg.type = LDLM_CP_CALLBACK;
                work_ast_lock = ldlm_work_cp_ast_lock;
                break;
        case LDLM_WORK_REVOKE_AST:
                arg.type = LDLM_BL_CALLBACK;
                work_ast_lock = ldlm_work_revoke_ast_lock;
                break;
        default:
                LBUG();
        }

        ast_count = 0;
        cfs_list_for_each_safe(tmp, pos, rpc_list) {
                ast_count += work_ast_lock(tmp, &arg);

                /* Send the request set if it exceeds the PARALLEL_AST_LIMIT,
                 * and create a new set for requests that remained in
                 * @rpc_list */
                if (unlikely(ast_count == PARALLEL_AST_LIMIT)) {
                        ldlm_send_and_maybe_create_set(&arg, 1);
                        ast_count = 0;
                }
        }

        if (ast_count > 0)
                ldlm_send_and_maybe_create_set(&arg, 0);
        else
                /* In case the number of ASTs is a multiple of
                 * PARALLEL_AST_LIMIT or @rpc_list was initially empty,
                 * @arg.set must be destroyed here, otherwise we leak
                 * the memory of the request set. */
                ptlrpc_set_destroy(arg.set);

        RETURN(cfs_atomic_read(&arg.restart) ? -ERESTART : 0);
}
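
/*
 * Hypothetical usage sketch (illustration only, compiled out): the batching
 * pattern above in action -- gather completion ASTs on a local list under
 * the resource lock, then send them without the lock held.
 */
#if 0
static void example_reprocess_and_fire(struct ldlm_resource *res)
{
        CFS_LIST_HEAD(rpc_list);

        lock_res(res);
        ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
        unlock_res(res);

        ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
}
#endif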
static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
{
        ldlm_reprocess_all(res);
        return LDLM_ITER_CONTINUE;
}
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
{
        cfs_list_t *tmp;
        int i, rc;

        if (ns == NULL)
                return;

        ENTRY;
        cfs_spin_lock(&ns->ns_hash_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                tmp = ns->ns_hash[i].next;
                while (tmp != &(ns->ns_hash[i])) {
                        struct ldlm_resource *res =
                                cfs_list_entry(tmp, struct ldlm_resource,
                                               lr_hash);

                        ldlm_resource_getref(res);
                        cfs_spin_unlock(&ns->ns_hash_lock);
                        LDLM_RESOURCE_ADDREF(res);

                        rc = reprocess_one_queue(res, NULL);

                        LDLM_RESOURCE_DELREF(res);
                        cfs_spin_lock(&ns->ns_hash_lock);
                        tmp = tmp->next;
                        ldlm_resource_putref_locked(res);

                        if (rc == LDLM_ITER_STOP)
                                GOTO(out, rc);
                }
        }
 out:
        cfs_spin_unlock(&ns->ns_hash_lock);
        EXIT;
}
void ldlm_reprocess_all(struct ldlm_resource *res)
{
        CFS_LIST_HEAD(rpc_list);
        int rc;
        ENTRY;

        /* Local lock trees don't get reprocessed. */
        if (ns_is_client(ldlm_res_to_ns(res))) {
                EXIT;
                return;
        }

 restart:
        lock_res(res);
        rc = ldlm_reprocess_queue(res, &res->lr_converting, &rpc_list);
        if (rc == LDLM_ITER_CONTINUE)
                ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
        unlock_res(res);

        rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
        if (rc == -ERESTART) {
                LASSERT(cfs_list_empty(&rpc_list));
                goto restart;
        }
        EXIT;
}
void ldlm_cancel_callback(struct ldlm_lock *lock)
{
        check_res_locked(lock->l_resource);
        if (!(lock->l_flags & LDLM_FL_CANCEL)) {
                lock->l_flags |= LDLM_FL_CANCEL;
                if (lock->l_blocking_ast) {
                        // l_check_no_ns_lock(ns);
                        unlock_res_and_lock(lock);
                        lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
                                             LDLM_CB_CANCELING);
                        lock_res_and_lock(lock);
                } else {
                        LDLM_DEBUG(lock, "no blocking ast");
                }
        }
        lock->l_flags |= LDLM_FL_BL_DONE;
}
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
{
        if (req->l_resource->lr_type != LDLM_PLAIN &&
            req->l_resource->lr_type != LDLM_IBITS)
                return;

        cfs_list_del_init(&req->l_sl_policy);
        cfs_list_del_init(&req->l_sl_mode);
}
void ldlm_lock_cancel(struct ldlm_lock *lock)
{
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        ENTRY;

        lock_res_and_lock(lock);

        res = lock->l_resource;
        ns  = ldlm_res_to_ns(res);

        /* Please do not, no matter how tempting, remove this LBUG without
         * talking to me first. -phik */
        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                LBUG();
        }

        ldlm_del_waiting_lock(lock);

        /* Releases cancel callback. */
        ldlm_cancel_callback(lock);

        /* Yes, a second time, just in case it was added again while we were
         * running with no res lock in ldlm_cancel_callback */
        ldlm_del_waiting_lock(lock);
        ldlm_resource_unlink_lock(lock);
        ldlm_lock_destroy_nolock(lock);

        if (lock->l_granted_mode == lock->l_req_mode)
                ldlm_pool_del(&ns->ns_pool, lock);

        /* Make sure we will not be called again for the same lock, which is
         * possible if lock->l_granted_mode is not zeroed out */
        lock->l_granted_mode = LCK_MINMODE;
        unlock_res_and_lock(lock);

        EXIT;
}
int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
{
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);
        ENTRY;

        if (lock == NULL)
                RETURN(-EINVAL);

        lock->l_ast_data = data;
        LDLM_LOCK_PUT(lock);
        RETURN(0);
}
int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                                    cfs_hlist_node_t *hnode, void *data)
{
        struct obd_export *exp = data;
        struct ldlm_lock *lock = cfs_hash_object(hs, hnode);
        struct ldlm_resource *res;

        res = ldlm_resource_getref(lock->l_resource);
        LDLM_LOCK_GET(lock);

        LDLM_DEBUG(lock, "export %p", exp);
        ldlm_res_lvbo_update(res, NULL, 1);
        ldlm_lock_cancel(lock);
        ldlm_reprocess_all(res);
        ldlm_resource_putref(res);
        LDLM_LOCK_RELEASE(lock);
        return 0;
}
void ldlm_cancel_locks_for_export(struct obd_export *exp)
{
        cfs_hash_for_each_empty(exp->exp_lock_hash,
                                ldlm_cancel_locks_for_export_cb, exp);
}
/**
 * Downgrade an exclusive lock.
 *
 * A fast variant of ldlm_lock_convert for conversion of exclusive
 * locks. The conversion is always successful.
 *
 * \param lock A lock to convert
 * \param new_mode new lock mode
 */
void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
{
        struct ldlm_namespace *ns;
        ENTRY;

        LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX));
        LASSERT(new_mode == LCK_COS);

        lock_res_and_lock(lock);
        ldlm_resource_unlink_lock(lock);
        /*
         * Remove the lock from the pool as it will be added again in
         * ldlm_grant_lock() called below.
         */
        ns = ldlm_lock_to_ns(lock);
        ldlm_pool_del(&ns->ns_pool, lock);

        lock->l_req_mode = new_mode;
        ldlm_grant_lock(lock, NULL);
        unlock_res_and_lock(lock);
        ldlm_reprocess_all(lock->l_resource);

        EXIT;
}
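
/*
 * Hypothetical caller sketch (illustration only, compiled out): a server
 * thread downgrading a granted PW/EX lock to COS mode, per the fast-path
 * conversion described above.
 */
#if 0
static void example_downgrade_to_cos(struct ldlm_lock *lock)
{
        /* only PW/EX locks may be downgraded, and only to LCK_COS */
        if (lock->l_granted_mode & (LCK_PW | LCK_EX))
                ldlm_lock_downgrade(lock, LCK_COS);
}
#endif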
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        __u32 *flags)
{
        CFS_LIST_HEAD(rpc_list);
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        int granted = 0;
        int old_mode, rc;
        struct sl_insert_point prev;
        ldlm_error_t err;
        struct ldlm_interval *node;
        ENTRY;

        if (new_mode == lock->l_granted_mode) { /* No changes? Just return. */
                *flags |= LDLM_FL_BLOCK_GRANTED;
                RETURN(lock->l_resource);
        }

        /* I can't check the type of lock here because the bitlock of lock
         * is not held here, so do the allocation blindly. -jay */
        OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
        if (node == NULL)  /* Actually, this causes EDEADLOCK to be returned */
                RETURN(NULL);

        LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
                 "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);

        lock_res_and_lock(lock);

        res = lock->l_resource;
        ns  = ldlm_res_to_ns(res);

        old_mode = lock->l_req_mode;
        lock->l_req_mode = new_mode;
        if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
                /* remember the lock position where the lock might be
                 * added back to the granted list later and also
                 * remember the join mode for skiplist fixing. */
                prev.res_link = lock->l_res_link.prev;
                prev.mode_link = lock->l_sl_mode.prev;
                prev.policy_link = lock->l_sl_policy.prev;
                ldlm_resource_unlink_lock(lock);
        } else {
                ldlm_resource_unlink_lock(lock);
                if (res->lr_type == LDLM_EXTENT) {
                        /* FIXME: ugly code, I have to attach the lock to an
                         * interval node again since perhaps it will be granted
                         * soon */
                        CFS_INIT_LIST_HEAD(&node->li_group);
                        ldlm_interval_attach(node, lock);
                        node = NULL;
                }
        }

        /*
         * Remove the old lock from the pool before adding the lock with new
         * mode below in ->policy()
         */
        ldlm_pool_del(&ns->ns_pool, lock);

        /* If this is a local resource, put it on the appropriate list. */
        if (ns_is_client(ldlm_res_to_ns(res))) {
                if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
                        ldlm_resource_add_lock(res, &res->lr_converting, lock);
                } else {
                        /* This should never happen, because of the way the
                         * server handles conversions. */
                        LDLM_ERROR(lock, "Erroneous flags %d on local lock\n",
                                   *flags);
                        LBUG();

                        ldlm_grant_lock(lock, &rpc_list);
                        granted = 1;
                        /* FIXME: completion handling not with ns_lock held ! */
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0, NULL);
                }
        } else {
                int pflags = 0;
                ldlm_processing_policy policy;
                policy = ldlm_processing_policy_table[res->lr_type];
                rc = policy(lock, &pflags, 0, &err, &rpc_list);
                if (rc == LDLM_ITER_STOP) {
                        lock->l_req_mode = old_mode;
                        if (res->lr_type == LDLM_EXTENT)
                                ldlm_extent_add_lock(res, lock);
                        else
                                ldlm_granted_list_add_lock(lock, &prev);

                        res = NULL;
                } else {
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        granted = 1;
                }
        }
        unlock_res_and_lock(lock);

        if (granted)
                ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
        if (node)
                OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
        RETURN(res);
}
void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
{
        struct obd_device *obd = NULL;

        if (!((libcfs_debug | D_ERROR) & level))
                return;

        if (!lock) {
                CDEBUG(level, "  NULL LDLM lock\n");
                return;
        }

        CDEBUG(level, " -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
               lock, lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
               pos, lock->l_pid);
        if (lock->l_conn_export != NULL)
                obd = lock->l_conn_export->exp_obd;
        if (lock->l_export && lock->l_export->exp_connection) {
                CDEBUG(level, "  Node: NID %s (rhandle: "LPX64")\n",
                       libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid),
                       lock->l_remote_handle.cookie);
        } else if (obd == NULL) {
                CDEBUG(level, "  Node: local\n");
        } else {
                struct obd_import *imp = obd->u.cli.cl_import;
                CDEBUG(level, "  Node: NID %s (rhandle: "LPX64")\n",
                       libcfs_nid2str(imp->imp_connection->c_peer.nid),
                       lock->l_remote_handle.cookie);
        }
        CDEBUG(level, "  Resource: %p ("LPU64"/"LPU64"/"LPU64")\n",
               lock->l_resource,
               lock->l_resource->lr_name.name[0],
               lock->l_resource->lr_name.name[1],
               lock->l_resource->lr_name.name[2]);
        CDEBUG(level, "  Req mode: %s, grant mode: %s, rc: %u, read: %d, "
               "write: %d flags: "LPX64"\n", ldlm_lockname[lock->l_req_mode],
               ldlm_lockname[lock->l_granted_mode],
               cfs_atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
               lock->l_flags);
        if (lock->l_resource->lr_type == LDLM_EXTENT)
                CDEBUG(level, "  Extent: "LPU64" -> "LPU64
                       " (req "LPU64"-"LPU64")\n",
                       lock->l_policy_data.l_extent.start,
                       lock->l_policy_data.l_extent.end,
                       lock->l_req_extent.start, lock->l_req_extent.end);
        else if (lock->l_resource->lr_type == LDLM_FLOCK)
                CDEBUG(level, "  Pid: %d Extent: "LPU64" -> "LPU64"\n",
                       lock->l_policy_data.l_flock.pid,
                       lock->l_policy_data.l_flock.start,
                       lock->l_policy_data.l_flock.end);
        else if (lock->l_resource->lr_type == LDLM_IBITS)
                CDEBUG(level, "  Bits: "LPX64"\n",
                       lock->l_policy_data.l_inodebits.bits);
}
void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;

        if (!((libcfs_debug | D_ERROR) & level))
                return;

        lock = ldlm_handle2lock(lockh);
        if (lock == NULL)
                return;

        ldlm_lock_dump(D_OTHER, lock, 0);

        LDLM_LOCK_PUT(lock);
}
void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 level,
                      struct libcfs_debug_msg_data *data, const char *fmt,
                      ...)
{
        va_list args;
        cfs_debug_limit_state_t *cdls = data->msg_cdls;

        va_start(args, fmt);

        if (lock->l_resource == NULL) {
                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
                        data->msg_fn, data->msg_line, fmt, args,
                        " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                        "res: \?\? rrc=\?\? type: \?\?\? flags: "LPX64" remote: "
                        LPX64" expref: %d pid: %u timeout: %lu\n", lock,
                        lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        lock->l_flags, lock->l_remote_handle.cookie,
                        lock->l_export ?
                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout);
                va_end(args);
                return;
        }

        switch (lock->l_resource->lr_type) {
        case LDLM_EXTENT:
                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
                        data->msg_fn, data->msg_line, fmt, args,
                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                        "res: "LPU64"/"LPU64" rrc: %d type: %s ["LPU64"->"LPU64
                        "] (req "LPU64"->"LPU64") flags: "LPX64" remote: "LPX64
                        " expref: %d pid: %u timeout %lu\n",
                        ldlm_lock_to_ns_name(lock), lock,
                        lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        lock->l_resource->lr_name.name[0],
                        lock->l_resource->lr_name.name[1],
                        cfs_atomic_read(&lock->l_resource->lr_refcount),
                        ldlm_typename[lock->l_resource->lr_type],
                        lock->l_policy_data.l_extent.start,
                        lock->l_policy_data.l_extent.end,
                        lock->l_req_extent.start, lock->l_req_extent.end,
                        lock->l_flags, lock->l_remote_handle.cookie,
                        lock->l_export ?
                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout);
                break;

        case LDLM_FLOCK:
                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
                        data->msg_fn, data->msg_line, fmt, args,
                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                        "res: "LPU64"/"LPU64" rrc: %d type: %s pid: %d "
                        "["LPU64"->"LPU64"] flags: "LPX64" remote: "LPX64
                        " expref: %d pid: %u timeout: %lu\n",
                        ldlm_lock_to_ns_name(lock), lock,
                        lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        lock->l_resource->lr_name.name[0],
                        lock->l_resource->lr_name.name[1],
                        cfs_atomic_read(&lock->l_resource->lr_refcount),
                        ldlm_typename[lock->l_resource->lr_type],
                        lock->l_policy_data.l_flock.pid,
                        lock->l_policy_data.l_flock.start,
                        lock->l_policy_data.l_flock.end,
                        lock->l_flags, lock->l_remote_handle.cookie,
                        lock->l_export ?
                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout);
                break;

        case LDLM_IBITS:
                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
                        data->msg_fn, data->msg_line, fmt, args,
                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                        "res: "LPU64"/"LPU64" bits "LPX64" rrc: %d type: %s "
                        "flags: "LPX64" remote: "LPX64" expref: %d "
                        "pid: %u timeout: %lu\n",
                        ldlm_lock_to_ns_name(lock),
                        lock, lock->l_handle.h_cookie,
                        cfs_atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        lock->l_resource->lr_name.name[0],
                        lock->l_resource->lr_name.name[1],
                        lock->l_policy_data.l_inodebits.bits,
                        cfs_atomic_read(&lock->l_resource->lr_refcount),
                        ldlm_typename[lock->l_resource->lr_type],
                        lock->l_flags, lock->l_remote_handle.cookie,
                        lock->l_export ?
                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout);
                break;

        default:
                libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
                        data->msg_fn, data->msg_line, fmt, args,
                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
                        "res: "LPU64"/"LPU64" rrc: %d type: %s flags: "LPX64" "
                        "remote: "LPX64" expref: %d pid: %u timeout %lu\n",
                        ldlm_lock_to_ns_name(lock),
                        lock, lock->l_handle.h_cookie,
                        cfs_atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        lock->l_resource->lr_name.name[0],
                        lock->l_resource->lr_name.name[1],
                        cfs_atomic_read(&lock->l_resource->lr_refcount),
                        ldlm_typename[lock->l_resource->lr_type],
                        lock->l_flags, lock->l_remote_handle.cookie,
                        lock->l_export ?
                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout);
                break;
        }
        va_end(args);
}
EXPORT_SYMBOL(_ldlm_lock_debug);