X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fldlm%2Fldlm_plain.c;h=fe27db42ee0c2ba979046d03f9ed17e535843582;hp=6a9960800908be43d782411d147030a9561e08b8;hb=refs%2Fchanges%2F15%2F35815%2F2;hpb=930dca7253bc2531bffa15dc763db1081cdf32d8

diff --git a/lustre/ldlm/ldlm_plain.c b/lustre/ldlm/ldlm_plain.c
index 6a99608..fe27db4 100644
--- a/lustre/ldlm/ldlm_plain.c
+++ b/lustre/ldlm/ldlm_plain.c
@@ -15,11 +15,7 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -73,49 +69,55 @@ ldlm_plain_compat_queue(struct list_head *queue, struct ldlm_lock *req,
 			struct list_head *work_list)
 {
 	enum ldlm_mode req_mode = req->l_req_mode;
-	struct ldlm_lock *lock;
-	struct list_head *tmp;
+	struct ldlm_lock *lock, *next_lock;
 	int compat = 1;
-	ENTRY;
 
+	ENTRY;
 	lockmode_verify(req_mode);
 
-	list_for_each_entry(lock, queue, l_res_link) {
-		/* We stop walking the queue if we hit ourselves so we don't
+	list_for_each_entry_safe(lock, next_lock, queue, l_res_link) {
+
+		/*
+		 * We stop walking the queue if we hit ourselves so we don't
 		 * take conflicting locks enqueued after us into account,
-		 * or we'd wait forever. */
+		 * or we'd wait forever.
+		 */
 		if (req == lock)
 			RETURN(compat);
 
 		/* Advance loop cursor to last lock of mode group. */
-		tmp = &list_entry(lock->l_sl_mode.prev, struct ldlm_lock,
-				  l_sl_mode)->l_res_link;
+		next_lock = list_entry(list_entry(lock->l_sl_mode.prev,
+						  struct ldlm_lock,
+						  l_sl_mode)->l_res_link.next,
+				       struct ldlm_lock, l_res_link);
 
 		if (lockmode_compat(lock->l_req_mode, req_mode))
-                        continue;
+			continue;
 
-                if (!work_list)
-                        RETURN(0);
+		if (!work_list)
+			RETURN(0);
 
-                compat = 0;
+		compat = 0;
 
-                /* Add locks of the mode group to \a work_list as
-                 * blocking locks for \a req. */
-                if (lock->l_blocking_ast)
-                        ldlm_add_ast_work_item(lock, req, work_list);
+		/*
+		 * Add locks of the mode group to \a work_list as
+		 * blocking locks for \a req.
+		 */
+		if (lock->l_blocking_ast)
+			ldlm_add_ast_work_item(lock, req, work_list);
 
-                {
+		{
 			struct list_head *head;
 
-                        head = &lock->l_sl_mode;
+			head = &lock->l_sl_mode;
 			list_for_each_entry(lock, head, l_sl_mode)
-                                if (lock->l_blocking_ast)
-                                        ldlm_add_ast_work_item(lock, req,
-                                                               work_list);
-                }
-        }
+				if (lock->l_blocking_ast)
+					ldlm_add_ast_work_item(lock, req,
+							       work_list);
+		}
+	}
 
-        RETURN(compat);
+	RETURN(compat);
 }
 
 /**
@@ -125,67 +127,44 @@ ldlm_plain_compat_queue(struct list_head *queue, struct ldlm_lock *req,
  * This function looks for any conflicts for \a lock in the granted or
  * waiting queues. The lock is granted if no conflicts are found in
  * either queue.
- *
- * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
- *   - blocking ASTs have already been sent
- *
- * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
- *   - blocking ASTs have not been sent yet, so list of conflicting locks
- *     would be collected and ASTs sent.
  */
 int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
-			    int first_enq, enum ldlm_error *err,
-			    struct list_head *work_list)
+			    enum ldlm_process_intention intention,
+			    enum ldlm_error *err, struct list_head *work_list)
 {
 	struct ldlm_resource *res = lock->l_resource;
-	struct list_head rpc_list;
+	struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
+							NULL : work_list;
 	int rc;
-	ENTRY;
 
-	LASSERT(lock->l_granted_mode != lock->l_req_mode);
+	ENTRY;
+	LASSERT(!ldlm_is_granted(lock));
 	check_res_locked(res);
-	LASSERT(list_empty(&res->lr_converting));
-	INIT_LIST_HEAD(&rpc_list);
-
-        if (!first_enq) {
-                LASSERT(work_list != NULL);
-                rc = ldlm_plain_compat_queue(&res->lr_granted, lock, NULL);
-                if (!rc)
-                        RETURN(LDLM_ITER_STOP);
-                rc = ldlm_plain_compat_queue(&res->lr_waiting, lock, NULL);
-                if (!rc)
-                        RETURN(LDLM_ITER_STOP);
-
-                ldlm_resource_unlink_lock(lock);
-                ldlm_grant_lock(lock, work_list);
-                RETURN(LDLM_ITER_CONTINUE);
-        }
-
- restart:
-        rc = ldlm_plain_compat_queue(&res->lr_granted, lock, &rpc_list);
-        rc += ldlm_plain_compat_queue(&res->lr_waiting, lock, &rpc_list);
-
-        if (rc != 2) {
-                /* If either of the compat_queue()s returned 0, then we
-                 * have ASTs to send and must go onto the waiting list.
-                 *
-                 * bug 2322: we used to unlink and re-add here, which was a
-                 * terrible folly -- if we goto restart, we could get
-                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
-                if (list_empty(&lock->l_res_link))
-                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
-                unlock_res(res);
-                rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
-                                       LDLM_WORK_BL_AST);
-                lock_res(res);
-                if (rc == -ERESTART)
-                        GOTO(restart, rc);
-                *flags |= LDLM_FL_BLOCK_GRANTED;
-        } else {
-                ldlm_resource_unlink_lock(lock);
-                ldlm_grant_lock(lock, NULL);
-        }
-        RETURN(0);
+	*err = ELDLM_OK;
+
+	if (intention == LDLM_PROCESS_RESCAN) {
+		LASSERT(work_list != NULL);
+		rc = ldlm_plain_compat_queue(&res->lr_granted, lock, NULL);
+		if (!rc)
+			RETURN(LDLM_ITER_STOP);
+		rc = ldlm_plain_compat_queue(&res->lr_waiting, lock, NULL);
+		if (!rc)
+			RETURN(LDLM_ITER_STOP);
+
+		ldlm_resource_unlink_lock(lock);
+		ldlm_grant_lock(lock, grant_work);
+		RETURN(LDLM_ITER_CONTINUE);
+	}
+
+	rc = ldlm_plain_compat_queue(&res->lr_granted, lock, work_list);
+	rc += ldlm_plain_compat_queue(&res->lr_waiting, lock, work_list);
+
+	if (rc == 2) {
+		ldlm_resource_unlink_lock(lock);
+		ldlm_grant_lock(lock, grant_work);
+	}
+
+	RETURN(LDLM_ITER_CONTINUE);
 }
 
 #endif /* HAVE_SERVER_SUPPORT */
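
Illustrative note (not part of the patch): the ldlm_plain_compat_queue() hunk above switches the walk to list_for_each_entry_safe() and computes next_lock as the entry that follows the last lock of the current mode group, so each group of same-mode locks is skipped in a single step. The standalone userspace sketch below mimics that walk under simplifying assumptions: the toy_lock layout, the reduced mode set, the toy_mode_compat() rule and the group_last pointer (standing in for the real l_sl_mode skip list) are invented for illustration and are not LDLM definitions.

/*
 * Userspace sketch of the compat-queue walk: stop when the request lock
 * itself is reached, hop over a whole group of same-mode locks in one
 * step, and flag any group whose mode conflicts with the request.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum toy_mode { MODE_PR, MODE_PW, MODE_EX };	/* toy subset of lock modes */

struct toy_lock {
	enum toy_mode	 mode;
	const char	*name;
	struct toy_lock	*next;		/* queue order, like l_res_link */
	struct toy_lock	*group_last;	/* last lock of this mode group */
};

/* Toy rule: only two PR (read) locks are compatible with each other. */
static bool toy_mode_compat(enum toy_mode a, enum toy_mode b)
{
	return a == MODE_PR && b == MODE_PR;
}

/* Returns 1 if nothing queued ahead of @req conflicts with it. */
static int toy_compat_queue(struct toy_lock *queue, struct toy_lock *req)
{
	struct toy_lock *lock = queue;
	int compat = 1;

	while (lock != NULL) {
		/* Never look past ourselves, or we would wait forever. */
		if (lock == req)
			return compat;

		/* Advance past the whole same-mode group in one step. */
		struct toy_lock *next_lock = lock->group_last->next;

		if (!toy_mode_compat(lock->mode, req->mode)) {
			compat = 0;
			/* The real code queues a blocking AST for every
			 * lock of the group; here we only report it. */
			printf("%s blocks %s\n", lock->name, req->name);
		}
		lock = next_lock;
	}
	return compat;
}

int main(void)
{
	struct toy_lock ex  = { MODE_EX, "EX-granted", NULL, NULL };
	struct toy_lock req = { MODE_PR, "PR-request", NULL, NULL };

	ex.next = &req;
	ex.group_last = &ex;		/* a mode group of one lock */
	req.group_last = &req;

	printf("compat = %d\n", toy_compat_queue(&ex, &req));
	return 0;
}

Built on its own, the example reports that the granted EX lock blocks the PR request and prints compat = 0, mirroring the compat value the real walk would return for a conflicting queue.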
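
Illustrative note (not part of the patch): ldlm_process_plain_lock() now takes an enum ldlm_process_intention instead of the old first_enq flag. LDLM_PROCESS_RESCAN follows the path previously guarded by !first_enq (blocking ASTs already sent, per the removed comment), while on the enqueue pass grant_work is left NULL by the ternary in the hunk above and the lock is granted only when both queues report compatibility (rc == 2). The sketch below reproduces only that decision structure; the stubbed queue checks, the toy_* names and the printed messages are assumptions for illustration, not the real LDLM code.

/*
 * Sketch of the decision structure of the reworked process function.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum ldlm_process_intention {
	LDLM_PROCESS_ENQUEUE,	/* first pass, blocking ASTs not sent yet   */
	LDLM_PROCESS_RESCAN	/* reprocessing, blocking ASTs already sent */
};

enum { ITER_STOP, ITER_CONTINUE };

/* Stub standing in for ldlm_plain_compat_queue(): 1 means no conflict. */
static int toy_compat_queue(bool queue_is_compatible)
{
	return queue_is_compatible ? 1 : 0;
}

static int toy_process_plain_lock(enum ldlm_process_intention intention,
				  bool granted_ok, bool waiting_ok,
				  const char *work_list)
{
	/* Grant work is only collected outside the initial enqueue pass. */
	const char *grant_work =
		intention == LDLM_PROCESS_ENQUEUE ? NULL : work_list;
	int rc;

	if (intention == LDLM_PROCESS_RESCAN) {
		/* Rescan stops at the first conflicting queue. */
		if (!toy_compat_queue(granted_ok) ||
		    !toy_compat_queue(waiting_ok))
			return ITER_STOP;
		printf("grant (rescan), grant_work=%s\n",
		       grant_work ? grant_work : "NULL");
		return ITER_CONTINUE;
	}

	/* Enqueue path: both queues must be compatible (1 + 1 == 2). */
	rc = toy_compat_queue(granted_ok) + toy_compat_queue(waiting_ok);
	if (rc == 2)
		printf("grant (enqueue), grant_work=%s\n",
		       grant_work ? grant_work : "NULL");
	else
		printf("conflicts found, lock is not granted here\n");
	return ITER_CONTINUE;
}

int main(void)
{
	toy_process_plain_lock(LDLM_PROCESS_ENQUEUE, true, true, "work_list");
	toy_process_plain_lock(LDLM_PROCESS_ENQUEUE, true, false, "work_list");
	toy_process_plain_lock(LDLM_PROCESS_RESCAN, true, true, "work_list");
	return 0;
}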