X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fldlm%2Fldlm_plain.c;h=aa074f5e251fc131cc013a35f6b0c56959aa8882;hb=31a690291b077646d6f42e0c90ce81bb3938deda;hp=423412ea80d1f2b21901f1503e326dd4e8622b53;hpb=d10200a80770f0029d1d665af954187b9ad883df;p=fs%2Flustre-release.git

diff --git a/lustre/ldlm/ldlm_plain.c b/lustre/ldlm/ldlm_plain.c
index 423412e..aa074f5 100644
--- a/lustre/ldlm/ldlm_plain.c
+++ b/lustre/ldlm/ldlm_plain.c
@@ -23,7 +23,7 @@
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2016, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -123,29 +123,22 @@ ldlm_plain_compat_queue(struct list_head *queue, struct ldlm_lock *req,
  * This function looks for any conflicts for \a lock in the granted or
  * waiting queues. The lock is granted if no conflicts are found in
  * either queue.
- *
- * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
- *   - blocking ASTs have already been sent
- *
- * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
- *   - blocking ASTs have not been sent yet, so list of conflicting locks
- *     would be collected and ASTs sent.
  */
 int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
-			    int first_enq, enum ldlm_error *err,
-			    struct list_head *work_list)
+			    enum ldlm_process_intention intention,
+			    enum ldlm_error *err, struct list_head *work_list)
 {
 	struct ldlm_resource *res = lock->l_resource;
-	struct list_head rpc_list;
+	struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
+							NULL : work_list;
 	int rc;
 	ENTRY;
 
 	LASSERT(lock->l_granted_mode != lock->l_req_mode);
 	check_res_locked(res);
-	LASSERT(list_empty(&res->lr_converting));
-	INIT_LIST_HEAD(&rpc_list);
+	*err = ELDLM_OK;
 
-	if (!first_enq) {
+	if (intention == LDLM_PROCESS_RESCAN) {
 		LASSERT(work_list != NULL);
 		rc = ldlm_plain_compat_queue(&res->lr_granted, lock, NULL);
 		if (!rc)
@@ -155,50 +148,19 @@ int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
 			RETURN(LDLM_ITER_STOP);
 
 		ldlm_resource_unlink_lock(lock);
-		ldlm_grant_lock(lock, work_list);
+		ldlm_grant_lock(lock, grant_work);
 		RETURN(LDLM_ITER_CONTINUE);
 	}
 
- restart:
-	rc = ldlm_plain_compat_queue(&res->lr_granted, lock, &rpc_list);
-	rc += ldlm_plain_compat_queue(&res->lr_waiting, lock, &rpc_list);
-
-	if (rc != 2) {
-		/* If either of the compat_queue()s returned 0, then we
-		 * have ASTs to send and must go onto the waiting list.
-		 *
-		 * bug 2322: we used to unlink and re-add here, which was a
-		 * terrible folly -- if we goto restart, we could get
-		 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
-		if (list_empty(&lock->l_res_link))
-			ldlm_resource_add_lock(res, &res->lr_waiting, lock);
-		unlock_res(res);
-		rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
-				       LDLM_WORK_BL_AST);
-		lock_res(res);
-		if (rc == -ERESTART) {
-			/* We were granted while waiting, nothing left to do */
-			if (lock->l_granted_mode == lock->l_req_mode)
-				GOTO(out, rc = 0);
-			/* Lock was destroyed while we were waiting, abort */
-			if (ldlm_is_destroyed(lock))
-				GOTO(out, rc = -EAGAIN);
-
-			/* Otherwise try again */
-			GOTO(restart, rc);
-		}
-		*flags |= LDLM_FL_BLOCK_GRANTED;
-	} else {
-		ldlm_resource_unlink_lock(lock);
-		ldlm_grant_lock(lock, NULL);
-	}
+	rc = ldlm_plain_compat_queue(&res->lr_granted, lock, work_list);
+	rc += ldlm_plain_compat_queue(&res->lr_waiting, lock, work_list);
 
-	rc = 0;
-out:
-	*err = rc;
-	LASSERT(list_empty(&rpc_list));
+	if (rc == 2) {
+		ldlm_resource_unlink_lock(lock);
+		ldlm_grant_lock(lock, grant_work);
+	}
 
-	RETURN(rc);
+	RETURN(LDLM_ITER_CONTINUE);
 }
 
 #endif /* HAVE_SERVER_SUPPORT */
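
For readers unfamiliar with the server-side LDLM code, the sketch below models the control flow this patch converges on: one processing routine that takes an explicit processing intention instead of the old first_enq flag, records conflicts for the caller on first enqueue, and only grants when both the granted and waiting queues report full compatibility. This is a minimal illustration under stated assumptions, not the Lustre API; the types and helpers (plain_lock, queue_compat, process_plain_lock) are hypothetical stand-ins that merely mirror the shape of ldlm_process_plain_lock() after the change.

/* Hypothetical, simplified model of intention-based lock processing.
 * None of these types exist in Lustre; they only mimic the structure of
 * ldlm_process_plain_lock() in the new code above. */
#include <stdio.h>
#include <stdbool.h>

enum process_intention {	/* stand-in for enum ldlm_process_intention */
	PROCESS_ENQUEUE,	/* first enqueue: conflicts still need blocking ASTs */
	PROCESS_RESCAN,		/* reprocess after another lock was released */
};

struct plain_lock { int mode; bool granted; };

/* Toy compatibility check: returns 1 if 'req' conflicts with nothing in
 * 'queue'; with a non-NULL 'conflicts' counter it also counts conflicting
 * locks (the real code appends them to a work list for blocking ASTs). */
static int queue_compat(const struct plain_lock *queue, int n,
			const struct plain_lock *req, int *conflicts)
{
	int compat = 1;

	for (int i = 0; i < n; i++) {
		if (queue[i].mode + req->mode > 1) {	/* toy conflict rule */
			compat = 0;
			if (conflicts)
				(*conflicts)++;
		}
	}
	return compat;
}

static int process_plain_lock(struct plain_lock *req,
			      const struct plain_lock *granted, int ng,
			      const struct plain_lock *waiting, int nw,
			      enum process_intention intention, int *conflicts)
{
	if (intention == PROCESS_RESCAN) {
		/* Rescan: blocking ASTs were already sent earlier, so either
		 * grant now or stop; never collect conflicts again. */
		if (!queue_compat(granted, ng, req, NULL) ||
		    !queue_compat(waiting, nw, req, NULL))
			return 0;		/* still blocked */
		req->granted = true;
		return 1;
	}

	/* First enqueue: check both queues, remembering conflicts so the
	 * caller can send blocking ASTs; grant only if both are compatible. */
	int rc = queue_compat(granted, ng, req, conflicts) +
		 queue_compat(waiting, nw, req, conflicts);
	if (rc == 2)
		req->granted = true;
	return req->granted;
}

int main(void)
{
	struct plain_lock granted[] = { { .mode = 1 } };
	struct plain_lock req = { .mode = 1 };
	int conflicts = 0;

	if (!process_plain_lock(&req, granted, 1, NULL, 0,
				PROCESS_ENQUEUE, &conflicts))
		printf("blocked, %d blocking AST(s) to send\n", conflicts);
	return 0;
}

The design point the patch (and this sketch) captures is that the ENQUEUE and RESCAN paths now share one compatibility walk, and the caller, rather than this function, decides what to do with the collected conflicts; the restart/AST-resend loop removed above is handled elsewhere.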