LU-8047 llite: optimizations for not granted lock processing (change 19665, patch set 13)
author Andrew Perepechko <c17827@cray.com>
Thu, 7 Mar 2019 20:18:45 +0000 (12:18 -0800)
committer Oleg Drokin <green@whamcloud.com>
Thu, 21 Mar 2019 03:42:46 +0000 (03:42 +0000)
This patch skips ll_md_blocking_ast() cancellation processing for
locks that were never granted. Without this check,
ll_invalidate_negative_children() can slow down I/O significantly
for no benefit when the directory cache holds thousands or
millions of files.
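
At its core the change is a tiny predicate plus an early exit in the
client's blocking AST. The following standalone C sketch illustrates
that pattern only; the struct, the enum values and the
invalidate_cached_children() helper are simplified stand-ins for the
real struct ldlm_lock and for ll_invalidate_negative_children(), not
the actual llite code.

/*
 * Minimal, self-contained sketch (not the Lustre implementation).
 * struct ldlm_lock here is a simplified stand-in that models only
 * the two mode fields the new helper reads.
 */
#include <stdio.h>

enum ldlm_mode {                /* values illustrative, not Lustre's */
        LCK_MINMODE = 0,
        LCK_PR      = 4,
};

struct ldlm_lock {
        enum ldlm_mode l_req_mode;      /* mode the client asked for */
        enum ldlm_mode l_granted_mode;  /* mode actually granted so far */
};

/* Mirrors the helper this patch adds to lustre_dlm.h. */
static inline int ldlm_is_granted(struct ldlm_lock *lock)
{
        return lock->l_req_mode == lock->l_granted_mode;
}

/*
 * Hypothetical stand-in for the expensive dcache walk that
 * ll_invalidate_negative_children() performs in the real code.
 */
static void invalidate_cached_children(struct ldlm_lock *lock)
{
        (void)lock;
        printf("walking cached directory entries...\n");
}

/* Simplified LDLM_CB_CANCELING path: skip the walk for ungranted locks. */
static void md_blocking_ast_canceling(struct ldlm_lock *lock)
{
        if (!ldlm_is_granted(lock)) {
                printf("lock never granted, nothing to invalidate\n");
                return;
        }
        invalidate_cached_children(lock);
}

int main(void)
{
        struct ldlm_lock waiting = { .l_req_mode = LCK_PR,
                                     .l_granted_mode = LCK_MINMODE };
        struct ldlm_lock granted = { .l_req_mode = LCK_PR,
                                     .l_granted_mode = LCK_PR };

        md_blocking_ast_canceling(&waiting);    /* early exit */
        md_blocking_ast_canceling(&granted);    /* does the walk */
        return 0;
}

In the actual patch the same ldlm_is_granted() helper also replaces
every open-coded l_req_mode == l_granted_mode comparison across the
ldlm, llite, mdt and osc code shown in the diffs below.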

Change-Id: Ic69c5f02f71c14db4b9609677d102dd2993f4feb
Seagate-bug-id: MRP-3409
Signed-off-by: Andrew Perepechko <c17827@cray.com>
Reviewed-on: https://review.whamcloud.com/19665
Tested-by: Jenkins
Reviewed-by: Mike Pershin <mpershin@whamcloud.com>
Reviewed-by: Lai Siyao <lai.siyao@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
12 files changed:
lustre/include/lustre_dlm.h
lustre/ldlm/ldlm_extent.c
lustre/ldlm/ldlm_inodebits.c
lustre/ldlm/ldlm_internal.h
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_plain.c
lustre/ldlm/ldlm_request.c
lustre/llite/namei.c
lustre/mdt/mdt_lvb.c
lustre/osc/osc_lock.c
lustre/osc/osc_request.c

lustre/include/lustre_dlm.h
index 3f4f06f..8aa4a76 100644
@@ -1042,6 +1042,11 @@ struct ldlm_resource {
        struct lu_ref           lr_reference;
 };
 
+static inline int ldlm_is_granted(struct ldlm_lock *lock)
+{
+       return lock->l_req_mode == lock->l_granted_mode;
+}
+
 static inline bool ldlm_has_layout(struct ldlm_lock *lock)
 {
        return lock->l_resource->lr_type == LDLM_IBITS &&
lustre/ldlm/ldlm_extent.c
index 97c754d..82e13ef 100644
@@ -555,8 +555,8 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                     lock->l_policy_data.l_extent.gid) {
                                         /* If existing lock with matched gid is granted,
                                            we grant new one too. */
-                                        if (lock->l_req_mode == lock->l_granted_mode)
-                                                RETURN(2);
+                                       if (ldlm_is_granted(lock))
+                                               RETURN(2);
 
                                         /* Otherwise we are scanning queue of waiting
                                          * locks and it means current request would
@@ -584,8 +584,8 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                 }
                         }
 
-                        if (unlikely(req_mode == LCK_GROUP &&
-                                     (lock->l_req_mode != lock->l_granted_mode))) {
+                       if (unlikely(req_mode == LCK_GROUP &&
+                                    !ldlm_is_granted(lock))) {
                                 scan = 1;
                                 compat = 0;
                                 if (lock->l_req_mode != LCK_GROUP) {
@@ -792,7 +792,7 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
                                                        NULL : work_list;
        ENTRY;
 
-       LASSERT(lock->l_granted_mode != lock->l_req_mode);
+       LASSERT(!ldlm_is_granted(lock));
        LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
                !ldlm_is_ast_discard_data(lock));
        check_res_locked(res);
@@ -1035,7 +1035,7 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
         struct ldlm_extent *extent;
        int idx, rc;
 
-        LASSERT(lock->l_granted_mode == lock->l_req_mode);
+       LASSERT(ldlm_is_granted(lock));
 
         node = lock->l_tree_node;
         LASSERT(node != NULL);
lustre/ldlm/ldlm_inodebits.c
index fd1c62c..4d66c20 100644
@@ -210,7 +210,7 @@ int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags,
 
        ENTRY;
 
-       LASSERT(lock->l_granted_mode != lock->l_req_mode);
+       LASSERT(!ldlm_is_granted(lock));
        check_res_locked(res);
 
        if (intention == LDLM_PROCESS_RESCAN) {
lustre/ldlm/ldlm_internal.h
index 0f184e3..d2235c0 100644
@@ -357,8 +357,7 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
         int ret = 0;
 
         lock_res_and_lock(lock);
-       if ((lock->l_req_mode == lock->l_granted_mode) &&
-            !ldlm_is_cp_reqd(lock))
+       if (ldlm_is_granted(lock) && !ldlm_is_cp_reqd(lock))
                ret = 1;
        else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
                 ret = 1;
lustre/ldlm/ldlm_lock.c
index 237fe67..748c01e 100644
@@ -1084,7 +1084,7 @@ void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
 {
        struct sl_insert_point prev;
 
-       LASSERT(lock->l_req_mode == lock->l_granted_mode);
+       LASSERT(ldlm_is_granted(lock));
 
        search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
        ldlm_granted_list_add_lock(lock, &prev);
@@ -1759,7 +1759,7 @@ enum ldlm_error ldlm_lock_enqueue(const struct lu_env *env,
                         *flags |= LDLM_FL_LOCK_CHANGED;
                         RETURN(0);
                } else if (rc != ELDLM_OK &&
-                          lock->l_req_mode == lock->l_granted_mode) {
+                          ldlm_is_granted(lock)) {
                        LASSERT(*flags & LDLM_FL_RESENT);
                        /* It may happen that ns_policy returns an error in
                         * resend case, object may be unlinked or just some
@@ -1782,7 +1782,7 @@ enum ldlm_error ldlm_lock_enqueue(const struct lu_env *env,
                 * Take NO_TIMEOUT from the lock as it is inherited through
                 * LDLM_FL_INHERIT_MASK */
                *flags |= LDLM_FL_LOCK_CHANGED;
-               if (lock->l_req_mode != lock->l_granted_mode)
+               if (!ldlm_is_granted(lock))
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                *flags |= lock->l_flags & LDLM_FL_NO_TIMEOUT;
                RETURN(ELDLM_OK);
@@ -1795,8 +1795,8 @@ enum ldlm_error ldlm_lock_enqueue(const struct lu_env *env,
        if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
                OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
 
-        lock_res_and_lock(lock);
-        if (local && lock->l_req_mode == lock->l_granted_mode) {
+       lock_res_and_lock(lock);
+       if (local && ldlm_is_granted(lock)) {
                 /* The server returned a blocked lock, but it was granted
                  * before we got a chance to actually enqueue it.  We don't
                  * need to do anything else. */
@@ -1993,7 +1993,7 @@ int ldlm_handle_conflict_lock(struct ldlm_lock *lock, __u64 *flags,
                        RETURN(-EAGAIN);
 
                /* lock was granted while resource was unlocked. */
-               if (lock->l_granted_mode == lock->l_req_mode) {
+               if (ldlm_is_granted(lock)) {
                        /* bug 11300: if the lock has been granted,
                         * break earlier because otherwise, we will go
                         * to restart and ldlm_resource_unlink will be
@@ -2447,8 +2447,8 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
         ldlm_resource_unlink_lock(lock);
         ldlm_lock_destroy_nolock(lock);
 
-        if (lock->l_granted_mode == lock->l_req_mode)
-                ldlm_pool_del(&ns->ns_pool, lock);
+       if (ldlm_is_granted(lock))
+               ldlm_pool_del(&ns->ns_pool, lock);
 
         /* Make sure we will not be called again for same lock what is possible
          * if not to zero out lock->l_granted_mode */
lustre/ldlm/ldlm_lockd.c
index 4df7ec8..058585d 100644
@@ -397,7 +397,7 @@ static void ldlm_add_blocked_lock(struct ldlm_lock *lock)
 {
        spin_lock_bh(&lock->l_export->exp_bl_list_lock);
        if (list_empty(&lock->l_exp_list)) {
-               if (lock->l_granted_mode != lock->l_req_mode)
+               if (!ldlm_is_granted(lock))
                        list_add_tail(&lock->l_exp_list,
                                      &lock->l_export->exp_bl_list);
                else
@@ -885,7 +885,7 @@ int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                RETURN(0);
        }
 
-       if (lock->l_granted_mode != lock->l_req_mode) {
+       if (!ldlm_is_granted(lock)) {
                /* this blocking AST will be communicated as part of the
                 * completion AST instead */
                ldlm_add_blocked_lock(lock);
@@ -915,7 +915,7 @@ int ldlm_server_blocking_ast(struct ldlm_lock *lock,
 
                req->rq_no_resend = 1;
        } else {
-               LASSERT(lock->l_granted_mode == lock->l_req_mode);
+               LASSERT(ldlm_is_granted(lock));
                ldlm_add_waiting_lock(lock, ldlm_bl_timeout(lock));
                unlock_res_and_lock(lock);
 
@@ -1377,7 +1377,7 @@ existing_lock:
                                bl_lock->l_policy_data.l_inodebits.bits;
                }
                dlm_rep->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
-                if (lock->l_granted_mode == lock->l_req_mode) {
+               if (ldlm_is_granted(lock)) {
                         /*
                          * Only cancel lock if it was granted, because it would
                          * be destroyed immediately and would never be granted
@@ -1802,7 +1802,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                while (to > 0) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(to);
-                       if (lock->l_granted_mode == lock->l_req_mode ||
+                       if (ldlm_is_granted(lock) ||
                            ldlm_is_destroyed(lock))
                                break;
                }
@@ -1844,7 +1844,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
        }
 
        if (ldlm_is_destroyed(lock) ||
-           lock->l_granted_mode == lock->l_req_mode) {
+           ldlm_is_granted(lock)) {
                /* bug 11300: the lock has already been granted */
                unlock_res_and_lock(lock);
                LDLM_DEBUG(lock, "Double grant race happened");
@@ -2477,10 +2477,10 @@ static int ldlm_revoke_lock_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 
         lock_res_and_lock(lock);
 
-        if (lock->l_req_mode != lock->l_granted_mode) {
-                unlock_res_and_lock(lock);
-                return 0;
-        }
+       if (!ldlm_is_granted(lock)) {
+               unlock_res_and_lock(lock);
+               return 0;
+       }
 
         LASSERT(lock->l_resource);
         if (lock->l_resource->lr_type != LDLM_IBITS &&
lustre/ldlm/ldlm_plain.c
index aa074f5..6407fd2 100644
@@ -134,7 +134,7 @@ int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
        int rc;
        ENTRY;
 
-       LASSERT(lock->l_granted_mode != lock->l_req_mode);
+       LASSERT(!ldlm_is_granted(lock));
        check_res_locked(res);
        *err = ELDLM_OK;
 
lustre/ldlm/ldlm_request.c
index d7b1b96..a6310d9 100644
@@ -516,9 +516,8 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
 
         /* Set a flag to prevent us from sending a CANCEL (bug 407) */
         lock_res_and_lock(lock);
-        /* Check that lock is not granted or failed, we might race. */
-        if ((lock->l_req_mode != lock->l_granted_mode) &&
-           !ldlm_is_failed(lock)) {
+       /* Check that lock is not granted or failed, we might race. */
+       if (!ldlm_is_granted(lock) && !ldlm_is_failed(lock)) {
                /* Make sure that this lock will not be found by raced
                 * bl_ast and -EINVAL reply is sent to server anyways.
                 * b=17645*/
@@ -702,7 +701,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                 * Cannot unlock after the check either, a that still leaves
                 * a tiny window for completion to get in */
                lock_res_and_lock(lock);
-               if (lock->l_req_mode != lock->l_granted_mode)
+               if (!ldlm_is_granted(lock))
                        rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
                                           lock->l_lvb_data, lvb_len);
                unlock_res_and_lock(lock);
@@ -2449,7 +2448,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
         * This happens whenever a lock enqueue is the request that triggers
         * recovery.
         */
-       if (lock->l_granted_mode == lock->l_req_mode)
+       if (ldlm_is_granted(lock))
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
        else if (!list_empty(&lock->l_res_link))
                flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
lustre/llite/namei.c
index 53b8a74..583a217 100644
@@ -466,6 +466,10 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                break;
        }
        case LDLM_CB_CANCELING:
+               /* Nothing to do for non-granted locks */
+               if (!ldlm_is_granted(lock))
+                       break;
+
                if (ldlm_is_converting(lock)) {
                        /* this is called on already converted lock, so
                         * ibits has remained bits only and cancel_bits
lustre/mdt/mdt_lvb.c
index 97df082..d4e11ca 100644
@@ -389,7 +389,7 @@ static int mdt_lvbo_fill(const struct lu_env *env, struct ldlm_lock *lock,
        }
 
        /* Only fill layout if layout lock is granted */
-       if (!ldlm_has_layout(lock) || lock->l_granted_mode != lock->l_req_mode)
+       if (!ldlm_has_layout(lock) || !ldlm_is_granted(lock))
                GOTO(out, rc = 0);
 
        /* XXX get fid by resource id. why don't include fid in ldlm_resource */
lustre/osc/osc_lock.c
index ebdfb37..19beda2 100644
@@ -106,7 +106,7 @@ static int osc_lock_invariant(struct osc_lock *ols)
 
        if (! ergo(ols->ols_state == OLS_GRANTED,
                   olock != NULL &&
-                  olock->l_req_mode == olock->l_granted_mode &&
+                  ldlm_is_granted(olock) &&
                   ols->ols_hold))
                return 0;
        return 1;
@@ -230,7 +230,7 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
 
        /* Lock must have been granted. */
        lock_res_and_lock(dlmlock);
-       if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
+       if (ldlm_is_granted(dlmlock)) {
                struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
                struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
 
@@ -339,7 +339,7 @@ static int osc_lock_upcall_speculative(void *cookie,
        LASSERT(dlmlock != NULL);
 
        lock_res_and_lock(dlmlock);
-       LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
+       LASSERT(ldlm_is_granted(dlmlock));
 
        /* there is no osc_lock associated with speculative locks */
        osc_lock_lvb_update(env, osc, dlmlock, NULL);
@@ -407,7 +407,7 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env,
        LASSERT(flag == LDLM_CB_CANCELING);
 
        lock_res_and_lock(dlmlock);
-       if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
+       if (!ldlm_is_granted(dlmlock)) {
                dlmlock->l_ast_data = NULL;
                unlock_res_and_lock(dlmlock);
                RETURN(0);
lustre/osc/osc_request.c
index 43b5d47..52120fb 100644
@@ -3154,7 +3154,7 @@ static int osc_cancel_weight(struct ldlm_lock *lock)
         * Cancel all unused and granted extent lock.
         */
        if (lock->l_resource->lr_type == LDLM_EXTENT &&
-           lock->l_granted_mode == lock->l_req_mode &&
+           ldlm_is_granted(lock) &&
            osc_ldlm_weigh_ast(lock) == 0)
                RETURN(1);