LU-11518 ldlm: control lru_size for extent lock
diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index aa2eeea..ded6c7c 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -494,7 +494,7 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
 
         lu_ref_init(&lock->l_reference);
         lu_ref_add(&lock->l_reference, "hash", lock);
-        lock->l_callback_timeout = 0;
+       lock->l_callback_timestamp = 0;
        lock->l_activity = 0;
 
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
@@ -515,16 +515,16 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
 int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                               const struct ldlm_res_id *new_resid)
 {
-        struct ldlm_resource *oldres = lock->l_resource;
+       struct ldlm_resource *oldres;
         struct ldlm_resource *newres;
         int type;
         ENTRY;
 
         LASSERT(ns_is_client(ns));
 
-        lock_res_and_lock(lock);
-        if (memcmp(new_resid, &lock->l_resource->lr_name,
-                   sizeof(lock->l_resource->lr_name)) == 0) {
+       oldres = lock_res_and_lock(lock);
+       if (memcmp(new_resid, &oldres->lr_name,
+                  sizeof(oldres->lr_name)) == 0) {
                 /* Nothing to do */
                 unlock_res_and_lock(lock);
                 RETURN(0);
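
The hunk above relies on a companion change that makes lock_res_and_lock()
return the resource it pins, so callers stop dereferencing lock->l_resource
before the lock is actually held. A minimal sketch of that assumed helper
(the real one lives in lustre/ldlm/l_lock.c and may differ in detail):

    struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
    {
            /* l_lock serializes l_resource against
             * ldlm_lock_change_resource() */
            spin_lock(&lock->l_lock);
            lock_res(lock->l_resource);
            ldlm_set_res_locked(lock);
            return lock->l_resource; /* stable until unlock_res_and_lock() */
    }

With the pointer handed back, ldlm_lock_change_resource() no longer reads
lock->l_resource before the lock is held, which was the racy part.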
@@ -617,7 +617,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if ((flags == 0) && !ldlm_is_destroyed(lock)) {
-               lu_ref_add_atomic(&lock->l_reference, "handle", current);
+               lu_ref_add_atomic(&lock->l_reference, "handle", lock);
                RETURN(lock);
        }
 
@@ -625,7 +625,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
 
        LASSERT(lock->l_resource != NULL);
 
-       lu_ref_add_atomic(&lock->l_reference, "handle", current);
+       lu_ref_add_atomic(&lock->l_reference, "handle", lock);
        if (unlikely(ldlm_is_destroyed(lock))) {
                unlock_res_and_lock(lock);
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
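
Both "handle" references in the two hunks above switch their lu_ref key from
current to lock. A hypothetical illustration of the pairing this assumes:
lu_ref keys must match between add and del, and the handle reference may be
dropped by a different task than the one that took it, so keying on the lock
itself keeps the two calls symmetric:

    static void handle_ref_pairing(struct ldlm_lock *lock)
    {
            lu_ref_add_atomic(&lock->l_reference, "handle", lock);
            /* any task may drop it later; the key still matches */
            lu_ref_del(&lock->l_reference, "handle", lock);
    }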
@@ -844,14 +844,15 @@ void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
  */
 void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
 {
-        struct ldlm_namespace *ns;
-        ENTRY;
+       struct ldlm_namespace *ns;
 
-        lock_res_and_lock(lock);
+       ENTRY;
+
+       lock_res_and_lock(lock);
 
-        ns = ldlm_lock_to_ns(lock);
+       ns = ldlm_lock_to_ns(lock);
 
-        ldlm_lock_decref_internal_nolock(lock, mode);
+       ldlm_lock_decref_internal_nolock(lock, mode);
 
        if ((ldlm_is_local(lock) || lock->l_req_mode == LCK_GROUP) &&
            !lock->l_readers && !lock->l_writers) {
@@ -868,52 +869,49 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
        }
 
        if (!lock->l_readers && !lock->l_writers && ldlm_is_cbpending(lock)) {
+               unsigned int mask = D_DLMTRACE;
+
                /* If we received a blocked AST and this was the last reference,
                 * run the callback. */
                if (ldlm_is_ns_srv(lock) && lock->l_export)
-                        CERROR("FL_CBPENDING set on non-local lock--just a "
-                               "warning\n");
+                       mask |= D_WARNING;
+               LDLM_DEBUG_LIMIT(mask, lock,
+                                "final decref done on %sCBPENDING lock",
+                                mask & D_WARNING ? "non-local " : "");
 
-                LDLM_DEBUG(lock, "final decref done on cbpending lock");
-
-                LDLM_LOCK_GET(lock); /* dropped by bl thread */
-                ldlm_lock_remove_from_lru(lock);
-                unlock_res_and_lock(lock);
+               LDLM_LOCK_GET(lock); /* dropped by bl thread */
+               ldlm_lock_remove_from_lru(lock);
+               unlock_res_and_lock(lock);
 
                if (ldlm_is_fail_loc(lock))
-                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
+                       OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
 
                if (ldlm_is_atomic_cb(lock) ||
                     ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
-                        ldlm_handle_bl_callback(ns, NULL, lock);
+                       ldlm_handle_bl_callback(ns, NULL, lock);
         } else if (ns_is_client(ns) &&
-                   !lock->l_readers && !lock->l_writers &&
+                  !lock->l_readers && !lock->l_writers &&
                   !ldlm_is_no_lru(lock) &&
                   !ldlm_is_bl_ast(lock) &&
                   !ldlm_is_converting(lock)) {
 
-                LDLM_DEBUG(lock, "add lock into lru list");
-
-                /* If this is a client-side namespace and this was the last
-                 * reference, put it on the LRU. */
-                ldlm_lock_add_to_lru(lock);
-                unlock_res_and_lock(lock);
+               /* If this is a client-side namespace and this was the last
+                * reference, put it on the LRU.
+                */
+               ldlm_lock_add_to_lru(lock);
+               unlock_res_and_lock(lock);
+               LDLM_DEBUG(lock, "add lock into lru list");
 
                if (ldlm_is_fail_loc(lock))
-                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
-
-                /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
-                 * are not supported by the server, otherwise, it is done on
-                 * enqueue. */
-                if (!exp_connect_cancelset(lock->l_conn_export) &&
-                    !ns_connect_lru_resize(ns))
-                       ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
-        } else {
-                LDLM_DEBUG(lock, "do not add lock into lru list");
-                unlock_res_and_lock(lock);
-        }
+                       OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
 
-        EXIT;
+               ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
+       } else {
+               LDLM_DEBUG(lock, "do not add lock into lru list");
+               unlock_res_and_lock(lock);
+       }
+
+       EXIT;
 }
 
 /**
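
The functional change hidden in the whitespace cleanup above is the tail of
the LRU branch: the old guard that skipped ldlm_cancel_lru() when the server
supported early cancel or LRU resize is gone, so every final decref now kicks
an asynchronous trim. A hedged reading of the call, with argument meanings
assumed from lustre_dlm.h:

    /* min = 0: no forced count, just honour the namespace lru_size and
     * age targets; LCF_ASYNC: hand the actual cancels to the blocking
     * callback threads so the decref path never blocks; lru_flags = 0:
     * default cancellation policy. */
    ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);

This unconditional trim is what lets a client enforce lru_size for extent
locks, the subject of LU-11518, even against servers that negotiate LRU
resize.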
@@ -1179,7 +1177,7 @@ static int lock_matches(struct ldlm_lock *lock, struct ldlm_match_data *data)
        if (ldlm_is_cbpending(lock) &&
            !(data->lmd_flags & LDLM_FL_CBPENDING))
                return INTERVAL_ITER_CONT;
-       if (!data->lmd_unref && ldlm_is_cbpending(lock) &&
+       if (!(data->lmd_match & LDLM_MATCH_UNREF) && ldlm_is_cbpending(lock) &&
            lock->l_readers == 0 && lock->l_writers == 0)
                return INTERVAL_ITER_CONT;
 
@@ -1189,11 +1187,12 @@ static int lock_matches(struct ldlm_lock *lock, struct ldlm_match_data *data)
        /* When we search for ast_data, we are not doing a traditional match,
         * so we don't worry about IBITS or extent matching.
         */
-       if (data->lmd_has_ast_data) {
+       if (data->lmd_match & (LDLM_MATCH_AST | LDLM_MATCH_AST_ANY)) {
                if (!lock->l_ast_data)
                        return INTERVAL_ITER_CONT;
 
-               goto matched;
+               if (data->lmd_match & LDLM_MATCH_AST_ANY)
+                       goto matched;
        }
 
        match = lock->l_req_mode;
@@ -1223,7 +1222,7 @@ static int lock_matches(struct ldlm_lock *lock, struct ldlm_match_data *data)
 
        /* We match if we have existing lock with same or wider set
           of bits. */
-       if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE))
+       if (!(data->lmd_match & LDLM_MATCH_UNREF) && LDLM_HAVE_MASK(lock, GONE))
                return INTERVAL_ITER_CONT;
 
        if (!equi(data->lmd_flags & LDLM_FL_LOCAL_ONLY, ldlm_is_local(lock)))
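
Across the lock_matches() hunks, the old lmd_unref and lmd_has_ast_data
fields collapse into a single lmd_match bit field. A sketch of the flag set
these tests assume; the authoritative enum ldlm_match_flags lives in
lustre_dlm.h, and the BIT assignments here are guesses:

    enum ldlm_match_flags {
            LDLM_MATCH_UNREF   = BIT(0), /* also match unreferenced locks */
            LDLM_MATCH_AST     = BIT(1), /* only locks with l_ast_data set */
            LDLM_MATCH_AST_ANY = BIT(2), /* any lock with l_ast_data matches */
    };

Note the behavioural tweak: with LDLM_MATCH_AST alone the search now falls
through to the normal mode and policy checks, whereas the old
lmd_has_ast_data path jumped straight to "matched".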
@@ -1404,7 +1403,8 @@ enum ldlm_mode ldlm_lock_match_with_skip(struct ldlm_namespace *ns,
                                         enum ldlm_type type,
                                         union ldlm_policy_data *policy,
                                         enum ldlm_mode mode,
-                                        struct lustre_handle *lockh, int unref)
+                                        struct lustre_handle *lockh,
+                                        enum ldlm_match_flags match_flags)
 {
        struct ldlm_match_data data = {
                .lmd_old = NULL,
@@ -1413,8 +1413,7 @@ enum ldlm_mode ldlm_lock_match_with_skip(struct ldlm_namespace *ns,
                .lmd_policy = policy,
                .lmd_flags = flags,
                .lmd_skip_flags = skip_flags,
-               .lmd_unref = unref,
-               .lmd_has_ast_data = false,
+               .lmd_match = match_flags,
        };
        struct ldlm_resource *res;
        struct ldlm_lock *lock;
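
Callers of ldlm_lock_match_with_skip() now pass a flag word where the old
int unref used to be. A hypothetical call site under these assumptions
(res_id, policy and lockh are caller state, not part of this patch):

    enum ldlm_mode mode;

    mode = ldlm_lock_match_with_skip(ns, LDLM_FL_LVB_READY, 0, &res_id,
                                     LDLM_EXTENT, &policy, LCK_PR | LCK_PW,
                                     &lockh, LDLM_MATCH_UNREF);
    if (mode != 0)
            /* lockh now holds a referenced match; LDLM_MATCH_UNREF let
             * the search consider locks with no readers or writers */
            ldlm_lock_decref(&lockh, mode);

Passing 0 for match_flags reproduces the old unref == 0 behaviour, so
existing callers translate mechanically.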
@@ -1671,7 +1670,7 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
 
        lock->l_req_mode = mode;
        lock->l_ast_data = data;
-       lock->l_pid = current_pid();
+       lock->l_pid = current->pid;
        if (ns_is_server(ns))
                ldlm_set_ns_srv(lock);
        if (cbs) {
@@ -1756,8 +1755,8 @@ enum ldlm_error ldlm_lock_enqueue(const struct lu_env *env,
                                  void *cookie, __u64 *flags)
 {
        struct ldlm_lock *lock = *lockp;
-       struct ldlm_resource *res = lock->l_resource;
-       int local = ns_is_client(ldlm_res_to_ns(res));
+       struct ldlm_resource *res;
+       int local = ns_is_client(ns);
        enum ldlm_error rc = ELDLM_OK;
        struct ldlm_interval *node = NULL;
 #ifdef HAVE_SERVER_SUPPORT
@@ -1811,15 +1810,21 @@ enum ldlm_error ldlm_lock_enqueue(const struct lu_env *env,
                RETURN(ELDLM_OK);
        }
 
+#ifdef HAVE_SERVER_SUPPORT
        /* For a replaying lock, it might be already in granted list. So
         * unlinking the lock will cause the interval node to be freed, we
         * have to allocate the interval node early otherwise we can't regrant
-        * this lock in the future. - jay */
-       if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
+        * this lock in the future. - jay
+        *
+        * The only time the ldlm_resource changes for the ldlm_lock is when
+        * ldlm_lock_change_resource() is called and that only happens for
+        * the Lustre client case.
+        */
+       if (!local && (*flags & LDLM_FL_REPLAY) &&
+           lock->l_resource->lr_type == LDLM_EXTENT)
                OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
 
-#ifdef HAVE_SERVER_SUPPORT
-       reconstruct = !local && res->lr_type == LDLM_FLOCK &&
+       reconstruct = !local && lock->l_resource->lr_type == LDLM_FLOCK &&
                      !(*flags & LDLM_FL_TEST_LOCK);
        if (reconstruct) {
                rc = req_can_reconstruct(cookie, NULL);
@@ -1830,8 +1835,7 @@ enum ldlm_error ldlm_lock_enqueue(const struct lu_env *env,
                }
        }
 #endif
-
-       lock_res_and_lock(lock);
+       res = lock_res_and_lock(lock);
        if (local && ldlm_is_granted(lock)) {
                 /* The server returned a blocked lock, but it was granted
                  * before we got a chance to actually enqueue it.  We don't
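
Two things happen in the enqueue hunks above: the extent interval-node
preallocation moves under HAVE_SERVER_SUPPORT (with local false it can only
run on a server namespace anyway), and res is no longer read until
lock_res_and_lock() hands it back. The ordering matters because the slab
allocation may sleep; a sketch of the pattern, under the assumption that
lock_res_and_lock() takes spinlocks:

    /* GFP_NOFS may sleep, so allocate before any spinlock is held */
    if (!local && (*flags & LDLM_FL_REPLAY) &&
        lock->l_resource->lr_type == LDLM_EXTENT)
            OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);

    res = lock_res_and_lock(lock); /* lr_* fields stable from here */

The unlocked lock->l_resource->lr_type read is safe on the server because,
per the new comment, only a client ever calls ldlm_lock_change_resource().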
@@ -2789,7 +2793,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
                                 lock->l_flags, nid,
                                 lock->l_remote_handle.cookie,
                                 exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
-                                lock->l_pid, lock->l_callback_timeout,
+                                lock->l_pid, lock->l_callback_timestamp,
                                 lock->l_lvb_type);
                 va_end(args);
                 return;
@@ -2815,7 +2819,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
                                 lock->l_flags, nid,
                                 lock->l_remote_handle.cookie,
                                 exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
-                                lock->l_pid, lock->l_callback_timeout,
+                                lock->l_pid, lock->l_callback_timestamp,
                                 lock->l_lvb_type);
                break;
 
@@ -2838,7 +2842,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
                                 lock->l_flags, nid,
                                 lock->l_remote_handle.cookie,
                                 exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
-                                lock->l_pid, lock->l_callback_timeout);
+                                lock->l_pid, lock->l_callback_timestamp);
                break;
 
        case LDLM_IBITS:
@@ -2859,7 +2863,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
                                 lock->l_flags, nid,
                                 lock->l_remote_handle.cookie,
                                 exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
-                                lock->l_pid, lock->l_callback_timeout,
+                                lock->l_pid, lock->l_callback_timestamp,
                                 lock->l_lvb_type);
                break;
 
@@ -2879,7 +2883,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
                                 lock->l_flags, nid,
                                 lock->l_remote_handle.cookie,
                                 exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
-                                lock->l_pid, lock->l_callback_timeout,
+                                lock->l_pid, lock->l_callback_timestamp,
                                 lock->l_lvb_type);
                break;
        }