b=18618
diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index 7b84a27..1eaa0e1 100644
@@ -78,11 +78,11 @@ int ldlm_expired_completion_wait(void *data)
                         RETURN(0);
 
                 LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
-                          CFS_DURATION_T"s ago); not entering recovery in "
+                           CFS_DURATION_T"s ago); not entering recovery in "
                            "server code, just going back to sleep",
-                          lock->l_enqueued_time.tv_sec,
+                           lock->l_last_activity,
                            cfs_time_sub(cfs_time_current_sec(),
-                           lock->l_enqueued_time.tv_sec));
+                           lock->l_last_activity));
                 if (cfs_time_after(cfs_time_current(), next_dump)) {
                         last_dump = next_dump;
                         next_dump = cfs_time_shift(300);
@@ -99,9 +99,8 @@ int ldlm_expired_completion_wait(void *data)
         ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
         LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
                   CFS_DURATION_T"s ago), entering recovery for %s@%s",
-                  lock->l_enqueued_time.tv_sec,
-                  cfs_time_sub(cfs_time_current_sec(),
-                  lock->l_enqueued_time.tv_sec),
+                  lock->l_last_activity,
+                  cfs_time_sub(cfs_time_current_sec(), lock->l_last_activity),
                   obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
 
         RETURN(0);
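
Both timed-out messages above now read the enqueue timestamp from lock->l_last_activity, a plain seconds value, instead of the old struct timeval field l_enqueued_time.tv_sec. A minimal standalone model of the age computation, assuming cfs_time_sub() on second-granularity values reduces to subtraction:

#include <stdio.h>
#include <time.h>

/* Standalone model of the "enqueued N s ago" computation above;
 * last_activity stands in for lock->l_last_activity. */
static long lock_age_seconds(time_t last_activity)
{
        return (long)(time(NULL) - last_activity);
}

int main(void)
{
        time_t enqueued = time(NULL) - 42;  /* hypothetical: enqueued 42s ago */
        printf("lock enqueued %lds ago\n", lock_age_seconds(enqueued));
        return 0;
}
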
@@ -117,25 +116,11 @@ int ldlm_get_enq_timeout(struct ldlm_lock *lock)
         /* Since these are non-updating timeouts, we should be conservative.
            It would be nice to have some kind of "early reply" mechanism for
            lock callbacks too... */
-        timeout = timeout + (timeout >> 1); /* 150% */
+        timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */
         return max(timeout, ldlm_enqueue_min);
 }
 EXPORT_SYMBOL(ldlm_get_enq_timeout);
 
-static int is_granted_or_cancelled(struct ldlm_lock *lock)
-{
-        int ret = 0;
-
-        lock_res_and_lock(lock);
-        if (((lock->l_req_mode == lock->l_granted_mode) &&
-             !(lock->l_flags & LDLM_FL_CP_REQD)) ||
-            (lock->l_flags & LDLM_FL_FAILED))
-                ret = 1;
-        unlock_res_and_lock(lock);
-
-        return ret;
-}
-
 /**
  * Helper function for ldlm_completion_ast(), updating timings when lock is
  * actually granted.
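
The new min_t() bounds the 150% enqueue timeout by at_max, the adaptive-timeout ceiling. The same hunk drops the file-local is_granted_or_cancelled() helper even though it is still called from the wait loop further below, so the helper has presumably moved to a shared header. A standalone sketch of the resulting timeout arithmetic, with hypothetical sample values:

#include <stdio.h>

/* Standalone model of ldlm_get_enq_timeout() after this change; at_max
 * and enqueue_min are hypothetical sample values standing in for the
 * adaptive-timeout ceiling and ldlm_enqueue_min. */
static int enq_timeout(int timeout, int at_max, int enqueue_min)
{
        timeout = timeout + (timeout >> 1);     /* 150% */
        if (timeout > at_max)                   /* new: never exceed at_max */
                timeout = at_max;
        return timeout > enqueue_min ? timeout : enqueue_min;
}

int main(void)
{
        /* A 1000s base timeout used to become 1500s; with at_max = 600
         * it is now capped at 600s. */
        printf("%d\n", enq_timeout(1000, 600, 30));
        return 0;
}
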
@@ -150,7 +135,7 @@ static int ldlm_completion_tail(struct ldlm_lock *lock)
                 result = -EIO;
         } else {
                 delay = cfs_time_sub(cfs_time_current_sec(),
-                                     lock->l_enqueued_time.tv_sec);
+                                     lock->l_last_activity);
                 LDLM_DEBUG(lock, "client-side enqueue: granted after "
                            CFS_DURATION_T"s", delay);
 
@@ -266,14 +251,14 @@ noreproc:
                 spin_unlock(&imp->imp_lock);
         }
 
-        if (ns_is_client(lock->l_resource->lr_namespace) && 
+        if (ns_is_client(lock->l_resource->lr_namespace) &&
             OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
                                  OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
                 lock->l_flags |= LDLM_FL_FAIL_LOC;
                 rc = -EINTR;
         } else {
                 /* Go to sleep until the lock is granted or cancelled. */
-                rc = l_wait_event(lock->l_waitq, 
+                rc = l_wait_event(lock->l_waitq,
                                   is_granted_or_cancelled(lock), &lwi);
         }
 
@@ -460,20 +445,20 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
         /* Set a flag to prevent us from sending a CANCEL (bug 407) */
         lock_res_and_lock(lock);
         /* Check that lock is not granted or failed, we might race. */
-        if ((lock->l_req_mode != lock->l_granted_mode) && 
+        if ((lock->l_req_mode != lock->l_granted_mode) &&
             !(lock->l_flags & LDLM_FL_FAILED)) {
                 /* Make sure that this lock will not be found by raced
-                 * bl_ast and -EINVAL reply is sent to server anyways. 
+                 * bl_ast and -EINVAL reply is sent to server anyway.
                  * bug 17645 */
-                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED | 
+                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
                                  LDLM_FL_ATOMIC_CB;
                 need_cancel = 1;
         }
         unlock_res_and_lock(lock);
-  
+
         if (need_cancel) {
-                LDLM_DEBUG(lock, 
-                           "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | " 
+                LDLM_DEBUG(lock,
+                           "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | "
                            "LDLM_FL_ATOMIC_CB");
                 ldlm_lock_decref_and_cancel(lockh, mode);
         } else {
@@ -1328,65 +1313,6 @@ static int ldlm_cancel_list(struct list_head *cancels, int count, int flags)
 }
 
 /**
- * Callback function for shrink policy. Makes decision whether to keep
- * \a lock in LRU for current \a LRU size \a unused, added in current scan
- * \a added and number of locks to be preferably canceled \a count.
- *
- * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
- *
- * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
- */
-static ldlm_policy_res_t ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
-                                                   struct ldlm_lock *lock,
-                                                   int unused, int added,
-                                                   int count)
-{
-        int lock_cost;
-        __u64 page_nr;
-
-        /*
-         * Stop lru processing when we reached passed @count or checked all
-         * locks in lru.
-         */
-        if (count && added >= count)
-                return LDLM_POLICY_KEEP_LOCK;
-
-        if (lock->l_resource->lr_type == LDLM_EXTENT) {
-                if (lock->l_weigh_ast) {
-                        /*
-                         * For liblustre, l_weigh_ast should return 0 since it
-                         * don't cache pages
-                         */
-                        page_nr = lock->l_weigh_ast(lock);
-                } else {
-                        struct ldlm_extent *l_extent;
-
-                        /*
-                         * For all extent locks cost is 1 + number of pages in
-                         * their extent.
-                         */
-                        l_extent = &lock->l_policy_data.l_extent;
-                        page_nr = l_extent->end - l_extent->start;
-                        do_div(page_nr, CFS_PAGE_SIZE);
-                }
-                lock_cost = 1 + page_nr;
-        } else {
-                /*
-                 * For all locks which are not extent ones cost is 1
-                 */
-                lock_cost = 1;
-        }
-
-        /*
-         * Keep all expensive locks in lru for the memory pressure time
-         * cancel policy. They anyways may be canceled by lru resize
-         * pplicy if they have not small enough CLV.
-         */
-        return lock_cost > ns->ns_shrink_thumb ?
-                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
-}
-
-/**
  * Callback function for lru-resize policy. Makes decision whether to keep
  * \a lock in LRU for current \a LRU size \a unused, added in current scan
  * \a added and number of locks to be preferably canceled \a count.
@@ -1509,7 +1435,8 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
 {
         if (ns_connect_lru_resize(ns)) {
                 if (flags & LDLM_CANCEL_SHRINK)
-                        return ldlm_cancel_shrink_policy;
+                        /* We kill the passed number of old locks. */
+                        return ldlm_cancel_passed_policy;
                 else if (flags & LDLM_CANCEL_LRUR)
                         return ldlm_cancel_lrur_policy;
                 else if (flags & LDLM_CANCEL_PASSED)
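
With ldlm_cancel_shrink_policy() removed, memory-pressure shrinking falls through to the plain passed-count policy: cancel the requested number of unused locks, oldest first, with no per-lock cost weighting against ns_shrink_thumb (the separate estimation pass, ldlm_cancel_lru_estimate(), is removed below for the same reason). The body of ldlm_cancel_passed_policy() is not shown in this diff; a sketch of its stop condition, inferred from the count check the removed policy shared:

/* Inferred stop condition of the passed-count policy: cancel until the
 * requested number of locks has been gathered, then keep the rest. */
enum policy_res { POLICY_CANCEL_LOCK, POLICY_KEEP_LOCK };

static enum policy_res passed_policy(int added, int count)
{
        return (count && added >= count) ? POLICY_KEEP_LOCK
                                         : POLICY_CANCEL_LOCK;
}
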
@@ -1661,61 +1588,6 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
         RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
 }
 
-/* Returns number of locks which could be canceled next time when
- * ldlm_cancel_lru() is called. Used from locks pool shrinker. */
-int ldlm_cancel_lru_estimate(struct ldlm_namespace *ns,
-                             int count, int max, int flags)
-{
-        struct list_head disp = CFS_LIST_HEAD_INIT(disp);
-        ldlm_cancel_lru_policy_t pf;
-        struct ldlm_lock *lock;
-        int added = 0, unused;
-        int loop_stop = 0;
-        ENTRY;
-
-        pf = ldlm_cancel_lru_policy(ns, flags);
-        LASSERT(pf != NULL);
-        spin_lock(&ns->ns_unused_lock);
-        unused = ns->ns_nr_unused;
-        list_splice_init(&ns->ns_unused_list, &disp);
-        while (!list_empty(&disp)) {
-                lock = list_entry(disp.next, struct ldlm_lock, l_lru);
-                list_move_tail(&lock->l_lru, &ns->ns_unused_list);
-
-                /* For any flags, stop scanning if @max is reached. */
-                if (max && added >= max)
-                        break;
-
-                /* Somebody is already doing CANCEL or there is a
-                 * blocking request will send cancel. Let's not count
-                 * this lock. */
-                if ((lock->l_flags & LDLM_FL_CANCELING) ||
-                    (lock->l_flags & LDLM_FL_BL_AST))
-                        continue;
-
-                LDLM_LOCK_GET(lock);
-                spin_unlock(&ns->ns_unused_lock);
-                lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
-
-                /* Pass the lock through the policy filter and see if it
-                 * should stay in lru. */
-                if (pf(ns, lock, unused, added, count) == LDLM_POLICY_KEEP_LOCK)
-                        loop_stop = 1;
-
-                lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
-                LDLM_LOCK_RELEASE(lock);
-                spin_lock(&ns->ns_unused_lock);
-                if (loop_stop)
-                        break;
-
-                added++;
-                unused--;
-        }
-        list_splice(&disp, ns->ns_unused_list.prev);
-        spin_unlock(&ns->ns_unused_lock);
-        RETURN(added);
-}
-
 /* when called with LDLM_ASYNC the blocking callback will be handled
  * in a thread and this function will return after the thread has been
  * asked to call the callback.  when called with LDLM_SYNC the blocking
@@ -1737,8 +1609,8 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
                         RETURN(count);
         }
 
-        /* If an error occured in ASYNC mode, or
-         * this is SYNC mode, cancel the list. */
+        /* If an error occurred in ASYNC mode, or this is SYNC mode,
+         * cancel the list. */
         ldlm_cli_cancel_list(&cancels, count, NULL, 0);
         RETURN(count);
 }
@@ -2078,9 +1950,15 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
         /* we use l_pending_chain here, because it's unused on clients. */
         LASSERTF(list_empty(&lock->l_pending_chain),"lock %p next %p prev %p\n",
                  lock, &lock->l_pending_chain.next,&lock->l_pending_chain.prev);
-        /* bug 9573: don't replay locks left after eviction */
-        if (!(lock->l_flags & LDLM_FL_FAILED))
+        /* bug 9573: don't replay locks left after eviction, or
+         * bug 17614: locks being actively cancelled. Get a reference
+         * on the lock so that it does not disappear under us (e.g. due to cancel)
+         */
+        if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
                 list_add(&lock->l_pending_chain, list);
+                LDLM_LOCK_GET(lock);
+        }
+
         return LDLM_ITER_CONTINUE;
 }
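
This hunk pairs with the final one: a reference is taken on every lock chained for replay, and ldlm_replay_locks() drops it once the lock has been replayed, or skipped after an earlier failure. A minimal model of that reference discipline, with hypothetical names:

/* Each lock chained for replay carries one extra reference (LDLM_LOCK_GET);
 * the replay loop drops it (LDLM_LOCK_PUT) on every exit path, so a racing
 * cancel cannot free the lock mid-replay. */
struct lock { int refs; };

static void lock_get(struct lock *lk) { lk->refs++; }
static void lock_put(struct lock *lk) { lk->refs--; }

static int replay_all(struct lock **locks, int n,
                      int (*replay_one)(struct lock *))
{
        int rc = 0;

        for (int i = 0; i < n; i++) {
                if (rc == 0)                    /* stop replaying on error */
                        rc = replay_one(locks[i]);
                lock_put(locks[i]);             /* but always drop the ref */
        }
        return rc;
}
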
 
@@ -2117,10 +1995,10 @@ static int replay_lock_interpret(const struct lu_env *env,
         lock->l_remote_handle = reply->lock_handle;
 
         /* Key change rehash lock in per-export hash with new key */
-       exp = req->rq_export;
+        exp = req->rq_export;
         if (exp && exp->exp_lock_hash)
                 lustre_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
-                                      &lock->l_remote_handle,
+                                       &lock->l_remote_handle,
                                        &lock->l_exp_hash);
 
         LDLM_DEBUG(lock, "replayed lock:");
@@ -2234,9 +2112,12 @@ int ldlm_replay_locks(struct obd_import *imp)
 
         list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
                 list_del_init(&lock->l_pending_chain);
-                if (rc)
+                if (rc) {
+                        LDLM_LOCK_PUT(lock);
                         continue; /* or try to do the rest? */
+                }
                 rc = replay_one_lock(imp, lock);
+                LDLM_LOCK_PUT(lock);
         }
 
         atomic_dec(&imp->imp_replay_inflight);