b=2262
diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index 08281b0..771ed62 100644
--- a/lustre/ldlm/ldlm_request.c
+++ b/lustre/ldlm/ldlm_request.c
@@ -521,8 +521,9 @@ struct ptlrpc_request *ldlm_prep_enqueue_req(struct obd_export *exp,
                  * EARLY_CANCEL. Otherwise we have to send an extra CANCEL
                  * RPC right at enqueue time, which makes it slower than an
                  * asynchronous RPC in the blocking thread. */
-                count += ldlm_cancel_lru_local(ns, cancels, 1, avail - count,
-                                               LDLM_CANCEL_AGED);
+                count += ldlm_cancel_lru_local(ns, cancels,
+                                               exp_connect_lru_resize(exp) ? 0 : 1,
+                                               avail - count, LDLM_CANCEL_AGED);
                 size[DLM_LOCKREQ_OFF] =
                         ldlm_request_bufsize(count, LDLM_ENQUEUE);
         }
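
The hunk above changes the third argument of ldlm_cancel_lru_local() from a hard-coded 1 to 0 when the export negotiated LRU resize: with a server-managed lock volume (SLV), the enqueue path no longer forces at least one aged lock into the early-cancel set and instead lets the SLV heuristic later in this file decide. A connect-flag test such as exp_connect_lru_resize() is typically just a bit test on the flags agreed at connect time; the sketch below is illustrative only, with made-up names and a made-up flag bit.

#include <stdint.h>
#include <stdio.h>

#define DEMO_CONNECT_LRU_RESIZE (1ULL << 5)     /* hypothetical flag bit */

struct demo_export {
        uint64_t exp_connect_flags;             /* negotiated at connect time */
};

static int demo_connect_lru_resize(const struct demo_export *exp)
{
        return !!(exp->exp_connect_flags & DEMO_CONNECT_LRU_RESIZE);
}

int main(void)
{
        struct demo_export exp = { .exp_connect_flags = DEMO_CONNECT_LRU_RESIZE };

        /* With LRU resize negotiated, request 0 forced cancels and let the
         * SLV-based policy pick; otherwise force at least one aged lock. */
        int count_arg = demo_connect_lru_resize(&exp) ? 0 : 1;

        printf("count argument: %d\n", count_arg);
        return 0;
}
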
@@ -942,6 +943,50 @@ out:
         return sent ? sent : rc;
 }
 
+static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
+{
+        LASSERT(imp != NULL);
+        return &imp->imp_obd->obd_namespace->ns_pool;
+}
+
+int ldlm_cli_update_pool(struct ptlrpc_request *req)
+{
+        struct ldlm_pool *pl;
+        ENTRY;
+    
+        if (!imp_connect_lru_resize(req->rq_import))
+                RETURN(0);
+
+        pl = ldlm_imp2pl(req->rq_import);
+        
+        spin_lock(&pl->pl_lock);
+#ifdef __KERNEL__
+        {
+                __u64 old_slv, fast_slv_change;
+
+                old_slv = ldlm_pool_get_slv(pl);
+                fast_slv_change = old_slv * LDLM_POOLS_FAST_SLV_CHANGE;
+                do_div(fast_slv_change, 100);
+#endif
+                pl->pl_update_time = cfs_time_current();
+                ldlm_pool_set_slv(pl, lustre_msg_get_slv(req->rq_repmsg));
+                ldlm_pool_set_limit(pl, lustre_msg_get_limit(req->rq_repmsg));
+#ifdef __KERNEL__
+                /* Wake up the pools thread only if the SLV has changed by
+                 * more than 5% since the last update. In that case we want
+                 * to react asap. Otherwise there is no sense in waking up
+                 * the pools as they are recalculated every 1s anyway. */
+                if (old_slv > ldlm_pool_get_slv(pl) && 
+                    old_slv - ldlm_pool_get_slv(pl) > fast_slv_change)
+                        ldlm_pools_wakeup();
+        }
+#endif
+        spin_unlock(&pl->pl_lock);
+
+        RETURN(0);
+}
+EXPORT_SYMBOL(ldlm_cli_update_pool);
+
 int ldlm_cli_cancel(struct lustre_handle *lockh)
 {
         struct ldlm_lock *lock;
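
ldlm_cli_update_pool() above copies the server lock volume (SLV) and lock limit from the RPC reply into the client-side pool, and wakes the pools thread only when the SLV dropped by more than the fast-change threshold. Assuming LDLM_POOLS_FAST_SLV_CHANGE is a percentage (the comment suggests 5), the threshold test reduces to the arithmetic sketched below; do_div() is replaced by plain 64-bit division in this userspace illustration.

#include <stdint.h>
#include <stdio.h>

#define DEMO_FAST_SLV_CHANGE 5          /* assumed percentage, per the comment */

static int demo_slv_dropped_fast(uint64_t old_slv, uint64_t new_slv)
{
        /* Same computation as fast_slv_change above: N% of the old SLV. */
        uint64_t threshold = old_slv * DEMO_FAST_SLV_CHANGE / 100;

        /* Wake the pools thread only on a drop larger than the threshold;
         * smaller changes are picked up by the periodic 1s recalculation. */
        return old_slv > new_slv && old_slv - new_slv > threshold;
}

int main(void)
{
        printf("%d\n", demo_slv_dropped_fast(1000, 940));   /* 6% drop -> 1 */
        printf("%d\n", demo_slv_dropped_fast(1000, 970));   /* 3% drop -> 0 */
        return 0;
}
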
@@ -991,13 +1036,20 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                           int count, int max, int flags)
 {
         cfs_time_t cur = cfs_time_current();
+        int rc, added = 0, left, unused;
         struct ldlm_lock *lock, *next;
-        int rc, added = 0, left;
+        __u64 slv, lvf, lv;
         ENTRY;
 
         spin_lock(&ns->ns_unused_lock);
-        count += ns->ns_nr_unused - ns->ns_max_unused;
+        unused = ns->ns_nr_unused;
+        
+        if (!ns_connect_lru_resize(ns))
+                count += unused - ns->ns_max_unused;
+
         while (!list_empty(&ns->ns_unused_list)) {
+                struct ldlm_pool *pl = &ns->ns_pool;
+
                 if (max && added >= max)
                         break;
 
@@ -1011,11 +1063,38 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                 if (&lock->l_lru == &ns->ns_unused_list)
                         break;
 
-                if ((added >= count) && 
-                    (!(flags & LDLM_CANCEL_AGED) ||
-                     cfs_time_before_64(cur, (__u64)ns->ns_max_age +
-                                        lock->l_last_used)))
-                        break;
+                if (ns_connect_lru_resize(ns)) {
+                        cfs_time_t la;
+                        
+                        /* Take into account SLV only if count == 0. */
+                        if (count == 0) {
+                                /* Calculate lv for every lock. */
+                                spin_lock(&pl->pl_lock);
+                                slv = ldlm_pool_get_slv(pl);
+                                lvf = atomic_read(&pl->pl_lock_volume_factor);
+                                spin_unlock(&pl->pl_lock);
+
+                                la = cfs_duration_sec(cfs_time_sub(cur, 
+                                                      lock->l_last_used));
+                                if (la == 0)
+                                        la = 1;
+                                
+                                /* Stop when the SLV has not yet come from
+                                 * the server or lv is smaller than it. */
+                                lv = lvf * la * unused;
+                                if (slv == 1 || lv < slv)
+                                        break;
+                        } else {
+                                if (added >= count)
+                                        break;
+                        }
+                } else {
+                        if ((added >= count) && 
+                            (!(flags & LDLM_CANCEL_AGED) ||
+                             cfs_time_before_64(cur, ns->ns_max_age +
+                                                lock->l_last_used)))
+                                break;
+                }
 
                 LDLM_LOCK_GET(lock); /* dropped by bl thread */
                 spin_unlock(&ns->ns_unused_lock);
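
The two hunks above are the client side of SLV-driven LRU shrinking: unused snapshots ns_nr_unused (and is decremented as locks are picked, see the next hunk), the static ns_max_unused overflow is only added to count when LRU resize is not negotiated, and when count == 0 each LRU lock is judged by its lock volume lv = lvf * age_in_seconds * unused against the server-supplied SLV, with slv == 1 treated as "no SLV received from the server yet". The self-contained sketch below mirrors that test with made-up numbers.

#include <stdint.h>
#include <stdio.h>

/* Returns 1 if a lock with the given idle age should be cancelled. */
static int demo_should_cancel(uint64_t slv, uint64_t lvf,
                              uint64_t idle_sec, uint64_t unused)
{
        uint64_t lv;

        if (idle_sec == 0)
                idle_sec = 1;                   /* avoid a zero volume */

        lv = lvf * idle_sec * unused;           /* this lock's volume */

        /* Keep the lock if the server has not sent an SLV yet, or if the
         * lock's volume is still below the server lock volume. */
        if (slv == 1 || lv < slv)
                return 0;
        return 1;
}

int main(void)
{
        /* e.g. lvf = 100, lock idle 30s, 200 unused locks in the LRU */
        printf("%d\n", demo_should_cancel(500000, 100, 30, 200)); /* lv = 600000 >= slv -> cancel */
        printf("%d\n", demo_should_cancel(1, 100, 30, 200));      /* no SLV yet -> keep */
        return 0;
}
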
@@ -1060,6 +1139,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                 unlock_res_and_lock(lock);
                 spin_lock(&ns->ns_unused_lock);
                 added++;
+                unused--;
         }
         spin_unlock(&ns->ns_unused_lock);
 
@@ -1094,7 +1174,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
  * in a thread and this function will return after the thread has been
  * asked to call the callback. When called with LDLM_SYNC the blocking
  * callback will be performed in this function. */
-int ldlm_cancel_lru(struct ldlm_namespace *ns, ldlm_sync_t sync)
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync)
 {
         CFS_LIST_HEAD(cancels);
         int count, rc;
@@ -1103,7 +1183,7 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, ldlm_sync_t sync)
 #ifndef __KERNEL__
         sync = LDLM_SYNC; /* force to be sync in user space */
 #endif
-        count = ldlm_cancel_lru_local(ns, &cancels, 0, 0, 0);
+        count = ldlm_cancel_lru_local(ns, &cancels, nr, 0, 0);
         if (sync == LDLM_ASYNC) {
                 rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count);
                 if (rc == 0)
@@ -1113,7 +1193,7 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, ldlm_sync_t sync)
         /* If an error occurred in ASYNC mode, or
          * this is SYNC mode, cancel the list. */
         ldlm_cli_cancel_list(&cancels, count, NULL, 0, 0);
-        RETURN(0);
+        RETURN(count);
 }
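
The signature change above gives ldlm_cancel_lru() an explicit nr argument and makes it return the number of cancels it prepared instead of 0, presumably so pool/shrinker code can request a specific batch of LRU cancellations. A hypothetical caller pattern, with demo_ stand-ins rather than real Lustre calls, might look like this:

#include <stdio.h>

/* Stand-in for ldlm_cancel_lru(ns, nr, LDLM_ASYNC): pretend the LRU holds
 * lru_size locks and at most nr of them end up queued for cancellation. */
static int demo_cancel_lru(int lru_size, int nr)
{
        return nr < lru_size ? nr : lru_size;
}

int main(void)
{
        int lru_size = 10, want = 32;
        int got = demo_cancel_lru(lru_size, want);

        printf("requested %d, queued %d cancels\n", want, got);
        return 0;
}
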
 
 /* Find and cancel locally unused locks found on resource, matched to the
@@ -1370,12 +1450,7 @@ int ldlm_cli_join_lru(struct ldlm_namespace *ns,
                     !lock->l_readers && !lock->l_writers &&
                     !(lock->l_flags & LDLM_FL_LOCAL) &&
                     !(lock->l_flags & LDLM_FL_CBPENDING)) {
-                        lock->l_last_used = cfs_time_current();
-                        spin_lock(&ns->ns_unused_lock);
-                        LASSERT(ns->ns_nr_unused >= 0);
-                        list_add_tail(&lock->l_lru, &ns->ns_unused_list);
-                        ns->ns_nr_unused++;
-                        spin_unlock(&ns->ns_unused_lock);
+                        ldlm_lock_add_to_lru(lock);
                         lock->l_flags &= ~LDLM_FL_NO_LRU;
                         LDLM_DEBUG(lock, "join lock to lru");
                         count++;
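
The last hunk folds the open-coded LRU insertion into ldlm_lock_add_to_lru(). Based only on the lines removed here, the helper presumably does something like the sketch below; the real function lives elsewhere in the LDLM code, the namespace back-pointer (lock->l_resource->lr_namespace) is assumed, and it likely also updates pool accounting for LRU resize.

/* Reconstruction for illustration only, not the actual helper. */
void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;

        lock->l_last_used = cfs_time_current();
        spin_lock(&ns->ns_unused_lock);
        LASSERT(ns->ns_nr_unused >= 0);
        list_add_tail(&lock->l_lru, &ns->ns_unused_list);
        ns->ns_nr_unused++;
        spin_unlock(&ns->ns_unused_lock);
}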