b=17887
lustre/ldlm/ldlm_request.c
index 6bf1efe..b6fb1d1 100644
@@ -78,11 +78,11 @@ int ldlm_expired_completion_wait(void *data)
                         RETURN(0);
 
                 LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
-                          CFS_DURATION_T"s ago); not entering recovery in "
+                           CFS_DURATION_T"s ago); not entering recovery in "
                            "server code, just going back to sleep",
-                          lock->l_enqueued_time.tv_sec,
+                           lock->l_last_activity,
                            cfs_time_sub(cfs_time_current_sec(),
-                           lock->l_enqueued_time.tv_sec));
+                           lock->l_last_activity));
                 if (cfs_time_after(cfs_time_current(), next_dump)) {
                         last_dump = next_dump;
                         next_dump = cfs_time_shift(300);
@@ -99,9 +99,8 @@ int ldlm_expired_completion_wait(void *data)
         ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
         LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
                   CFS_DURATION_T"s ago), entering recovery for %s@%s",
-                  lock->l_enqueued_time.tv_sec,
-                  cfs_time_sub(cfs_time_current_sec(),
-                  lock->l_enqueued_time.tv_sec),
+                  lock->l_last_activity,
+                  cfs_time_sub(cfs_time_current_sec(), lock->l_last_activity),
                   obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
 
         RETURN(0);
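
Both messages above now measure from lock->l_last_activity rather than the old l_enqueued_time.tv_sec. The elapsed-time arithmetic they share boils down to the following (a minimal sketch, assuming the cfs_time_* wrappers behave as plain seconds arithmetic):

/* Sketch: seconds the lock has been waiting, per the messages above. */
cfs_time_t     now    = cfs_time_current_sec();
cfs_duration_t waited = cfs_time_sub(now, lock->l_last_activity);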
@@ -120,20 +119,7 @@ int ldlm_get_enq_timeout(struct ldlm_lock *lock)
         timeout = timeout + (timeout >> 1); /* 150% */
         return max(timeout, ldlm_enqueue_min);
 }
-
-static int is_granted_or_cancelled(struct ldlm_lock *lock)
-{
-        int ret = 0;
-
-        lock_res_and_lock(lock);
-        if (((lock->l_req_mode == lock->l_granted_mode) &&
-             !(lock->l_flags & LDLM_FL_CP_REQD)) ||
-            (lock->l_flags & LDLM_FL_FAILED))
-                ret = 1;
-        unlock_res_and_lock(lock);
-
-        return ret;
-}
+EXPORT_SYMBOL(ldlm_get_enq_timeout);
 
 /**
  * Helper function for ldlm_completion_ast(), updating timings when lock is
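
Two notes on this hunk. The enqueue timeout is padded to 150% of the estimate and clamped from below, and the helper is now exported to other modules via EXPORT_SYMBOL. Worked through for an estimated 10s (a sketch, not part of the patch):

/* ldlm_get_enq_timeout()'s padding for timeout = 10:
 * 10 + (10 >> 1) = 15, then max(15, ldlm_enqueue_min). */
int timeout = 10;
timeout = timeout + (timeout >> 1);        /* 150% -> 15 */
timeout = max(timeout, ldlm_enqueue_min);  /* enforce the floor */

Note also that the static is_granted_or_cancelled() is deleted here but still used by the l_wait_event() call further down, so it has presumably been promoted to a shared header by this change.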
@@ -149,7 +135,7 @@ static int ldlm_completion_tail(struct ldlm_lock *lock)
                 result = -EIO;
         } else {
                 delay = cfs_time_sub(cfs_time_current_sec(),
-                                     lock->l_enqueued_time.tv_sec);
+                                     lock->l_last_activity);
                 LDLM_DEBUG(lock, "client-side enqueue: granted after "
                            CFS_DURATION_T"s", delay);
 
@@ -161,8 +147,8 @@ static int ldlm_completion_tail(struct ldlm_lock *lock)
 }
 
 /**
  * Implementation of ->l_completion_ast() for a client that doesn't wait
  * until lock is granted. Suitable for locks enqueued through ptlrpcd or
  * other threads that cannot block for long.
  */
 int ldlm_completion_ast_async(struct ldlm_lock *lock, int flags, void *data)
@@ -183,6 +169,7 @@ int ldlm_completion_ast_async(struct ldlm_lock *lock, int flags, void *data)
         LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                    "going forward");
         ldlm_lock_dump(D_OTHER, lock, 0);
+        ldlm_reprocess_all(lock->l_resource);
         RETURN(0);
 }
 
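The added ldlm_reprocess_all() gives other locks waiting on the resource a chance to be granted once this thread declines to sleep. A hedged sketch of how a caller that cannot block would install this AST through struct ldlm_enqueue_info (field names as of this vintage of the tree; illustrative only):

/* Hypothetical enqueue-info for a non-blocking (ptlrpcd-style) caller. */
struct ldlm_enqueue_info einfo = {
        .ei_type   = LDLM_IBITS,
        .ei_mode   = LCK_CR,
        .ei_cb_bl  = ldlm_blocking_ast,
        .ei_cb_cp  = ldlm_completion_ast_async, /* returns instead of sleeping */
        .ei_cbdata = NULL,
};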
@@ -264,8 +251,16 @@ noreproc:
                 spin_unlock(&imp->imp_lock);
         }
 
-        /* Go to sleep until the lock is granted or cancelled. */
-        rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);
+        if (ns_is_client(lock->l_resource->lr_namespace) &&
+            OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
+                                 OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
+                lock->l_flags |= LDLM_FL_FAIL_LOC;
+                rc = -EINTR;
+        } else {
+                /* Go to sleep until the lock is granted or cancelled. */
+                rc = l_wait_event(lock->l_waitq,
+                                  is_granted_or_cancelled(lock), &lwi);
+        }
 
         if (rc) {
                 LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
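
The new branch above is fault injection for the bug 17645 race: on a client namespace it fakes an interrupted wait and tags the lock with LDLM_FL_FAIL_LOC so the cleanup path can be exercised while a blocking AST may be in flight. Spelled out with its assumed semantics (OBD_FAIL_CHECK_RESET is taken to fire when fail_loc matches its first argument and then re-arm fail_loc to its second):

/* Sketch of the gate: trigger once, then re-arm fail_loc so the paired
 * OBD_FAIL_LDLM_CP_BL_RACE check can fire in the blocking-AST path. */
if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
                         OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
        lock->l_flags |= LDLM_FL_FAIL_LOC; /* let later checks spot the fake */
        rc = -EINTR;                       /* pretend the waiter was hit */
}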
@@ -276,31 +271,22 @@ noreproc:
         RETURN(ldlm_completion_tail(lock));
 }
 
-/*
- * ->l_blocking_ast() callback for LDLM locks acquired by server-side OBDs.
+/**
+ * A helper to build a blocking ast function
+ *
+ * Perform a common operation for blocking asts:
+ * deferred lock cancellation.
+ *
+ * \param lock the lock the blocking or canceling ast was called on
+ * \retval 0
+ * \see mdt_blocking_ast
+ * \see ldlm_blocking_ast
  */
-int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
-                      void *data, int flag)
+int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
 {
         int do_ast;
         ENTRY;
 
-        if (flag == LDLM_CB_CANCELING) {
-                /* Don't need to do anything here. */
-                RETURN(0);
-        }
-
-        lock_res_and_lock(lock);
-        /* Get this: if ldlm_blocking_ast is racing with intent_policy, such
-         * that ldlm_blocking_ast is called just before intent_policy method
-         * takes the ns_lock, then by the time we get the lock, we might not
-         * be the correct blocking function anymore.  So check, and return
-         * early, if so. */
-        if (lock->l_blocking_ast != ldlm_blocking_ast) {
-                unlock_res_and_lock(lock);
-                RETURN(0);
-        }
-
         lock->l_flags |= LDLM_FL_CBPENDING;
         do_ast = (!lock->l_readers && !lock->l_writers);
         unlock_res_and_lock(lock);
@@ -321,6 +307,42 @@ int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
         RETURN(0);
 }
 
+/**
+ * Server blocking AST
+ *
+ * ->l_blocking_ast() callback for LDLM locks acquired by server-side
+ * OBDs.
+ *
+ * \param lock the lock which blocks a request or is being cancelled
+ * \param desc unused
+ * \param data unused
+ * \param flag indicates whether this is a cancelling or blocking callback
+ * \retval 0
+ * \see ldlm_blocking_ast_nocheck
+ */
+int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+                      void *data, int flag)
+{
+        ENTRY;
+
+        if (flag == LDLM_CB_CANCELING) {
+                /* Don't need to do anything here. */
+                RETURN(0);
+        }
+
+        lock_res_and_lock(lock);
+        /* Get this: if ldlm_blocking_ast is racing with intent_policy, such
+         * that ldlm_blocking_ast is called just before intent_policy method
+         * takes the ns_lock, then by the time we get the lock, we might not
+         * be the correct blocking function anymore.  So check, and return
+         * early, if so. */
+        if (lock->l_blocking_ast != ldlm_blocking_ast) {
+                unlock_res_and_lock(lock);
+                RETURN(0);
+        }
+        RETURN(ldlm_blocking_ast_nocheck(lock));
+}
+
 /*
  * ->l_glimpse_ast() for DLM extent locks acquired on the server-side. See
  * comment in filter_intent_policy() on why you may need this.
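
With the split above, a server component can keep its own early-out checks and delegate the common tail to ldlm_blocking_ast_nocheck(), which expects to be entered with the resource lock held and drops it itself. A hedged sketch modeled on the \see mdt_blocking_ast reference (hypothetical handler, not from this patch):

/* Hypothetical server-side blocking AST built on the new helper. */
static int my_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                           void *data, int flag)
{
        ENTRY;
        if (flag == LDLM_CB_CANCELING)
                RETURN(0);               /* nothing to do at cancel time */

        lock_res_and_lock(lock);
        /* component-specific early-outs go here, under the resource lock */
        RETURN(ldlm_blocking_ast_nocheck(lock)); /* unlocks internally */
}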
@@ -356,6 +378,7 @@ int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                            ldlm_completion_callback completion,
                            ldlm_glimpse_callback glimpse,
                            void *data, __u32 lvb_len, void *lvb_swabber,
+                           const __u64 *client_cookie,
                            struct lustre_handle *lockh)
 {
         struct ldlm_lock *lock;
@@ -387,6 +410,8 @@ int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
         unlock_res_and_lock(lock);
         if (policy != NULL)
                 lock->l_policy_data = *policy;
+        if (client_cookie != NULL)
+                lock->l_client_cookie = *client_cookie;
         if (type == LDLM_EXTENT)
                 lock->l_req_extent = policy->l_extent;
 
@@ -415,13 +440,31 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
                                 struct ldlm_lock *lock,
                                 struct lustre_handle *lockh, int mode)
 {
+        int need_cancel = 0;
+
         /* Set a flag to prevent us from sending a CANCEL (bug 407) */
         lock_res_and_lock(lock);
-        lock->l_flags |= LDLM_FL_LOCAL_ONLY;
+        /* Check that lock is not granted or failed, we might race. */
+        if ((lock->l_req_mode != lock->l_granted_mode) &&
+            !(lock->l_flags & LDLM_FL_FAILED)) {
+                /* Make sure that this lock will not be found by raced
+                 * bl_ast and -EINVAL reply is sent to server anyways.
+                 * bug 17645 */
+                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
+                                 LDLM_FL_ATOMIC_CB;
+                need_cancel = 1;
+        }
         unlock_res_and_lock(lock);
-        LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
 
-        ldlm_lock_decref_and_cancel(lockh, mode);
+        if (need_cancel) {
+                LDLM_DEBUG(lock,
+                           "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | "
+                           "LDLM_FL_ATOMIC_CB");
+                ldlm_lock_decref_and_cancel(lockh, mode);
+        } else {
+                LDLM_DEBUG(lock, "lock was granted or failed in race");
+                ldlm_lock_decref(lockh, mode);
+        }
 
         /* XXX - HACK because we shouldn't call ldlm_lock_destroy()
          *       from llite/file.c/ll_file_flock(). */
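
The rewritten cleanup follows the decide-under-lock, act-outside-lock discipline: whether a CANCEL must be sent is settled while the resource lock is held, and the potentially blocking decref/cancel runs only after it is dropped. Distilled (a sketch of the pattern; the predicate name is hypothetical):

int need_cancel = 0;

lock_res_and_lock(lock);
if (!lock_granted_or_failed(lock))      /* hypothetical predicate */
        need_cancel = 1;                /* decision made under the lock */
unlock_res_and_lock(lock);

if (need_cancel)
        ldlm_lock_decref_and_cancel(lockh, mode); /* may block; lock dropped */
else
        ldlm_lock_decref(lockh, mode);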
@@ -832,7 +875,9 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
         }
 
         LDLM_DEBUG(lock, "sending request");
+
         rc = ptlrpc_queue_wait(req);
+
         err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
                                     einfo->ei_mode, flags, lvb, lvb_len,
                                     lvb_swabber, lockh, rc);
@@ -1089,7 +1134,7 @@ int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
 
                 ptlrpc_request_set_replen(req);
                 if (flags & LDLM_FL_ASYNC) {
-                        ptlrpcd_add_req(req);
+                        ptlrpcd_add_req(req, PSCOPE_OTHER);
                         sent = count;
                         GOTO(out, 0);
                 } else {
@@ -1135,7 +1180,6 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
         __u64 old_slv, new_slv;
         __u32 new_limit;
         ENTRY;
-
         if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
                      !imp_connect_lru_resize(req->rq_import)))
         {
@@ -1269,65 +1313,6 @@ static int ldlm_cancel_list(struct list_head *cancels, int count, int flags)
 }
 
 /**
- * Callback function for shrink policy. Makes decision whether to keep
- * \a lock in LRU for current \a LRU size \a unused, added in current scan
- * \a added and number of locks to be preferably canceled \a count.
- *
- * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
- *
- * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
- */
-static ldlm_policy_res_t ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
-                                                   struct ldlm_lock *lock,
-                                                   int unused, int added,
-                                                   int count)
-{
-        int lock_cost;
-        __u64 page_nr;
-
-        /*
-         * Stop lru processing when we reached passed @count or checked all
-         * locks in lru.
-         */
-        if (count && added >= count)
-                return LDLM_POLICY_KEEP_LOCK;
-
-        if (lock->l_resource->lr_type == LDLM_EXTENT) {
-                if (lock->l_weigh_ast) {
-                        /*
-                         * For liblustre, l_weigh_ast should return 0 since it
-                         * don't cache pages
-                         */
-                        page_nr = lock->l_weigh_ast(lock);
-                } else {
-                struct ldlm_extent *l_extent;
-
-                /*
-                 * For all extent locks cost is 1 + number of pages in
-                 * their extent.
-                 */
-                l_extent = &lock->l_policy_data.l_extent;
-                        page_nr = l_extent->end - l_extent->start;
-                do_div(page_nr, CFS_PAGE_SIZE);
-                }
-                lock_cost = 1 + page_nr;
-        } else {
-                /*
-                 * For all locks which are not extent ones cost is 1
-                 */
-                lock_cost = 1;
-        }
-
-        /*
-         * Keep all expensive locks in lru for the memory pressure time
-         * cancel policy. They anyways may be canceled by lru resize
-         * pplicy if they have not small enough CLV.
-         */
-        return lock_cost > ns->ns_shrink_thumb ?
-                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
-}
-
-/**
  * Callback function for lru-resize policy. Makes decision whether to keep
  * \a lock in LRU for current \a LRU size \a unused, added in current scan
  * \a added and number of locks to be preferably canceled \a count.
@@ -1450,7 +1435,8 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
 {
         if (ns_connect_lru_resize(ns)) {
                 if (flags & LDLM_CANCEL_SHRINK)
-                        return ldlm_cancel_shrink_policy;
+                        /* We kill the passed number of old locks. */
+                        return ldlm_cancel_passed_policy;
                 else if (flags & LDLM_CANCEL_LRUR)
                         return ldlm_cancel_lrur_policy;
                 else if (flags & LDLM_CANCEL_PASSED)
@@ -1602,61 +1588,6 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
         RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
 }
 
-/* Returns number of locks which could be canceled next time when
- * ldlm_cancel_lru() is called. Used from locks pool shrinker. */
-int ldlm_cancel_lru_estimate(struct ldlm_namespace *ns,
-                             int count, int max, int flags)
-{
-        struct list_head disp = CFS_LIST_HEAD_INIT(disp);
-        ldlm_cancel_lru_policy_t pf;
-        struct ldlm_lock *lock;
-        int added = 0, unused;
-        int loop_stop = 0;
-        ENTRY;
-
-        pf = ldlm_cancel_lru_policy(ns, flags);
-        LASSERT(pf != NULL);
-        spin_lock(&ns->ns_unused_lock);
-        unused = ns->ns_nr_unused;
-        list_splice_init(&ns->ns_unused_list, &disp);
-        while (!list_empty(&disp)) {
-                lock = list_entry(disp.next, struct ldlm_lock, l_lru);
-                list_move_tail(&lock->l_lru, &ns->ns_unused_list);
-
-                /* For any flags, stop scanning if @max is reached. */
-                if (max && added >= max)
-                        break;
-
-                /* Somebody is already doing CANCEL or there is a
-                 * blocking request will send cancel. Let's not count
-                 * this lock. */
-                if ((lock->l_flags & LDLM_FL_CANCELING) ||
-                    (lock->l_flags & LDLM_FL_BL_AST))
-                        continue;
-
-                LDLM_LOCK_GET(lock);
-                spin_unlock(&ns->ns_unused_lock);
-                lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
-
-                /* Pass the lock through the policy filter and see if it
-                 * should stay in lru. */
-                if (pf(ns, lock, unused, added, count) == LDLM_POLICY_KEEP_LOCK)
-                        loop_stop = 1;
-
-                lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
-                LDLM_LOCK_RELEASE(lock);
-                spin_lock(&ns->ns_unused_lock);
-                if (loop_stop)
-                        break;
-
-                added++;
-                unused--;
-        }
-        list_splice(&disp, ns->ns_unused_list.prev);
-        spin_unlock(&ns->ns_unused_lock);
-        RETURN(added);
-}
-
 /* when called with LDLM_ASYNC the blocking callback will be handled
  * in a thread and this function will return after the thread has been
  * asked to call the callback.  when called with LDLM_SYNC the blocking
@@ -1678,8 +1609,8 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
                         RETURN(count);
         }
 
-        /* If an error occured in ASYNC mode, or
-         * this is SYNC mode, cancel the list. */
+        /* If an error occurred in ASYNC mode, or this is SYNC mode,
+         * cancel the list. */
         ldlm_cli_cancel_list(&cancels, count, NULL, 0);
         RETURN(count);
 }
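
Per the comments above, the sync argument only determines who runs the blocking callbacks; either way the return value is the number of LRU locks queued for cancellation. A hedged usage sketch (count and flags illustrative):

/* Sketch: shed up to 32 LRU locks without blocking on the cancel RPCs. */
int canceled = ldlm_cancel_lru(ns, 32, LDLM_ASYNC, 0);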
@@ -2058,10 +1989,10 @@ static int replay_lock_interpret(const struct lu_env *env,
         lock->l_remote_handle = reply->lock_handle;
 
         /* Key change rehash lock in per-export hash with new key */
-       exp = req->rq_export;
+        exp = req->rq_export;
         if (exp && exp->exp_lock_hash)
                 lustre_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
-                                      &lock->l_remote_handle,
+                                       &lock->l_remote_handle,
                                        &lock->l_exp_hash);
 
         LDLM_DEBUG(lock, "replayed lock:");
@@ -2152,7 +2083,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
         aa = ptlrpc_req_async_args(req);
         aa->lock_handle = body->lock_handle[0];
         req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
-        ptlrpcd_add_req(req);
+        ptlrpcd_add_req(req, PSCOPE_OTHER);
 
         RETURN(0);
 }