LU-6179 llite: Implement ladvise lockahead
diff --git a/lustre/ldlm/ldlm_extent.c b/lustre/ldlm/ldlm_extent.c
index a950b0b..5001b66 100644
@@ -269,32 +269,43 @@ ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
 static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, __u64 *flags)
 {
-        struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
-
-        if (lock->l_export == NULL)
-                /*
-                 * this is local lock taken by server (e.g., as a part of
-                 * OST-side locking, or unlink handling). Expansion doesn't
-                 * make a lot of sense for local locks, because they are
-                 * dropped immediately on operation completion and would only
-                 * conflict with other threads.
-                 */
-                return;
+       struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
+
+       if (lock->l_export == NULL)
+               /*
+                * this is a local lock taken by the server (e.g., as a part of
+                * OST-side locking, or unlink handling). Expansion doesn't
+                * make a lot of sense for local locks, because they are
+                * dropped immediately on operation completion and would only
+                * conflict with other threads.
+                */
+               return;
 
-        if (lock->l_policy_data.l_extent.start == 0 &&
-            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
-                /* fast-path whole file locks */
-                return;
+       if (lock->l_policy_data.l_extent.start == 0 &&
+           lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
+               /* fast-path whole file locks */
+               return;
 
-        ldlm_extent_internal_policy_granted(lock, &new_ex);
-        ldlm_extent_internal_policy_waiting(lock, &new_ex);
+       /* Because reprocess_queue zeroes flags and uses it to return
+        * LDLM_FL_LOCK_CHANGED, we must check for the NO_EXPANSION flag
+        * in the lock flags rather than the 'flags' argument */
+       if (likely(!(lock->l_flags & LDLM_FL_NO_EXPANSION))) {
+               ldlm_extent_internal_policy_granted(lock, &new_ex);
+               ldlm_extent_internal_policy_waiting(lock, &new_ex);
+       } else {
+               LDLM_DEBUG(lock, "Not expanding manually requested lock.");
+               new_ex.start = lock->l_policy_data.l_extent.start;
+               new_ex.end = lock->l_policy_data.l_extent.end;
+               /* If the request is not on correct boundaries, call fixup
+                * (normally done in ldlm_extent_internal_policy_*) */
+               ldlm_extent_internal_policy_fixup(lock, &new_ex, 0);
+       }
 
-        if (new_ex.start != lock->l_policy_data.l_extent.start ||
-            new_ex.end != lock->l_policy_data.l_extent.end) {
-                *flags |= LDLM_FL_LOCK_CHANGED;
-                lock->l_policy_data.l_extent.start = new_ex.start;
-                lock->l_policy_data.l_extent.end = new_ex.end;
-        }
+       if (!ldlm_extent_equal(&new_ex, &lock->l_policy_data.l_extent)) {
+               *flags |= LDLM_FL_LOCK_CHANGED;
+               lock->l_policy_data.l_extent.start = new_ex.start;
+               lock->l_policy_data.l_extent.end = new_ex.end;
+       }
 }
 
 static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
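
The exit check in ldlm_extent_policy() now goes through an
ldlm_extent_equal() helper rather than comparing start and end by hand.
A minimal sketch of such a helper, assuming the real definition lives
with struct ldlm_extent in the Lustre DLM headers (lustre_dlm.h):

    #include <linux/types.h>

    /* Sketch only: the real struct in lustre_dlm.h carries more fields,
     * e.g. a group ID used by GROUP locks. */
    struct ldlm_extent {
            __u64 start;
            __u64 end;
    };

    /* True when two extents cover exactly the same byte range. */
    static inline int ldlm_extent_equal(const struct ldlm_extent *ex1,
                                        const struct ldlm_extent *ex2)
    {
            return ex1->start == ex2->start && ex1->end == ex2->end;
    }
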
@@ -421,7 +432,8 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         }
 
                         if (tree->lit_mode == LCK_GROUP) {
-                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+                               if (*flags & (LDLM_FL_BLOCK_NOWAIT |
+                                             LDLM_FL_SPECULATIVE)) {
                                         compat = -EWOULDBLOCK;
                                         goto destroylock;
                                 }
@@ -438,10 +450,24 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                 continue;
                         }
 
-                        if (!work_list) {
-                                rc = interval_is_overlapped(tree->lit_root,&ex);
-                                if (rc)
-                                        RETURN(0);
+                       /* We've found a potentially blocking lock; check
+                        * compatibility.  This handles locks other than GROUP
+                        * locks, which are handled separately above.
+                        *
+                        * Locks with FL_SPECULATIVE are asynchronous requests
+                        * which must never wait behind another lock, so they
+                        * fail if any conflicting lock is found. */
+                       if (!work_list || (*flags & LDLM_FL_SPECULATIVE)) {
+                               rc = interval_is_overlapped(tree->lit_root,
+                                                           &ex);
+                               if (rc) {
+                                       if (!work_list) {
+                                               RETURN(0);
+                                       } else {
+                                               compat = -EWOULDBLOCK;
+                                               goto destroylock;
+                                       }
+                               }
                         } else {
                                 interval_search(tree->lit_root, &ex,
                                                 ldlm_extent_compat_cb, &data);
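
For non-GROUP extents, the speculative case above is folded into the
existing no-work_list fast path: both need only a yes/no overlap answer
from the interval tree, but they react differently to a conflict. A
condensed sketch of the verdict for one tree (the function and parameter
names are illustrative, not from the source):

    #include <errno.h>
    #include <stdbool.h>

    /* Illustrative only: mirrors the nested test above for a single
     * interval tree. Returns 1 when no conflicting extent exists, 0 for
     * a scan-mode (work_list == NULL) conflict, and -EWOULDBLOCK when a
     * speculative request with a work_list meets any conflict. */
    static int speculative_tree_verdict(bool have_work_list, bool overlapped)
    {
            if (!overlapped)
                    return 1;            /* compatible so far */
            if (!have_work_list)
                    return 0;            /* probe: just report the conflict */
            return -EWOULDBLOCK;         /* speculative: must never wait */
    }

Note the ordering: a speculative request that is also a bare scan still
reports plain incompatibility, exactly as the nested branch above does.
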
@@ -537,7 +563,8 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                          * already blocked.
                                          * If we are in nonblocking mode - return
                                          * immediately */
-                                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+                                       if (*flags & (LDLM_FL_BLOCK_NOWAIT |
+                                                     LDLM_FL_SPECULATIVE)) {
                                                 compat = -EWOULDBLOCK;
                                                 goto destroylock;
                                         }
@@ -580,10 +607,11 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         }
 
                         if (unlikely(lock->l_req_mode == LCK_GROUP)) {
-                                /* If compared lock is GROUP, then requested is PR/PW/
-                                 * so this is not compatible; extent range does not
-                                 * matter */
-                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+                               /* If compared lock is GROUP, then requested is
+                                * PR/PW so this is not compatible; extent
+                                * range does not matter */
+                               if (*flags & (LDLM_FL_BLOCK_NOWAIT |
+                                             LDLM_FL_SPECULATIVE)) {
                                         compat = -EWOULDBLOCK;
                                         goto destroylock;
                                 } else {
@@ -602,6 +630,11 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         if (!work_list)
                                 RETURN(0);
 
+                       if (*flags & LDLM_FL_SPECULATIVE) {
+                               compat = -EWOULDBLOCK;
+                               goto destroylock;
+                       }
+
                         /* don't count conflicting glimpse locks */
                         if (lock->l_req_mode == LCK_PR &&
                             lock->l_policy_data.l_extent.start == 0 &&
@@ -764,11 +797,11 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
        *err = ELDLM_OK;
 
        if (intention == LDLM_PROCESS_RESCAN) {
-                /* Careful observers will note that we don't handle -EWOULDBLOCK
-                 * here, but it's ok for a non-obvious reason -- compat_queue
-                 * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
-                 * flags should always be zero here, and if that ever stops
-                 * being true, we want to find out. */
+               /* Careful observers will note that we don't handle -EWOULDBLOCK
+                * here, but it's ok for a non-obvious reason -- compat_queue
+                * can only return -EWOULDBLOCK if (flags & (BLOCK_NOWAIT |
+                * SPECULATIVE)). flags should always be zero here, and if that
+                * ever stops being true, we want to find out. */
                 LASSERT(*flags == 0);
                 rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
                                               err, NULL, &contended_locks);
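
For context, the consumer of LDLM_FL_SPECULATIVE and LDLM_FL_NO_EXPANSION
added by this series is the ladvise lockahead path in llite. A hedged
userspace sketch of requesting an asynchronous write lock ahead of I/O
follows; the llapi_lu_ladvise fields and constants are taken from the
lockahead API as merged with LU-6179 and should be checked against your
lustreapi.h:

    #include <stdio.h>
    #include <string.h>
    #include <lustre/lustreapi.h>

    /* Sketch, assuming the LU-6179 lockahead API: ask the server to take
     * a write lock covering the first MiB ahead of a later write.
     * LF_ASYNC is what makes the DLM request speculative (non-blocking)
     * rather than a normal blocking enqueue. */
    static int request_write_lockahead(int fd)
    {
            struct llapi_lu_ladvise advice;

            memset(&advice, 0, sizeof(advice));
            advice.lla_advice = LU_LADVISE_LOCKAHEAD;
            advice.lla_lockahead_mode = LLAPI_LOCKAHEAD_MODE_WRITE;
            advice.lla_peradvice_flags = LF_ASYNC;
            advice.lla_start = 0;
            advice.lla_end = 1024 * 1024;

            if (llapi_ladvise(fd, 0, 1, &advice) < 0)
                    return -1;

            /* A conflict surfaces as a per-advice result (matching the
             * server-side -EWOULDBLOCK in this patch), not as a fatal
             * error from the call itself. */
            printf("lockahead result: %d\n", (int)advice.lla_lockahead_result);
            return 0;
    }
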