*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* the client requested. We also need to make sure it is server
* page size aligned, otherwise a server page can be covered by two
* write locks. */
- mask = PAGE_CACHE_SIZE;
+ mask = PAGE_SIZE;
req_align = (req_end + 1) | req_start;
if (req_align != 0 && (req_align & (mask - 1)) == 0) {
while ((req_align & mask) == 0)
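/* Worked example of the alignment trick above (editorial sketch; the
 * elided loop body is assumed to be "mask <<= 1"): for a request
 * covering [0, 0x7fff], req_align = (0x7fff + 1) | 0 = 0x8000.
 * Starting from mask = PAGE_SIZE (0x1000 with 4K pages), the loop
 * doubles mask until it reaches the lowest set bit of req_align,
 * ending with mask = 0x8000: the largest power-of-two alignment
 * shared by req_start and req_end + 1. */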
__u64 req_start = req->l_req_extent.start;
__u64 req_end = req->l_req_extent.end;
struct ldlm_interval_tree *tree;
- struct interval_node_extent limiter = { new_ex->start, new_ex->end };
+ struct interval_node_extent limiter = {
+ .start = new_ex->start,
+ .end = new_ex->end,
+ };
int conflicting = 0;
int idx;
ENTRY;
/* Using interval tree to handle the LDLM extent granted locks. */
for (idx = 0; idx < LCK_MODE_NUM; idx++) {
- struct interval_node_extent ext = { req_start, req_end };
+ struct interval_node_extent ext = {
+ .start = req_start,
+ .end = req_end,
+ };
tree = &res->lr_itree[idx];
if (lockmode_compat(tree->lit_mode, req_mode))
static void ldlm_extent_policy(struct ldlm_resource *res,
struct ldlm_lock *lock, __u64 *flags)
{
- struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
-
- if (lock->l_export == NULL)
- /*
- * this is local lock taken by server (e.g., as a part of
- * OST-side locking, or unlink handling). Expansion doesn't
- * make a lot of sense for local locks, because they are
- * dropped immediately on operation completion and would only
- * conflict with other threads.
- */
- return;
+ struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
+
+ if (lock->l_export == NULL)
+ /*
+ * this is a local lock taken by server (e.g., as a part of
+ * OST-side locking, or unlink handling). Expansion doesn't
+ * make a lot of sense for local locks, because they are
+ * dropped immediately on operation completion and would only
+ * conflict with other threads.
+ */
+ return;
- if (lock->l_policy_data.l_extent.start == 0 &&
- lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
- /* fast-path whole file locks */
- return;
+ if (lock->l_policy_data.l_extent.start == 0 &&
+ lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
+ /* fast-path whole file locks */
+ return;
- ldlm_extent_internal_policy_granted(lock, &new_ex);
- ldlm_extent_internal_policy_waiting(lock, &new_ex);
+ /* Because reprocess_queue zeroes the *flags argument and uses it to
+ * return LDLM_FL_LOCK_CHANGED, we must check for LDLM_FL_NO_EXPANSION
+ * in the lock's own flags rather than in the 'flags' argument. */
+ if (likely(!(lock->l_flags & LDLM_FL_NO_EXPANSION))) {
+ ldlm_extent_internal_policy_granted(lock, &new_ex);
+ ldlm_extent_internal_policy_waiting(lock, &new_ex);
+ } else {
+ LDLM_DEBUG(lock, "Not expanding manually requested lock.\n");
+ new_ex.start = lock->l_policy_data.l_extent.start;
+ new_ex.end = lock->l_policy_data.l_extent.end;
+ /* In case the request is not on correct boundaries, we call
+ * fixup. (normally called in ldlm_extent_internal_policy_*) */
+ ldlm_extent_internal_policy_fixup(lock, &new_ex, 0);
+ }
- if (new_ex.start != lock->l_policy_data.l_extent.start ||
- new_ex.end != lock->l_policy_data.l_extent.end) {
- *flags |= LDLM_FL_LOCK_CHANGED;
- lock->l_policy_data.l_extent.start = new_ex.start;
- lock->l_policy_data.l_extent.end = new_ex.end;
- }
+ if (!ldlm_extent_equal(&new_ex, &lock->l_policy_data.l_extent)) {
+ *flags |= LDLM_FL_LOCK_CHANGED;
+ lock->l_policy_data.l_extent.start = new_ex.start;
+ lock->l_policy_data.l_extent.end = new_ex.end;
+ }
}
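/* ldlm_extent_equal() is introduced elsewhere in this series; a minimal
 * sketch of the presumed helper, shown here for reference only:
 *
 *	static inline int ldlm_extent_equal(const struct ldlm_extent *ex1,
 *					    const struct ldlm_extent *ex2)
 *	{
 *		return ex1->start == ex2->start && ex1->end == ex2->end;
 *	}
 */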
static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
}
if (tree->lit_mode == LCK_GROUP) {
- if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+ if (*flags & (LDLM_FL_BLOCK_NOWAIT |
+ LDLM_FL_SPECULATIVE)) {
compat = -EWOULDBLOCK;
goto destroylock;
}
continue;
}
- if (!work_list) {
- rc = interval_is_overlapped(tree->lit_root,&ex);
- if (rc)
- RETURN(0);
+ /* We've found a potentially blocking lock, check
+ * compatibility. This handles locks other than GROUP
+ * locks, which are handled separately above.
+ *
+ * Locks with FL_SPECULATIVE are asynchronous requests
+ * which must never wait behind another lock, so they
+ * fail if any conflicting lock is found. */
+ if (!work_list || (*flags & LDLM_FL_SPECULATIVE)) {
+ rc = interval_is_overlapped(tree->lit_root,
+ &ex);
+ if (rc) {
+ if (!work_list) {
+ RETURN(0);
+ } else {
+ compat = -EWOULDBLOCK;
+ goto destroylock;
+ }
+ }
} else {
interval_search(tree->lit_root, &ex,
ldlm_extent_compat_cb, &data);
* already blocked.
* If we are in nonblocking mode - return
* immediately */
- if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+ if (*flags & (LDLM_FL_BLOCK_NOWAIT
+ | LDLM_FL_SPECULATIVE)) {
compat = -EWOULDBLOCK;
goto destroylock;
}
}
if (unlikely(lock->l_req_mode == LCK_GROUP)) {
- /* If compared lock is GROUP, then requested is PR/PW/
- * so this is not compatible; extent range does not
- * matter */
- if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+ /* If compared lock is GROUP, then requested is
+ * PR/PW so this is not compatible; extent
+ * range does not matter */
+ if (*flags & (LDLM_FL_BLOCK_NOWAIT
+ | LDLM_FL_SPECULATIVE)) {
compat = -EWOULDBLOCK;
goto destroylock;
} else {
if (!work_list)
RETURN(0);
+ if (*flags & LDLM_FL_SPECULATIVE) {
+ compat = -EWOULDBLOCK;
+ goto destroylock;
+ }
+
/* don't count conflicting glimpse locks */
if (lock->l_req_mode == LCK_PR &&
lock->l_policy_data.l_extent.start == 0 &&
}
EXPORT_SYMBOL(ldlm_resource_prolong);
-
-/**
- * Discard all AST work items from list.
- *
- * If for whatever reason we do not want to send ASTs to conflicting locks
- * anymore, disassemble the list with this function.
- */
-static void discard_bl_list(struct list_head *bl_list)
-{
- struct list_head *tmp, *pos;
- ENTRY;
-
- list_for_each_safe(pos, tmp, bl_list) {
- struct ldlm_lock *lock =
- list_entry(pos, struct ldlm_lock, l_bl_ast);
-
- list_del_init(&lock->l_bl_ast);
- LASSERT(ldlm_is_ast_sent(lock));
- ldlm_clear_ast_sent(lock);
- LASSERT(lock->l_bl_ast_run == 0);
- LASSERT(lock->l_blocking_lock);
- LDLM_LOCK_RELEASE(lock->l_blocking_lock);
- lock->l_blocking_lock = NULL;
- LDLM_LOCK_RELEASE(lock);
- }
- EXIT;
-}
-
/**
* Process a granting attempt for extent lock.
* Must be called with ns lock held.
* This function looks for any conflicts for \a lock in the granted or
* waiting queues. The lock is granted if no conflicts are found in
* either queue.
- *
- * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
- * - blocking ASTs have already been sent
- *
- * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
- * - blocking ASTs have not been sent yet, so list of conflicting locks
- * would be collected and ASTs sent.
*/
int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, enum ldlm_error *err,
- struct list_head *work_list)
+ enum ldlm_process_intention intention,
+ enum ldlm_error *err, struct list_head *work_list)
{
struct ldlm_resource *res = lock->l_resource;
struct list_head rpc_list;
check_res_locked(res);
*err = ELDLM_OK;
- if (!first_enq) {
- /* Careful observers will note that we don't handle -EWOULDBLOCK
- * here, but it's ok for a non-obvious reason -- compat_queue
- * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
- * flags should always be zero here, and if that ever stops
- * being true, we want to find out. */
+ if (intention == LDLM_PROCESS_RESCAN) {
+ /* Careful observers will note that we don't handle -EWOULDBLOCK
+ * here, but it's ok for a non-obvious reason -- compat_queue
+ * can only return -EWOULDBLOCK if (flags & (BLOCK_NOWAIT |
+ * SPECULATIVE)). flags should always be zero here, and if that
+ * ever stops being true, we want to find out. */
LASSERT(*flags == 0);
rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
err, NULL, &contended_locks);
RETURN(LDLM_ITER_CONTINUE);
}
+ LASSERT((intention == LDLM_PROCESS_ENQUEUE && work_list == NULL) ||
+ (intention == LDLM_PROCESS_RECOVERY && work_list != NULL));
restart:
contended_locks = 0;
rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
&rpc_list, &contended_locks);
- if (rc < 0)
- GOTO(out, rc); /* lock was destroyed */
- if (rc == 2)
- goto grant;
-
- rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
- &rpc_list, &contended_locks);
- if (rc2 < 0)
- GOTO(out, rc = rc2); /* lock was destroyed */
-
- if (rc + rc2 == 2) {
- grant:
- ldlm_extent_policy(res, lock, flags);
- ldlm_resource_unlink_lock(lock);
- ldlm_grant_lock(lock, NULL);
- } else {
- /* If either of the compat_queue()s returned failure, then we
- * have ASTs to send and must go onto the waiting list.
- *
- * bug 2322: we used to unlink and re-add here, which was a
- * terrible folly -- if we goto restart, we could get
- * re-ordered! Causes deadlock, because ASTs aren't sent! */
- if (list_empty(&lock->l_res_link))
- ldlm_resource_add_lock(res, &res->lr_waiting, lock);
- unlock_res(res);
- rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
- LDLM_WORK_BL_AST);
-
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
- !ns_is_client(ldlm_res_to_ns(res)))
- class_fail_export(lock->l_export);
-
- lock_res(res);
- if (rc == -ERESTART) {
- /* 15715: The lock was granted and destroyed after
- * resource lock was dropped. Interval node was freed
- * in ldlm_lock_destroy. Anyway, this always happens
- * when a client is being evicted. So it would be
- * ok to return an error. -jay */
- if (ldlm_is_destroyed(lock)) {
- *err = -EAGAIN;
- GOTO(out, rc = -EAGAIN);
- }
-
- /* lock was granted while resource was unlocked. */
- if (lock->l_granted_mode == lock->l_req_mode) {
- /* bug 11300: if the lock has been granted,
- * break earlier because otherwise, we will go
- * to restart and ldlm_resource_unlink will be
- * called and it causes the interval node to be
- * freed. Then we will fail at
- * ldlm_extent_add_lock() */
- *flags &= ~LDLM_FL_BLOCKED_MASK;
- GOTO(out, rc = 0);
- }
+ if (rc < 0)
+ GOTO(out_rpc_list, rc);
+
+ rc2 = 0;
+ if (rc != 2) {
+ rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock,
+ flags, err, &rpc_list,
+ &contended_locks);
+ if (rc2 < 0)
+ GOTO(out_rpc_list, rc = rc2);
+ }
+ if (rc + rc2 != 2) {
+ /* Add the LDLM_FL_NO_TIMEOUT flag to the blocked lock to force
+ * the client to wait for it endlessly once the lock is
+ * enqueued -bzzz */
+ rc = ldlm_handle_conflict_lock(lock, flags, &rpc_list,
+ LDLM_FL_NO_TIMEOUT);
+ if (rc == -ERESTART)
GOTO(restart, rc);
- }
-
- /* this way we force client to wait for the lock
- * endlessly once the lock is enqueued -bzzz */
- *flags |= LDLM_FL_BLOCK_GRANTED | LDLM_FL_NO_TIMEOUT;
-
+ *err = rc;
+ } else {
+ ldlm_extent_policy(res, lock, flags);
+ ldlm_resource_unlink_lock(lock);
+ ldlm_grant_lock(lock, work_list);
+ rc = 0;
}
- RETURN(0);
-out:
+
+out_rpc_list:
if (!list_empty(&rpc_list)) {
LASSERT(!ldlm_is_ast_discard_data(lock));
- discard_bl_list(&rpc_list);
+ ldlm_discard_bl_list(&rpc_list);
}
RETURN(rc);
}
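/* The intention values tested above are defined elsewhere in this
 * series; a presumed sketch of the enum, for reference (descriptions
 * follow the old first_enq semantics documented previously):
 *
 *	enum ldlm_process_intention {
 *		LDLM_PROCESS_RESCAN,	- reprocess waiting queue, blocking
 *					  ASTs already sent
 *		LDLM_PROCESS_ENQUEUE,	- first enqueue of a new lock
 *		LDLM_PROCESS_RECOVERY,	- re-grant locks during recovery,
 *					  with a caller-supplied work_list
 *	};
 */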
#endif /* HAVE_SERVER_SUPPORT */
+struct ldlm_kms_shift_args {
+ __u64 old_kms;
+ __u64 kms;
+ bool complete;
+};
+
+/* Callback for interval_iterate functions, used by ldlm_extent_shift_kms */
+static enum interval_iter ldlm_kms_shift_cb(struct interval_node *n,
+ void *args)
+{
+ struct ldlm_kms_shift_args *arg = args;
+ struct ldlm_interval *node = to_ldlm_interval(n);
+ struct ldlm_lock *tmplock;
+ struct ldlm_lock *lock = NULL;
+
+ ENTRY;
+
+ /* Since all locks in an interval have the same extent, we can just
+ * use the first lock without kms_ignore set. */
+ list_for_each_entry(tmplock, &node->li_group, l_sl_policy) {
+ if (ldlm_is_kms_ignore(tmplock))
+ continue;
+
+ lock = tmplock;
+
+ break;
+ }
+
+ /* No locks in this interval without kms_ignore set */
+ if (!lock)
+ RETURN(INTERVAL_ITER_CONT);
+
+ /* If we find a lock with a greater or equal kms, we are not the
+ * highest lock (or we share that distinction with another lock), and
+ * don't need to update KMS. Return old_kms and stop looking. */
+ if (lock->l_policy_data.l_extent.end >= arg->old_kms) {
+ arg->kms = arg->old_kms;
+ arg->complete = true;
+ RETURN(INTERVAL_ITER_STOP);
+ }
+
+ if (lock->l_policy_data.l_extent.end + 1 > arg->kms)
+ arg->kms = lock->l_policy_data.l_extent.end + 1;
+
+ /* Since interval_iterate_reverse starts with the highest lock and
+ * works down, for PW locks, we only need to check if we should update
+ * the kms, then stop walking the tree. PR locks are not exclusive, so
+ * the highest start does not imply the highest end and we must
+ * continue. (Only one group lock is allowed per resource, so this is
+ * irrelevant for group locks.) */
+ if (lock->l_granted_mode == LCK_PW)
+ RETURN(INTERVAL_ITER_STOP);
+ else
+ RETURN(INTERVAL_ITER_CONT);
+}
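/* Worked example (illustrative only): with granted PW extents
 * [0, 4095] and [4096, 8191], old_kms = 8192.  Cancelling the
 * [4096, 8191] lock marks it kms_ignore, so the callback skips it;
 * the remaining end 4095 is below old_kms, so the new kms becomes
 * 4095 + 1 = 4096. */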
+
/* When a lock is cancelled by a client, the KMS may undergo change if this
- * is the "highest lock". This function returns the new KMS value.
+ * is the "highest lock". This function returns the new KMS value, updating
+ * it only if we were the highest lock.
+ *
* Caller must hold lr_lock already.
*
* NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
- struct ldlm_resource *res = lock->l_resource;
- struct list_head *tmp;
- struct ldlm_lock *lck;
- __u64 kms = 0;
- ENTRY;
-
- /* don't let another thread in ldlm_extent_shift_kms race in
- * just after we finish and take our lock into account in its
- * calculation of the kms */
+ struct ldlm_resource *res = lock->l_resource;
+ struct ldlm_interval_tree *tree;
+ struct ldlm_kms_shift_args args;
+ int idx = 0;
+
+ ENTRY;
+
+ args.old_kms = old_kms;
+ args.kms = 0;
+ args.complete = false;
+
+ /* don't let another thread in ldlm_extent_shift_kms race in
+ * just after we finish and take our lock into account in its
+ * calculation of the kms */
ldlm_set_kms_ignore(lock);
- list_for_each(tmp, &res->lr_granted) {
- lck = list_entry(tmp, struct ldlm_lock, l_res_link);
+ /* We iterate over the lock trees, looking for the largest kms smaller
+ * than the current one. */
+ for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+ tree = &res->lr_itree[idx];
- if (ldlm_is_kms_ignore(lck))
- continue;
+ /* If our already known kms is >= the highest 'end' in
+ * this tree, we don't need to check this tree, because
+ * the kms from a tree can be lower than in_max_high (due to
+ * kms_ignore), but it can never be higher. */
+ if (!tree->lit_root || args.kms >= tree->lit_root->in_max_high)
+ continue;
- if (lck->l_policy_data.l_extent.end >= old_kms)
- RETURN(old_kms);
+ interval_iterate_reverse(tree->lit_root, ldlm_kms_shift_cb,
+ &args);
- /* This extent _has_ to be smaller than old_kms (checked above)
- * so kms can only ever be smaller or the same as old_kms. */
- if (lck->l_policy_data.l_extent.end + 1 > kms)
- kms = lck->l_policy_data.l_extent.end + 1;
- }
- LASSERTF(kms <= old_kms, "kms %llu old_kms %llu\n", kms, old_kms);
+ /* this tells us we're not the highest lock, so we don't need
+ * to check the remaining trees */
+ if (args.complete)
+ break;
+ }
+
+ LASSERTF(args.kms <= args.old_kms, "kms %llu old_kms %llu\n", args.kms,
+ args.old_kms);
- RETURN(kms);
+ RETURN(args.kms);
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);
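/* Illustrative caller pattern (editorial sketch; real callers live in
 * the client I/O layers):
 *
 *	lock_res_and_lock(lock);
 *	kms = ldlm_extent_shift_kms(lock, old_kms);
 *	unlock_res_and_lock(lock);
 *
 * i.e. take lr_lock as required above, compute the new known minimum
 * size with the cancelled lock excluded, then publish it. */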
int index;
LASSERT(mode != 0);
- LASSERT(IS_PO2(mode));
+ LASSERT(is_power_of_2(mode));
for (index = -1; mode != 0; index++, mode >>= 1)
/* do nothing */;
LASSERT(index < LCK_MODE_NUM);
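/* Editorial note: the loop above computes the index of the single set
 * bit in 'mode' (effectively ilog2), e.g. mode = 1 << 4 yields index 4;
 * hence the is_power_of_2() assertion -- each lock mode maps to exactly
 * one interval tree slot in lr_itree[]. */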
struct interval_node *found, **root;
struct ldlm_interval *node;
struct ldlm_extent *extent;
- int idx;
+ int idx, rc;
LASSERT(lock->l_granted_mode == lock->l_req_mode);
/* node extent initialize */
extent = &lock->l_policy_data.l_extent;
- interval_set(&node->li_node, extent->start, extent->end);
+
+ rc = interval_set(&node->li_node, extent->start, extent->end);
+ LASSERT(!rc);
root = &res->lr_itree[idx].lit_root;
found = interval_insert(&node->li_node, root);