#ifdef HAVE_SERVER_SUPPORT
/**
+ * It should iterate through all waiting locks on a given resource queue and
+ * attempt to grant them. As an optimization, only the head waiting
+ * lock of each inodebit queue is checked.
+ *
+ * Must be called with resource lock held.
+ */
+int ldlm_reprocess_inodebits_queue(struct ldlm_resource *res,
+ struct list_head *queue,
+ struct list_head *work_list,
+ enum ldlm_process_intention intention,
+ struct ldlm_lock *hint)
+{
+ __u64 flags;
+ int rc = LDLM_ITER_CONTINUE;
+ enum ldlm_error err;
+ LIST_HEAD(bl_ast_list);
+ struct ldlm_ibits_queues *queues = res->lr_ibits_queues;
+ int i;
+
+ ENTRY;
+
+ check_res_locked(res);
+
+ LASSERT(res->lr_type == LDLM_IBITS);
+ LASSERT(intention == LDLM_PROCESS_RESCAN ||
+ intention == LDLM_PROCESS_RECOVERY);
+
+ /* Recovery has no per-bit optimization; fall back to the generic
+ * queue walk. */
+ if (intention == LDLM_PROCESS_RECOVERY)
+ return ldlm_reprocess_queue(res, queue, work_list, intention,
+ NULL);
+
+ /* Restart point: re-scan all bit queues after blocking ASTs were
+ * sent with the resource lock dropped (see -ERESTART below). */
+restart:
+ CDEBUG(D_DLMTRACE, "--- Reprocess resource "DLDLMRES" (%p)\n",
+ PLDLMRES(res), res);
+
+ for (i = 0; i < MDS_INODELOCK_NUMBITS; i++) {
+ LIST_HEAD(rpc_list);
+ struct list_head *head = &queues->liq_waiting[i];
+ struct ldlm_lock *pending;
+ struct ldlm_ibits_node *node;
+
+ if (list_empty(head))
+ continue;
+ /* With a hint lock, scan only the bit queues it covers. */
+ if (hint && !(hint->l_policy_data.l_inodebits.bits & BIT(i)))
+ continue;
+
+ /* Only the head waiting lock of this bit queue is tried. */
+ node = list_entry(head->next, struct ldlm_ibits_node,
+ lin_link[i]);
+
+ pending = node->lock;
+ LDLM_DEBUG(pending, "Reprocessing lock from queue %d", i);
+
+ flags = 0;
+ rc = ldlm_process_inodebits_lock(pending, &flags, intention,
+ &err, &rpc_list);
+ if (ldlm_is_granted(pending)) {
+ /* Granted: completion work goes to the caller's
+ * work_list. */
+ list_splice(&rpc_list, work_list);
+ /* Try to grant more locks from current queue */
+ i--;
+ } else {
+ /* Still blocked: collect blocking ASTs locally so
+ * they can be sent below. */
+ list_splice(&rpc_list, &bl_ast_list);
+ }
+ }
+
+ /* Send collected blocking ASTs; the resource lock must be dropped
+ * around the RPCs. */
+ if (!list_empty(&bl_ast_list)) {
+ unlock_res(res);
+
+ rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &bl_ast_list,
+ LDLM_WORK_BL_AST);
+
+ lock_res(res);
+ if (rc == -ERESTART)
+ GOTO(restart, rc);
+ }
+
+ /* Drop any AST work items not consumed by ldlm_run_ast_work(). */
+ if (!list_empty(&bl_ast_list))
+ ldlm_discard_bl_list(&bl_ast_list);
+
+ RETURN(rc);
+}
+
+/**
* Determine if the lock is compatible with all locks on the queue.
*
* If \a work_list is provided, conflicting locks are linked there.
*/
static int
ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
- struct list_head *work_list)
+ __u64 *ldlm_flags, struct list_head *work_list)
{
+ enum ldlm_mode req_mode = req->l_req_mode;
struct list_head *tmp;
struct ldlm_lock *lock;
__u64 req_bits = req->l_policy_data.l_inodebits.bits;
ENTRY;
+ lockmode_verify(req_mode);
+
/* There is no sense in lock with no bits set. Also such a lock
* would be compatible with any other bit lock.
* Meanwhile that can be true if there were just try_bits and all
if ((req_bits | *try_bits) == 0)
RETURN(0);
+ /* Group lock could be only DOM */
+ if (unlikely(req_mode == LCK_GROUP &&
+ (req_bits | *try_bits) != MDS_INODELOCK_DOM))
+ RETURN(-EPROTO);
+
list_for_each(tmp, queue) {
struct list_head *mode_tail;
continue;
}
- /* locks' mode are compatible, bits don't matter */
- if (lockmode_compat(lock->l_req_mode, req->l_req_mode)) {
- /* jump to last lock in mode group */
- tmp = mode_tail;
+ if (lockmode_compat(lock->l_req_mode, req_mode)) {
+ /* non group locks are compatible, bits don't matter */
+ if (likely(req_mode != LCK_GROUP)) {
+ /* jump to last lock in mode group */
+ tmp = mode_tail;
+ continue;
+ }
+
+ if (req->l_policy_data.l_inodebits.li_gid ==
+ lock->l_policy_data.l_inodebits.li_gid) {
+ if (ldlm_is_granted(lock))
+ RETURN(2);
+
+ if (*ldlm_flags & LDLM_FL_BLOCK_NOWAIT)
+ RETURN(-EWOULDBLOCK);
+
+ /* Place the same group together */
+ ldlm_resource_insert_lock_after(lock, req);
+ RETURN(0);
+ }
+ }
+
+ /* GROUP locks are placed to a head of the waiting list, but
+ * grouped by gid. */
+ if (unlikely(req_mode == LCK_GROUP && !ldlm_is_granted(lock))) {
+ compat = 0;
+ if (lock->l_req_mode != LCK_GROUP) {
+ /* Already not a GROUP lock, insert before. */
+ ldlm_resource_insert_lock_before(lock, req);
+ break;
+ }
+ /* Still GROUP but a different gid (the same gid would
+ * be handled above). Keep searching for the same gid. */
+ LASSERT(req->l_policy_data.l_inodebits.li_gid !=
+ lock->l_policy_data.l_inodebits.li_gid);
continue;
}
!ldlm_is_cos_incompat(req) &&
ldlm_is_cos_enabled(req) &&
lock->l_client_cookie == req->l_client_cookie)
- goto not_conflicting;
+ goto skip_work_list;
+
+ compat = 0;
+
+ if (unlikely(lock->l_req_mode == LCK_GROUP)) {
+ LASSERT(ldlm_has_dom(lock));
+
+ if (*ldlm_flags & LDLM_FL_BLOCK_NOWAIT)
+ RETURN(-EWOULDBLOCK);
+
+ /* A local combined DOM lock coming across a
+ * GROUP DOM lock would block the thread for a
+ * long time, which is not allowed; try_bits
+ * must be used instead.
+ */
+ if (!req->l_export &&
+ (req_bits & MDS_INODELOCK_DOM) &&
+ (req_bits & ~MDS_INODELOCK_DOM))
+ LBUG();
+
+ goto skip_work_list;
+ }
/* Found a conflicting policy group. */
if (!work_list)
RETURN(0);
- compat = 0;
-
/* Add locks of the policy group to @work_list
* as blocking locks for @req */
if (lock->l_blocking_ast)
ldlm_add_ast_work_item(lock,
req, work_list);
}
-not_conflicting:
+skip_work_list:
if (tmp == mode_tail)
break;
* waiting queues. The lock is granted if no conflicts are found in
* either queue.
*/
-int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags,
+int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *ldlm_flags,
enum ldlm_process_intention intention,
enum ldlm_error *err,
struct list_head *work_list)
struct ldlm_resource *res = lock->l_resource;
struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
NULL : work_list;
- int rc;
-
+ int rc, rc2 = 0;
ENTRY;
+ *err = ELDLM_LOCK_ABORTED;
LASSERT(!ldlm_is_granted(lock));
check_res_locked(res);
if (intention == LDLM_PROCESS_RESCAN) {
- struct list_head *bl_list;
-
- if (*flags & LDLM_FL_BLOCK_NOWAIT) {
- bl_list = NULL;
- *err = ELDLM_LOCK_WOULDBLOCK;
- } else {
- bl_list = work_list;
- *err = ELDLM_LOCK_ABORTED;
- }
+ struct list_head *bl_list =
+ *ldlm_flags & LDLM_FL_BLOCK_NOWAIT ? NULL : work_list;
LASSERT(lock->l_policy_data.l_inodebits.bits != 0);
* any blocked locks from granted queue during every reprocess
* and bl_ast will be sent if needed.
*/
+ *ldlm_flags = 0;
rc = ldlm_inodebits_compat_queue(&res->lr_granted, lock,
- bl_list);
+ ldlm_flags, bl_list);
if (!rc)
RETURN(LDLM_ITER_STOP);
- rc = ldlm_inodebits_compat_queue(&res->lr_waiting, lock, NULL);
+ rc = ldlm_inodebits_compat_queue(&res->lr_waiting, lock,
+ ldlm_flags, NULL);
if (!rc)
RETURN(LDLM_ITER_STOP);
lock->l_policy_data.l_inodebits.bits |=
lock->l_policy_data.l_inodebits.try_bits;
lock->l_policy_data.l_inodebits.try_bits = 0;
- *flags |= LDLM_FL_LOCK_CHANGED;
+ *ldlm_flags |= LDLM_FL_LOCK_CHANGED;
}
ldlm_resource_unlink_lock(lock);
ldlm_grant_lock(lock, grant_work);
RETURN(LDLM_ITER_CONTINUE);
}
- rc = ldlm_inodebits_compat_queue(&res->lr_granted, lock, work_list);
- rc += ldlm_inodebits_compat_queue(&res->lr_waiting, lock, work_list);
+ rc = ldlm_inodebits_compat_queue(&res->lr_granted, lock,
+ ldlm_flags, work_list);
+ if (rc < 0)
+ GOTO(out, *err = rc);
if (rc != 2) {
+ rc2 = ldlm_inodebits_compat_queue(&res->lr_waiting, lock,
+ ldlm_flags, work_list);
+ if (rc2 < 0)
+ GOTO(out, *err = rc = rc2);
+ }
+
+ if (rc + rc2 != 2) {
/* if there were only bits to try and all are conflicting */
if ((lock->l_policy_data.l_inodebits.bits |
- lock->l_policy_data.l_inodebits.try_bits) == 0) {
- *err = ELDLM_LOCK_WOULDBLOCK;
- } else {
+ lock->l_policy_data.l_inodebits.try_bits)) {
+ /* There is no sense to set LDLM_FL_NO_TIMEOUT to
+ * @ldlm_flags for DOM lock while they are enqueued
+ * through intents, i.e. @lock here is local which does
+ * not timeout. */
*err = ELDLM_OK;
}
} else {
lock->l_policy_data.l_inodebits.bits |=
lock->l_policy_data.l_inodebits.try_bits;
lock->l_policy_data.l_inodebits.try_bits = 0;
- *flags |= LDLM_FL_LOCK_CHANGED;
+ *ldlm_flags |= LDLM_FL_LOCK_CHANGED;
}
LASSERT(lock->l_policy_data.l_inodebits.bits);
ldlm_resource_unlink_lock(lock);
}
RETURN(LDLM_ITER_CONTINUE);
+out:
+ return rc;
}
#endif /* HAVE_SERVER_SUPPORT */
union ldlm_policy_data *lpolicy)
{
lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits;
- lpolicy->l_inodebits.try_bits = wpolicy->l_inodebits.try_bits;
+ /**
+ * try_bits and li_gid are to be handled outside of generic
+ * write_to_local due to different behavior on a server and client.
+ */
}
void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
memset(wpolicy, 0, sizeof(*wpolicy));
wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits;
wpolicy->l_inodebits.try_bits = lpolicy->l_inodebits.try_bits;
+ wpolicy->l_inodebits.li_gid = lpolicy->l_inodebits.li_gid;
}
/**
EXPORT_SYMBOL(ldlm_inodebits_drop);
/* convert single lock */
-int ldlm_cli_dropbits(struct ldlm_lock *lock, __u64 drop_bits)
+int ldlm_cli_inodebits_convert(struct ldlm_lock *lock,
+ enum ldlm_cancel_flags cancel_flags)
{
- struct lustre_handle lockh;
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+ struct ldlm_lock_desc ld = { { 0 } };
+ __u64 drop_bits, new_bits;
__u32 flags = 0;
int rc;
ENTRY;
- LASSERT(drop_bits);
- LASSERT(!lock->l_readers && !lock->l_writers);
+ check_res_locked(lock->l_resource);
- LDLM_DEBUG(lock, "client lock convert START");
+ /* Lock is being converted already */
+ if (ldlm_is_converting(lock)) {
+ if (!(cancel_flags & LCF_ASYNC)) {
+ unlock_res_and_lock(lock);
+ wait_event_idle(lock->l_waitq,
+ is_lock_converted(lock));
+ lock_res_and_lock(lock);
+ }
+ RETURN(0);
+ }
+
+ /* lru_cancel may happen in parallel and call ldlm_cli_cancel_list()
+ * independently.
+ */
+ if (ldlm_is_canceling(lock))
+ RETURN(-EINVAL);
+
+ /* no need in only local convert */
+ if (lock->l_flags & (LDLM_FL_LOCAL_ONLY | LDLM_FL_CANCEL_ON_BLOCK))
+ RETURN(-EINVAL);
+
+ drop_bits = lock->l_policy_data.l_inodebits.cancel_bits;
+ /* no cancel bits - means that caller needs full cancel */
+ if (drop_bits == 0)
+ RETURN(-EINVAL);
+
+ new_bits = lock->l_policy_data.l_inodebits.bits & ~drop_bits;
+ /* check if all lock bits are dropped, proceed with cancel */
+ if (!new_bits)
+ RETURN(-EINVAL);
+
+ /* check if no dropped bits, consider this as successful convert */
+ if (lock->l_policy_data.l_inodebits.bits == new_bits)
+ RETURN(0);
- ldlm_lock2handle(lock, &lockh);
+ ldlm_set_converting(lock);
+ /* Finally call cancel callback for remaining bits only.
+ * It is important to have converting flag during that
+ * so blocking_ast callback can distinguish convert from
+ * cancels.
+ */
+ ld.l_policy_data.l_inodebits.cancel_bits = drop_bits;
+ unlock_res_and_lock(lock);
+ lock->l_blocking_ast(lock, &ld, lock->l_ast_data, LDLM_CB_CANCELING);
+ /* now notify server about convert */
+ rc = ldlm_cli_convert_req(lock, &flags, new_bits);
lock_res_and_lock(lock);
- /* check if all bits are blocked */
- if (!(lock->l_policy_data.l_inodebits.bits & ~drop_bits)) {
- unlock_res_and_lock(lock);
- /* return error to continue with cancel */
- GOTO(exit, rc = -EINVAL);
- }
+ if (rc)
+ GOTO(full_cancel, rc);
- /* check if no common bits, consider this as successful convert */
- if (!(lock->l_policy_data.l_inodebits.bits & drop_bits)) {
- unlock_res_and_lock(lock);
- GOTO(exit, rc = 0);
- }
+ /* Finally clear these bits in lock ibits */
+ ldlm_inodebits_drop(lock, drop_bits);
- /* check if there is race with cancel */
- if (ldlm_is_canceling(lock) || ldlm_is_cancel(lock)) {
- unlock_res_and_lock(lock);
- GOTO(exit, rc = -EINVAL);
- }
+ /* Now that the lock is held again, check whether it was
+ * canceled in the meantime; if so, it is important NOT to
+ * clear cbpending below.
+ */
+ if (ldlm_is_canceling(lock))
+ GOTO(full_cancel, rc = -EINVAL);
+
+ /* also check again if more bits to be cancelled appeared */
+ if (drop_bits != lock->l_policy_data.l_inodebits.cancel_bits)
+ GOTO(clear_converting, rc = -EAGAIN);
/* clear cbpending flag early, it is safe to match lock right after
* client convert because it is downgrade always.
*/
ldlm_clear_cbpending(lock);
ldlm_clear_bl_ast(lock);
+ spin_lock(&ns->ns_lock);
+ if (list_empty(&lock->l_lru))
+ ldlm_lock_add_to_lru_nolock(lock);
+ spin_unlock(&ns->ns_lock);
- /* If lock is being converted already, check drop bits first */
- if (ldlm_is_converting(lock)) {
- /* raced lock convert, lock inodebits are remaining bits
- * so check if they are conflicting with new convert or not.
- */
- if (!(lock->l_policy_data.l_inodebits.bits & drop_bits)) {
- unlock_res_and_lock(lock);
- GOTO(exit, rc = 0);
- }
- /* Otherwise drop new conflicting bits in new convert */
- }
- ldlm_set_converting(lock);
- /* from all bits of blocking lock leave only conflicting */
- drop_bits &= lock->l_policy_data.l_inodebits.bits;
- /* save them in cancel_bits, so l_blocking_ast will know
- * which bits from the current lock were dropped. */
- lock->l_policy_data.l_inodebits.cancel_bits = drop_bits;
- /* Finally clear these bits in lock ibits */
- ldlm_inodebits_drop(lock, drop_bits);
- unlock_res_and_lock(lock);
- /* Finally call cancel callback for remaining bits only.
- * It is important to have converting flag during that
- * so blocking_ast callback can distinguish convert from
- * cancels.
+ /* the job is done, zero the cancel_bits. If more conflicts appear,
+ * it will result in another cycle of ldlm_cli_inodebits_convert().
*/
- if (lock->l_blocking_ast)
- lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
- LDLM_CB_CANCELING);
+full_cancel:
+ lock->l_policy_data.l_inodebits.cancel_bits = 0;
+clear_converting:
+ ldlm_clear_converting(lock);
+ RETURN(rc);
+}
- /* now notify server about convert */
- rc = ldlm_cli_convert(lock, &flags);
- if (rc) {
- lock_res_and_lock(lock);
- if (ldlm_is_converting(lock)) {
- ldlm_clear_converting(lock);
- ldlm_set_cbpending(lock);
- ldlm_set_bl_ast(lock);
+/**
+ * Allocate the per-lock inodebits node used to link a server-side lock
+ * into the per-bit waiting queues of its resource.
+ *
+ * Client-side locks (non-server namespace) do not use the per-bit
+ * queues, so l_ibits_node is simply set to NULL for them.
+ *
+ * \retval 0		on success
+ * \retval -ENOMEM	if the slab allocation fails
+ */
+int ldlm_inodebits_alloc_lock(struct ldlm_lock *lock)
+{
+ if (ldlm_is_ns_srv(lock)) {
+ int i;
+
+ OBD_SLAB_ALLOC_PTR(lock->l_ibits_node, ldlm_inodebits_slab);
+ if (lock->l_ibits_node == NULL)
+ return -ENOMEM;
+ /* Not linked into any bit queue yet. */
+ for (i = 0; i < MDS_INODELOCK_NUMBITS; i++)
+ INIT_LIST_HEAD(&lock->l_ibits_node->lin_link[i]);
+ lock->l_ibits_node->lock = lock;
+ } else {
+ lock->l_ibits_node = NULL;
+ }
+ return 0;
+}
+
+void ldlm_inodebits_add_lock(struct ldlm_resource *res, struct list_head *head,
+ struct ldlm_lock *lock, bool tail)
+{
+ int i;
+
+ if (!ldlm_is_ns_srv(lock))
+ return;
+
+ if (head == &res->lr_waiting) {
+ for (i = 0; i < MDS_INODELOCK_NUMBITS; i++) {
+ if (!(lock->l_policy_data.l_inodebits.bits & BIT(i)))
+ continue;
+ if (tail)
+ list_add_tail(&lock->l_ibits_node->lin_link[i],
+ &res->lr_ibits_queues->liq_waiting[i]);
+ else
+ list_add(&lock->l_ibits_node->lin_link[i],
+ &res->lr_ibits_queues->liq_waiting[i]);
+ }
+ } else if (head == &res->lr_granted && lock->l_ibits_node != NULL) {
+ for (i = 0; i < MDS_INODELOCK_NUMBITS; i++)
+ LASSERT(list_empty(&lock->l_ibits_node->lin_link[i]));
+ OBD_SLAB_FREE_PTR(lock->l_ibits_node, ldlm_inodebits_slab);
+ lock->l_ibits_node = NULL;
+ } else if (head != &res->lr_granted) {
+ /* we are inserting in a middle of a list, after @head */
+ struct ldlm_lock *orig = list_entry(head, struct ldlm_lock,
+ l_res_link);
+ LASSERT(orig->l_policy_data.l_inodebits.bits ==
+ lock->l_policy_data.l_inodebits.bits);
+ /* The is no a use case to insert before with exactly matched
+ * set of bits */
+ LASSERT(tail == false);
+
+ for (i = 0; i < MDS_INODELOCK_NUMBITS; i++) {
+ if (!(lock->l_policy_data.l_inodebits.bits & (1 << i)))
+ continue;
+ list_add(&lock->l_ibits_node->lin_link[i],
+ &orig->l_ibits_node->lin_link[i]);
}
- unlock_res_and_lock(lock);
- GOTO(exit, rc);
}
- EXIT;
-exit:
- LDLM_DEBUG(lock, "client lock convert END");
- return rc;
+}
+
+/**
+ * Unlink \a lock from its skiplist and, on a server, from every per-bit
+ * waiting queue of its resource (lin_link entries of unset bits are
+ * self-linked, so list_del_init() is safe for them).
+ *
+ * NOTE(review): this dereferences l_ibits_node unconditionally for
+ * server-side locks, while ldlm_inodebits_add_lock() frees the node and
+ * sets it to NULL once a lock is moved to the granted list -- confirm a
+ * granted server lock can never reach this path.
+ */
+void ldlm_inodebits_unlink_lock(struct ldlm_lock *lock)
+{
+ int i;
+
+ ldlm_unlink_lock_skiplist(lock);
+ if (!ldlm_is_ns_srv(lock))
+ return;
+
+ for (i = 0; i < MDS_INODELOCK_NUMBITS; i++)
+ list_del_init(&lock->l_ibits_node->lin_link[i]);
+}