+ RETURN(intention == LDLM_PROCESS_RESCAN ? rc : LDLM_ITER_CONTINUE);
+}
+
+/**
+ * Conflicting locks are detected for a lock to be enqueued, add the lock
+ * into waiting list and send blocking ASTs to the conflicting locks.
+ *
+ * \param[in] lock The lock to be enqueued.
+ * \param[out] flags Lock flags for the lock to be enqueued.
+ * \param[in] rpc_list Conflicting locks list.
+ * \param[in] grant_flags extra flags when granting a lock.
+ *
+ * \retval -ERESTART: Some lock was instantly canceled while sending
+ * blocking ASTs, caller needs to re-check conflicting
+ * locks.
+ * \retval -EAGAIN: Lock was destroyed, caller should return error.
+ * \retval 0: Lock is successfully added in waiting list.
+ */
+int ldlm_handle_conflict_lock(struct ldlm_lock *lock, __u64 *flags,
+ struct list_head *rpc_list, __u64 grant_flags)
+{
+ struct ldlm_resource *res = lock->l_resource;
+ int rc;
+ ENTRY;
+
+ check_res_locked(res);
+
+ /* If either of the compat_queue()s returned failure, then we
+ * have ASTs to send and must go onto the waiting list.
+ *
+ * bug 2322: we used to unlink and re-add here, which was a
+ * terrible folly -- if we goto restart, we could get
+ * re-ordered! Causes deadlock, because ASTs aren't sent! */
+ if (list_empty(&lock->l_res_link))
+ ldlm_resource_add_lock(res, &res->lr_waiting, lock);
+ unlock_res(res);
+
+ rc = ldlm_run_ast_work(ldlm_res_to_ns(res), rpc_list,
+ LDLM_WORK_BL_AST);
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
+ !ns_is_client(ldlm_res_to_ns(res)))
+ class_fail_export(lock->l_export);
+
+ lock_res(res);
+ if (rc == -ERESTART) {
+ /* 15715: The lock was granted and destroyed after
+ * resource lock was dropped. Interval node was freed
+ * in ldlm_lock_destroy. Anyway, this always happens
+ * when a client is being evicted. So it would be
+ * ok to return an error. -jay */
+ if (ldlm_is_destroyed(lock))
+ RETURN(-EAGAIN);
+
+ /* lock was granted while resource was unlocked. */
+ if (lock->l_granted_mode == lock->l_req_mode) {
+ /* bug 11300: if the lock has been granted,
+ * break earlier because otherwise, we will go
+ * to restart and ldlm_resource_unlink will be
+ * called and it causes the interval node to be
+ * freed. Then we will fail at
+ * ldlm_extent_add_lock() */
+ *flags &= ~LDLM_FL_BLOCKED_MASK;
+ RETURN(0);
+ }
+
+ RETURN(rc);
+ }
+ *flags |= (LDLM_FL_BLOCK_GRANTED | grant_flags);
+
+ RETURN(0);