+/* Work out the widest extent we can actually grant for @lock.  We start
+ * from the whole range [0, ~0] and let every lock on the granted and
+ * waiting queues shrink it; if the result differs from what the client
+ * asked for, install the new extent and set LDLM_FL_LOCK_CHANGED so the
+ * reply tells the client about it. */
+static void ldlm_extent_policy(struct ldlm_resource *res,
+ struct ldlm_lock *lock, int *flags)
+{
+ struct ldlm_extent ext = { .start = 0, .end = ~0 };
+ struct ldlm_extent *cur = &lock->l_policy_data.l_extent;
+
+ ldlm_extent_internal_policy(&res->lr_granted, lock, &ext);
+ ldlm_extent_internal_policy(&res->lr_waiting, lock, &ext);
+
+ if (cur->start == ext.start && cur->end == ext.end)
+ return;
+
+ *flags |= LDLM_FL_LOCK_CHANGED;
+ cur->start = ext.start;
+ cur->end = ext.end;
+}
+
+/* Determine if the lock is compatible with all locks on the queue.
+ * We stop walking the queue if we hit ourselves so we don't take
+ * conflicting locks enqueued after us into account, or we'd wait forever.
+ *
+ * If send_cbs is zero this is a pure compatibility test: return 0 at the
+ * first conflicting lock.  If send_cbs is non-zero, remember the conflict
+ * but keep scanning so that EVERY conflicting lock gets a blocking AST
+ * work item queued.
+ *
+ * Returns 1 if @req is compatible with everything ahead of it on @queue,
+ * 0 otherwise.
+ *
+ * NOTE(review): ldlm_add_ast_work_item() appears to stash work on
+ * res->lr_tmp, which the caller sets up before invoking us with
+ * send_cbs != 0 (see ldlm_process_extent_lock) — confirm. */
+static int
+ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
+ int send_cbs)
+{
+ struct list_head *tmp;
+ struct ldlm_lock *lock;
+ ldlm_mode_t req_mode = req->l_req_mode;
+ __u64 req_start = req->l_req_extent.start;
+ __u64 req_end = req->l_req_extent.end;
+ int compat = 1;
+ ENTRY;
+
+ lockmode_verify(req_mode);
+
+ list_for_each(tmp, queue) {
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+
+ /* Everything past this point was enqueued after us. */
+ if (req == lock)
+ RETURN(compat);
+
+ /* locks are compatible, overlap doesn't matter */
+ if (lockmode_compat(lock->l_req_mode, req_mode))
+ continue;
+
+ /* if lock doesn't overlap skip it */
+ if (lock->l_policy_data.l_extent.end < req_start ||
+ lock->l_policy_data.l_extent.start > req_end)
+ continue;
+
+ /* Conflict: without callbacks we can stop right here... */
+ if (!send_cbs)
+ RETURN(0);
+
+ /* ...with callbacks, keep walking so every conflicting
+ * lock gets its blocking AST queued. */
+ compat = 0;
+ if (lock->l_blocking_ast)
+ ldlm_add_ast_work_item(lock, req, NULL, 0);
+ }
+
+ RETURN(compat);
+}
+
+/* Grant, block, or re-process an extent lock.
+ *
+ * If first_enq is 0 (ie, called from ldlm_reprocess_queue):
+ * - blocking ASTs have already been sent
+ * - the caller has already initialized req->lr_tmp
+ * - must call this function with the ns lock held
+ *
+ * If first_enq is 1 (ie, called from ldlm_lock_enqueue):
+ * - blocking ASTs have not been sent
+ * - the caller has NOT initialized req->lr_tmp, so we must set it up
+ * here (pointing at the local rpc_list) and clear it before returning
+ * - must call this function with the ns lock held once */
+int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
+ ldlm_error_t *err)
+{
+ struct ldlm_resource *res = lock->l_resource;
+ struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
+ int rc;
+ ENTRY;
+
+ /* Extent locks are never converted, so the converting queue
+ * must be empty. */
+ LASSERT(list_empty(&res->lr_converting));
+
+ if (!first_enq) {
+ /* Re-processing: ASTs already went out, so do a pure
+ * compatibility test (send_cbs == 0) against both queues
+ * and grant only if nothing conflicts. */
+ LASSERT(res->lr_tmp != NULL);
+ rc = ldlm_extent_compat_queue(&res->lr_granted, lock, 0);
+ if (!rc)
+ RETURN(LDLM_ITER_STOP);
+ rc = ldlm_extent_compat_queue(&res->lr_waiting, lock, 0);
+ if (!rc)
+ RETURN(LDLM_ITER_STOP);
+
+ ldlm_resource_unlink_lock(lock);
+
+ ldlm_extent_policy(res, lock, flags);
+ ldlm_grant_lock(lock, NULL, 0, 1);
+ RETURN(LDLM_ITER_CONTINUE);
+ }
+
+ restart:
+ /* First enqueue: scan both queues with send_cbs == 1, collecting
+ * blocking-AST work items into rpc_list via res->lr_tmp. */
+ LASSERT(res->lr_tmp == NULL);
+ res->lr_tmp = &rpc_list;
+ rc = ldlm_extent_compat_queue(&res->lr_granted, lock, 1);
+ rc += ldlm_extent_compat_queue(&res->lr_waiting, lock, 1);
+ res->lr_tmp = NULL;
+
+ /* Each compat_queue() call contributes 1 when fully compatible,
+ * so rc == 2 means there are no conflicts at all. */
+ if (rc != 2) {
+ /* If either of the compat_queue()s returned 0, then we
+ * have ASTs to send and must go onto the waiting list.
+ *
+ * bug 2322: we used to unlink and re-add here, which was a
+ * terrible folly -- if we goto restart, we could get
+ * re-ordered! Causes deadlock, because ASTs aren't sent! */
+ if (list_empty(&lock->l_res_link))
+ ldlm_resource_add_lock(res, &res->lr_waiting, lock);
+ /* Drop the ns lock while the blocking-AST RPCs run, then
+ * retake it; -ERESTART means the queues may have changed
+ * underneath us, so rescan from scratch. */
+ l_unlock(&res->lr_namespace->ns_lock);
+ rc = ldlm_run_ast_work(res->lr_namespace, &rpc_list);
+ l_lock(&res->lr_namespace->ns_lock);
+ if (rc == -ERESTART)
+ GOTO(restart, -ERESTART);
+ *flags |= LDLM_FL_BLOCK_GRANTED;
+ } else {
+ /* No conflicts: possibly widen the extent, then grant. */
+ ldlm_extent_policy(res, lock, flags);
+ ldlm_resource_unlink_lock(lock);
+ ldlm_grant_lock(lock, NULL, 0, 0);
+ }
+ RETURN(0);
+}
+
+/* When a lock is cancelled by a client, the KMS may undergo change if this
+ * is the "highest lock". This function returns the new KMS value.