+ /* NOTE(review): diff fragment — the enclosing function (an extent-lock
+ * compatibility scan over a lock queue, presumably from Lustre's
+ * ldlm_extent.c) begins before this hunk. 'compat', 'tmp', 'lock',
+ * 'queue', 'req', 'req_mode', 'flags', 'work_list' and 'insertp' are
+ * declared in the unseen portion; confirm against the full file. */
+ /* Extent locks are only queued once. We can get back here with
+ * insertp != NULL if the blocking ASTs returned -ERESTART. */
+ if (!list_empty(&req->l_res_link))
+ insertp = NULL;
+
+ /* Non-GROUP request: test every lock queued ahead of 'req' for
+ * mode/extent conflicts. (GROUP-mode requests are presumably handled
+ * after the end of this hunk — confirm.) */
+ if (req->l_req_mode != LCK_GROUP) {
+ __u64 req_start = req->l_req_extent.start;
+ __u64 req_end = req->l_req_extent.end;
+
+ list_for_each(tmp, queue) {
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ /* Only locks ahead of 'req' in the queue can conflict;
+ * stop once the scan reaches 'req' itself. */
+ if (req == lock)
+ break;
+
+ if (lock->l_req_mode == LCK_GROUP) {
+ /* A group lock ahead of us always conflicts;
+ * fail immediately if the caller cannot block. */
+ if (*flags & LDLM_FL_BLOCK_NOWAIT)
+ RETURN(-EWOULDBLOCK);
+
+ /* No blocking ASTs are sent for group locks. */
+ compat = 0;
+
+ /* there's a blocking group lock in front
+ * of us on the queue. It can be held
+ * indefinitely, so don't timeout. */
+ if (insertp) {
+ *flags |= LDLM_FL_NO_TIMEOUT;
+ /* lock_bitlock(req) is held here. */
+ req->l_flags |= LDLM_FL_NO_TIMEOUT;
+ }
+
+ /* With a work_list, keep scanning so every
+ * conflicting lock gets recorded; without one
+ * the first conflict settles the answer. */
+ if (work_list)
+ continue;
+ else
+ break;
+ }
+
+ /* locks are compatible, overlap doesn't matter */
+ if (lockmode_compat(lock->l_req_mode, req_mode))
+ continue;
+
+ /* Incompatible modes but disjoint extents: no conflict. */
+ if (lock->l_policy_data.l_extent.end < req_start ||
+ lock->l_policy_data.l_extent.start > req_end)
+ continue;
+
+ /* Overlapping extent with an incompatible mode: blocked. */
+ compat = 0;
+
+ /* Caller only wanted a yes/no answer — done. */
+ if (!work_list)
+ break;
+
+ /* Queue a blocking AST so the conflicting lock's holder
+ * is asked to release it. */
+ if (lock->l_blocking_ast)
+ ldlm_add_ast_work_item(lock, req, work_list);
+ }
+
+ /* Report the insertion position: '*insertp = queue' hands back
+ * the list head itself — presumably the caller then inserts
+ * 'req' with list_add_tail(), i.e. at the end of the queue;
+ * verify against the caller. */
+ if (insertp)
+ *insertp = queue;
+
+ RETURN(compat);
+ }
+