+
+ if (unlikely(req_mode == LCK_GROUP &&
+ (lock->l_req_mode != lock->l_granted_mode))) {
+ scan = 1;
+ compat = 0;
+ if (lock->l_req_mode != LCK_GROUP) {
+ /* Ok, we hit non-GROUP lock, there should be no
+ more GROUP locks later on, queue in front of
+ first non-GROUP lock */
+
+ ldlm_resource_insert_lock_after(lock, req);
+ list_del_init(&lock->l_res_link);
+ ldlm_resource_insert_lock_after(req, lock);
+ break;
+ }
+ if (req->l_policy_data.l_extent.gid ==
+ lock->l_policy_data.l_extent.gid) {
+ /* found it */
+ ldlm_resource_insert_lock_after(lock, req);
+ break;
+ }
+ continue;
+ }
+
+ if (unlikely(lock->l_req_mode == LCK_GROUP)) {
+ /* If compared lock is GROUP, then requested is PR/PW/
+ * so this is not compatible; extent range does not
+ * matter */
+ if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+ compat = -EWOULDBLOCK;
+ goto destroylock;
+ } else {
+ *flags |= LDLM_FL_NO_TIMEOUT;
+ }
+ } else if (lock->l_policy_data.l_extent.end < req_start ||
+ lock->l_policy_data.l_extent.start > req_end) {
+ /* if a non group lock doesn't overlap skip it */
+ continue;
+ } else if (lock->l_req_extent.end < req_start ||
+ lock->l_req_extent.start > req_end) {
+ /* false contention, the requests doesn't really overlap */
+ check_contention = 0;
+ }
+
+ if (!work_list)
+ RETURN(0);
+
+ /* don't count conflicting glimpse locks */
+ if (lock->l_req_mode == LCK_PR &&
+ lock->l_policy_data.l_extent.start == 0 &&
+ lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
+ check_contention = 0;
+
+ *contended_locks += check_contention;
+
+ compat = 0;
+ if (lock->l_blocking_ast &&
+ lock->l_req_mode != LCK_GROUP)
+ ldlm_add_ast_work_item(lock, req, work_list);
+ }
+ }
+
+ if (ldlm_check_contention(req, *contended_locks) &&
+ compat == 0 &&
+ (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
+ req->l_req_mode != LCK_GROUP &&
+ req_end - req_start <=
+ ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
+ GOTO(destroylock, compat = -EUSERS);
+
+ RETURN(compat);
+destroylock:
+ list_del_init(&req->l_res_link);
+ ldlm_lock_destroy_nolock(req);
+ *err = compat;
+ RETURN(compat);
+}
+
+/**
+ * Refresh the eviction timer for a lock that is being cancelled.
+ *
+ * Ignores locks that belong to a different export than the one being
+ * prolonged, and locks that are already destroyed.  Only locks with a
+ * blocking AST already sent (i.e. in the middle of cancellation) get
+ * their waiting-list timeout refreshed.
+ *
+ * \param[in] lock	ldlm lock for refresh
+ * \param[in] arg	ldlm prolong arguments, timeout, export, extent
+ *			and counter are used
+ */
+void ldlm_lock_prolong_one(struct ldlm_lock *lock,
+			   struct ldlm_prolong_args *arg)
+{
+	int timeout;
+
+	if (arg->lpa_export != lock->l_export ||
+	    lock->l_flags & LDLM_FL_DESTROYED)
+		/* ignore unrelated locks */
+		return;
+
+	/* counts every lock of this export the walk visits */
+	arg->lpa_locks_cnt++;
+
+	/* No BL AST sent means the lock is not being cancelled, so its
+	 * timer needs no refresh. */
+	if (!(lock->l_flags & LDLM_FL_AST_SENT))
+		/* ignore locks not being cancelled */
+		return;
+
+	/* We are in the middle of the process - BL AST is sent, CANCEL
+	 * is ahead. Take half of BL AT + IO AT process time.
+	 */
+	timeout = arg->lpa_timeout + (ldlm_bl_timeout(lock) >> 1);
+
+	LDLM_DEBUG(lock, "refreshed to %ds.\n", timeout);
+
+	/* counts only locks actually in the cancellation path */
+	arg->lpa_blocks_cnt++;
+
+	/* OK. this is a possible lock the user holds doing I/O
+	 * let's refresh eviction timer for it.
+	 */
+	ldlm_refresh_waiting_lock(lock, timeout);
+}
+EXPORT_SYMBOL(ldlm_lock_prolong_one);
+
+/**
+ * Interval tree iteration callback: prolong every lock attached to the
+ * interval node \a n.
+ *
+ * Each interval node groups locks with an identical extent on the
+ * li_group list; ldlm_lock_prolong_one() filters out unrelated locks.
+ *
+ * \param[in] n	interval node holding a group of locks
+ * \param[in] data	struct ldlm_prolong_args passed through the walk
+ *
+ * \retval INTERVAL_ITER_CONT	always, so the whole tree is walked
+ */
+static enum interval_iter ldlm_resource_prolong_cb(struct interval_node *n,
+						   void *data)
+{
+	struct ldlm_prolong_args *arg = data;
+	struct ldlm_interval *node = to_ldlm_interval(n);
+	struct ldlm_lock *lock;
+
+	ENTRY;
+
+	/* a node in the tree always carries at least one lock */
+	LASSERT(!list_empty(&node->li_group));
+
+	list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+		ldlm_lock_prolong_one(lock, arg);
+	}
+
+	RETURN(INTERVAL_ITER_CONT);
+}
+
+/**
+ * Walk through granted tree and prolong locks if they overlap the extent.
+ *
+ * Looks up the extent resource named by \a arg, then scans the per-mode
+ * interval trees of granted locks for extents intersecting
+ * arg->lpa_extent, refreshing each matching lock's eviction timer.
+ *
+ * \param[in] arg	prolong args: export, resource id, modes and extent
+ */
+void ldlm_resource_prolong(struct ldlm_prolong_args *arg)
+{
+	struct ldlm_interval_tree *tree;
+	struct ldlm_resource *res;
+	struct interval_node_extent ex = { .start = arg->lpa_extent.start,
+					   .end = arg->lpa_extent.end };
+	int idx;
+
+	ENTRY;
+
+	/* takes a reference on the resource; dropped via putref below */
+	res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace, NULL,
+				&arg->lpa_resid, LDLM_EXTENT, 0);
+	if (IS_ERR(res)) {
+		CDEBUG(D_DLMTRACE, "Failed to get resource for resid %llu/%llu\n",
+		       arg->lpa_resid.name[0], arg->lpa_resid.name[1]);
+		RETURN_EXIT;
+	}
+
+	/* the interval trees may only be traversed under the resource lock */
+	lock_res(res);
+	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+		/* one interval tree of granted locks per lock mode */
+		tree = &res->lr_itree[idx];
+		if (tree->lit_root == NULL) /* empty tree, skipped */
+			continue;
+
+		/* There is no possibility to check for the groupID
+		 * so all the group locks are considered as valid
+		 * here, especially because the client is supposed
+		 * to check it has such a lock before sending an RPC.
+		 */
+		if (!(tree->lit_mode & arg->lpa_mode))
+			continue;
+
+		interval_search(tree->lit_root, &ex,
+				ldlm_resource_prolong_cb, arg);
+	}
+
+	unlock_res(res);
+	/* balances the reference taken by ldlm_resource_get() */
+	ldlm_resource_putref(res);
+
+	EXIT;
+}
+EXPORT_SYMBOL(ldlm_resource_prolong);
+
+
+/**
+ * Discard all AST work items from list.
+ *
+ * If for whatever reason we do not want to send ASTs to conflicting locks
+ * anymore, disassemble the list with this function.
+ *
+ * Each entry is unlinked, its AST_SENT flag cleared, and the references
+ * taken when the work item was queued are dropped (both on the blocking
+ * lock and on the lock itself).
+ *
+ * \param[in] bl_list	list of blocking-AST work items to tear down
+ */
+static void discard_bl_list(struct list_head *bl_list)
+{
+	struct list_head *tmp, *pos;
+	ENTRY;
+
+	/* _safe variant: entries are deleted while iterating */
+	list_for_each_safe(pos, tmp, bl_list) {
+		struct ldlm_lock *lock =
+			list_entry(pos, struct ldlm_lock, l_bl_ast);
+
+		list_del_init(&lock->l_bl_ast);
+		/* only locks with a pending (unsent-to-client) BL AST
+		 * may be on this list */
+		LASSERT(ldlm_is_ast_sent(lock));
+		ldlm_clear_ast_sent(lock);
+		LASSERT(lock->l_bl_ast_run == 0);
+		LASSERT(lock->l_blocking_lock);
+		/* drop the reference held on the conflicting lock ... */
+		LDLM_LOCK_RELEASE(lock->l_blocking_lock);
+		lock->l_blocking_lock = NULL;
+		/* ... and the reference taken when queuing the work item */
+		LDLM_LOCK_RELEASE(lock);
+	}
+	EXIT;
+}
+
+/**
+ * Process a granting attempt for extent lock.
+ * Must be called with ns lock held.
+ *
+ * This function looks for any conflicts for \a lock in the granted or
+ * waiting queues. The lock is granted if no conflicts are found in
+ * either queue.
+ *
+ * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
+ * - blocking ASTs have already been sent
+ *
+ * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
+ * - blocking ASTs have not been sent yet, so list of conflicting locks
+ * would be collected and ASTs sent.
+ */
+int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
+ int first_enq, enum ldlm_error *err,
+ struct list_head *work_list)
+{
+ struct ldlm_resource *res = lock->l_resource;
+ struct list_head rpc_list;
+ int rc, rc2;
+ int contended_locks = 0;
+ ENTRY;
+
+ LASSERT(lock->l_granted_mode != lock->l_req_mode);
+ LASSERT(list_empty(&res->lr_converting));
+ LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
+ !ldlm_is_ast_discard_data(lock));
+ INIT_LIST_HEAD(&rpc_list);
+ check_res_locked(res);
+ *err = ELDLM_OK;
+
+ if (!first_enq) {
+ /* Careful observers will note that we don't handle -EWOULDBLOCK
+ * here, but it's ok for a non-obvious reason -- compat_queue
+ * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
+ * flags should always be zero here, and if that ever stops
+ * being true, we want to find out. */
+ LASSERT(*flags == 0);
+ rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
+ err, NULL, &contended_locks);
+ if (rc == 1) {
+ rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
+ flags, err, NULL,
+ &contended_locks);