+ l_extent = &lock->l_policy_data.l_extent;
+
+ /* We already hit the minimum requested size, search no more */
+ if (new_ex->start == req_start && new_ex->end == req_end) {
+ EXIT;
+ return;
+ }
+
+ /* Don't conflict with ourselves */
+ if (req == lock)
+ continue;
+
+ /* Locks are compatible, overlap doesn't matter */
+ /* Until bug 20 is fixed, try to avoid granting overlapping
+ * locks on one client (they take a long time to cancel) */
+ if (lockmode_compat(lock->l_req_mode, req_mode) &&
+ lock->l_export != req->l_export)
+ continue;
+
+ /* If this is a high-traffic lock, don't grow downwards at all
+ * or grow upwards too much */
+ ++conflicting;
+ if (conflicting > 4)
+ new_ex->start = req_start;
+
+ /* If lock doesn't overlap new_ex, skip it. */
+ if (!ldlm_extent_overlap(l_extent, new_ex))
+ continue;
+
+ /* If the locks conflict in their requested extents we can't
+ * satisfy both, so ignore this one. Either we will ping-pong
+ * this extent (we would regardless of what extent we granted)
+ * or the lock is unused and shouldn't limit our extent growth. */
+ if (ldlm_extent_overlap(&lock->l_req_extent, &req->l_req_extent))
+ continue;
+
+ /* We grow extents downwards only as far as they don't overlap
+ * with already-granted locks, on the assumption that clients
+ * will be writing beyond the initial requested end and would
+ * then need to enqueue a new lock beyond the previous request.
+ * lock->l_req_extent.end strictly < req_start, checked above. */
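+ /* e.g. for req [100, 199] and a granted extent [0, 49],
+ * new_ex->start becomes min(49 + 1, 100) = 50, just past
+ * the granted lock. */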
+ if (l_extent->start < req_start && new_ex->start != req_start) {
+ if (l_extent->end >= req_start)
+ new_ex->start = req_start;
+ else
+ new_ex->start = min(l_extent->end+1, req_start);
+ }
+
+ /* If we need to cancel this lock anyway because our request
+ * overlaps the granted lock, we grow up to its requested
+ * extent start instead of limiting this extent, assuming that
+ * clients are writing forwards and the lock had overgrown
+ * its extent downwards before we enqueued our request. */
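+ /* e.g. for req [100, 199] and a granted extent [300, 399],
+ * new_ex->end is capped at 299; had that lock overgrown
+ * downwards to [150, 399] while still requesting [300, 399],
+ * new_ex->end would be max(300 - 1, 199) = 299 as well. */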
+ if (l_extent->end > req_end) {
+ if (l_extent->start <= req_end)
+ new_ex->end = max(lock->l_req_extent.start - 1,
+ req_end);
+ else
+ new_ex->end = max(l_extent->start - 1, req_end);
+ }
+ }
+
+ ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
+ EXIT;
+}
+
+
+/* In order to determine the largest possible extent we can grant, we need
+ * to scan all of the queues. */
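+/* For example, a request for [4096, 8191] on an otherwise idle object is
+ * grown to [0, OBD_OBJECT_EOF]: new_ex starts out covering the whole
+ * object and is only shrunk around conflicting granted or waiting locks. */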
+static void ldlm_extent_policy(struct ldlm_resource *res,
+ struct ldlm_lock *lock, int *flags)
+{
+ struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
+
+ if (lock->l_export == NULL)
+ /*
+ * This is a local lock taken by the server (e.g., as part of
+ * OST-side locking or unlink handling). Expansion doesn't
+ * make a lot of sense for local locks, because they are
+ * dropped immediately on operation completion and would only
+ * conflict with other threads.
+ */
+ return;
+
+ if (lock->l_policy_data.l_extent.start == 0 &&
+ lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
+ /* fast-path whole-file locks */
+ return;
+
+ ldlm_extent_internal_policy_granted(lock, &new_ex);
+ ldlm_extent_internal_policy_waiting(lock, &new_ex);
+
+ if (new_ex.start != lock->l_policy_data.l_extent.start ||
+ new_ex.end != lock->l_policy_data.l_extent.end) {
+ *flags |= LDLM_FL_LOCK_CHANGED;
+ lock->l_policy_data.l_extent.start = new_ex.start;
+ lock->l_policy_data.l_extent.end = new_ex.end;
+ }
+}
+
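+/* A resource is treated as contended for ns_contention_time seconds
+ * after the moment a request last saw more than ns_contended_locks
+ * conflicting locks on it. */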
+static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
+{
+ struct ldlm_resource *res = lock->l_resource;
+ cfs_time_t now = cfs_time_current();
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
+ return 1;
+
+ CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
+ if (contended_locks > res->lr_namespace->ns_contended_locks)
+ res->lr_contention_time = now;
+ return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
+ cfs_time_seconds(res->lr_namespace->ns_contention_time)));
+}
+
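+/* State shared with ldlm_extent_compat_cb() while walking the granted
+ * interval trees: the blocking-AST work list, the lock being enqueued,
+ * the current tree's lock mode, and pointers to the contended-lock
+ * counter and the compatibility result. */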
+struct ldlm_extent_compat_args {
+ struct list_head *work_list;
+ struct ldlm_lock *lock;
+ ldlm_mode_t mode;
+ int *locks;
+ int *compat;
+};
+
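+/* Interval tree callback: for each granted lock in this interval's
+ * policy group, queue a blocking-AST work item against the enqueued
+ * lock, count the group as contended unless it holds whole-file PR
+ * (glimpse) locks, and clear the compatibility flag. */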
+static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
+ void *data)
+{
+ struct ldlm_extent_compat_args *priv = data;
+ struct ldlm_interval *node = to_ldlm_interval(n);
+ struct ldlm_extent *extent;
+ struct list_head *work_list = priv->work_list;
+ struct ldlm_lock *lock, *enq = priv->lock;
+ ldlm_mode_t mode = priv->mode;
+ int count = 0;
+ ENTRY;
+
+ LASSERT(!list_empty(&node->li_group));
+
+ list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+ /* the interval tree contains only granted locks */
+ LASSERTF(mode == lock->l_granted_mode,
+ "mode = %s, lock->l_granted_mode = %s\n",
+ ldlm_lockname[mode],
+ ldlm_lockname[lock->l_granted_mode]);
+ count++;
+ if (lock->l_blocking_ast)
+ ldlm_add_ast_work_item(lock, enq, work_list);
+ }
+
+ /* don't count conflicting glimpse locks */
+ extent = ldlm_interval_extent(node);
+ if (!(mode == LCK_PR &&
+ extent->start == 0 && extent->end == OBD_OBJECT_EOF))
+ *priv->locks += count;
+
+ if (priv->compat)
+ *priv->compat = 0;
+
+ RETURN(INTERVAL_ITER_CONT);
+}
+
+/* Determine if the lock is compatible with all locks on the queue.
+ * We stop walking the queue if we hit ourselves so we don't take
+ * conflicting locks enqueued after us into account, or we'd wait forever.
+ *
+ * Returns:
+ * 0 if the lock is not compatible
+ * 1 if the lock is compatible
+ * 2 if this group lock is compatible and requires no further checking
+ * negative error, such as -EWOULDBLOCK for group locks
+ */
+static int
+ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
+ int *flags, ldlm_error_t *err,
+ struct list_head *work_list, int *contended_locks)
+{
+ struct list_head *tmp;
+ struct ldlm_lock *lock;
+ struct ldlm_resource *res = req->l_resource;
+ ldlm_mode_t req_mode = req->l_req_mode;
+ __u64 req_start = req->l_req_extent.start;
+ __u64 req_end = req->l_req_extent.end;
+ int compat = 1;
+ int scan = 0;
+ int check_contention;
+ ENTRY;
+
+ lockmode_verify(req_mode);
+
+ /* Use the interval tree to search the granted queue */
+ if (queue == &res->lr_granted) {
+ struct ldlm_interval_tree *tree;
+ struct ldlm_extent_compat_args data = {.work_list = work_list,
+ .lock = req,
+ .locks = contended_locks,
+ .compat = &compat };
+ struct interval_node_extent ex = { .start = req_start,
+ .end = req_end };
+ int idx, rc;
+
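+ /* A resource keeps one interval tree of granted locks per
+ * lock mode; check the request against each non-empty tree. */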
+ for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+ tree = &res->lr_itree[idx];
+ if (tree->lit_root == NULL) /* empty tree, skip it */
+ continue;
+
+ data.mode = tree->lit_mode;
+ if (lockmode_compat(req_mode, tree->lit_mode)) {
+ struct ldlm_interval *node;
+ struct ldlm_extent *extent;
+
+ if (req_mode != LCK_GROUP)
+ continue;
+
+ /* group lock, grant it immediately if
+ * compatible */
+ node = to_ldlm_interval(tree->lit_root);
+ extent = ldlm_interval_extent(node);
+ if (req->l_policy_data.l_extent.gid ==
+ extent->gid)
+ RETURN(2);
+ }
+
+ if (tree->lit_mode == LCK_GROUP) {
+ if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+ compat = -EWOULDBLOCK;
+ goto destroylock;
+ }
+
+ *flags |= LDLM_FL_NO_TIMEOUT;
+ if (!work_list)
+ RETURN(0);
+
+ /* If the work list is not NULL, add all
+ * locks in the tree to the work list. */
+ compat = 0;
+ interval_iterate(tree->lit_root,
+ ldlm_extent_compat_cb, &data);
+ continue;
+ }
+
+ if (!work_list) {
+ rc = interval_is_overlapped(tree->lit_root, &ex);
+ if (rc)
+ RETURN(0);
+ } else {
+ interval_search(tree->lit_root, &ex,
+ ldlm_extent_compat_cb, &data);
+ if (!list_empty(work_list) && compat)
+ compat = 0;
+ }
+ }
+ } else { /* for waiting queue */
+ list_for_each(tmp, queue) {
+ check_contention = 1;
+
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+
+ if (req == lock)
+ break;
+
+ if (unlikely(scan)) {
+ /* We only get here if we are queuing a GROUP lock
+ * and have met an incompatible one. The main idea
+ * is to insert the GROUP lock past any compatible
+ * GROUP lock in the waiting queue or, if there is
+ * none, in front of the first non-GROUP lock */
+ if (lock->l_req_mode != LCK_GROUP) {
+ /* Ok, we hit a non-GROUP lock; there should
+ * be no more GROUP locks later on, so queue
+ * in front of the first non-GROUP lock */
+
+ ldlm_resource_insert_lock_after(lock, req);
+ list_del_init(&lock->l_res_link);
+ ldlm_resource_insert_lock_after(req, lock);
+ compat = 0;
+ break;
+ }
+ if (req->l_policy_data.l_extent.gid ==
+ lock->l_policy_data.l_extent.gid) {
+ /* found it */
+ ldlm_resource_insert_lock_after(lock, req);
+ compat = 0;
+ break;
+ }
+ continue;
+ }
+
+ /* locks are compatible, overlap doesn't matter */
+ if (lockmode_compat(lock->l_req_mode, req_mode)) {
+ if (req_mode == LCK_PR &&
+ ((lock->l_policy_data.l_extent.start <=
+ req->l_policy_data.l_extent.start) &&
+ (lock->l_policy_data.l_extent.end >=
+ req->l_policy_data.l_extent.end))) {
+ /* If we met a PR lock just like us or wider,
+ * and nobody down the list conflicted with
+ * it, that means we can skip processing of
+ * the rest of the list and safely place
+ * ourselves at the end of the list, or grant
+ * (depending on whether we met conflicting
+ * locks earlier in the list).
+ * On the first enqueue only, we keep
+ * traversing if there is something conflicting
+ * down the list, because we need to make sure
+ * that something is marked as AST_SENT as
+ * well; with an empty work list we would exit
+ * on the first conflict met. */
+ /* There IS a case where such a flag is not
+ * set for a lock, yet it blocks something.
+ * Luckily for us this happens only during
+ * destroy, so the lock is exclusive. So here
+ * we are safe. */
+ if (!(lock->l_flags & LDLM_FL_AST_SENT)) {
+ RETURN(compat);
+ }
+ }
+
+ /* non-group locks are compatible, overlap doesn't
+ matter */
+ if (likely(req_mode != LCK_GROUP))
+ continue;
+
+ /* If we are trying to get a GROUP lock and there is
+ * another one of this kind, we need to compare gids */
+ if (req->l_policy_data.l_extent.gid ==
+ lock->l_policy_data.l_extent.gid) {
+ /* If an existing lock with a matching gid is
+ * granted, we grant the new one too. */
+ if (lock->l_req_mode == lock->l_granted_mode)
+ RETURN(2);
+
+ /* Otherwise we are scanning the queue of waiting
+ * locks and it means the current request would
+ * block along with the existing lock (which is
+ * already blocked).
+ * If we are in nonblocking mode, return
+ * immediately. */
+ if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+ compat = -EWOULDBLOCK;
+ goto destroylock;
+ }
+ /* If this group lock is compatible with another
+ * group lock on the waiting list, they must be
+ * together in the list, so they can be granted
+ * at the same time. Otherwise the later lock
+ * can get stuck behind another, incompatible,
+ * lock. */
+ ldlm_resource_insert_lock_after(lock, req);
+ /* Because 'lock' is not granted, we can stop
+ * processing this queue and return immediately.
+ * There is no need to check the rest of the
+ * list. */
+ RETURN(0);
+ }
+ }
+
+ if (unlikely(req_mode == LCK_GROUP &&
+ (lock->l_req_mode != lock->l_granted_mode))) {
+ scan = 1;
+ compat = 0;
+ if (lock->l_req_mode != LCK_GROUP) {
+ /* Ok, we hit a non-GROUP lock; there should be
+ * no more GROUP locks later on, so queue in
+ * front of the first non-GROUP lock */
+
+ ldlm_resource_insert_lock_after(lock, req);
+ list_del_init(&lock->l_res_link);
+ ldlm_resource_insert_lock_after(req, lock);
+ break;
+ }
+ if (req->l_policy_data.l_extent.gid ==
+ lock->l_policy_data.l_extent.gid) {
+ /* found it */
+ ldlm_resource_insert_lock_after(lock, req);
+ break;
+ }
+ continue;
+ }
+
+ if (unlikely(lock->l_req_mode == LCK_GROUP)) {
+ /* If the compared lock is GROUP, then the requested
+ * lock is PR/PW, so this is not compatible; the
+ * extent range does not matter */
+ if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+ compat = -EWOULDBLOCK;
+ goto destroylock;
+ } else {
+ *flags |= LDLM_FL_NO_TIMEOUT;
+ }
+ } else if (lock->l_policy_data.l_extent.end < req_start ||
+ lock->l_policy_data.l_extent.start > req_end) {
+ /* if a non-group lock doesn't overlap, skip it */
+ continue;
+ } else if (lock->l_req_extent.end < req_start ||
+ lock->l_req_extent.start > req_end) {
+ /* false contention, the requests don't really overlap */
+ check_contention = 0;
+ }
+
+ if (!work_list)
+ RETURN(0);
+
+ /* don't count conflicting glimpse locks */
+ if (lock->l_req_mode == LCK_PR &&
+ lock->l_policy_data.l_extent.start == 0 &&
+ lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
+ check_contention = 0;
+
+ *contended_locks += check_contention;
+
+ compat = 0;
+ if (lock->l_blocking_ast)
+ ldlm_add_ast_work_item(lock, req, work_list);
+ }
+ }
+
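+ /* If the resource is contended and the client allows it
+ * (LDLM_FL_DENY_ON_CONTENTION), deny small non-GROUP extents
+ * (up to ns_max_nolock_size bytes) with -EUSERS, which is
+ * intended to make the client fall back to lockless I/O. */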
+ if (ldlm_check_contention(req, *contended_locks) &&
+ compat == 0 &&
+ (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
+ req->l_req_mode != LCK_GROUP &&
+ req_end - req_start <=
+ req->l_resource->lr_namespace->ns_max_nolock_size)
+ GOTO(destroylock, compat = -EUSERS);
+
+ RETURN(compat);
+destroylock:
+ list_del_init(&req->l_res_link);
+ ldlm_lock_destroy_nolock(req);
+ *err = compat;
+ RETURN(compat);
+}
+
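+/* Throw away a blocking-AST work list that will not be sent: clear the
+ * AST_SENT flag on each lock, drop the reference it holds on its
+ * blocking lock, and release the work list's reference on the lock
+ * itself. */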
+static void discard_bl_list(struct list_head *bl_list)
+{
+ struct list_head *tmp, *pos;
+ ENTRY;
+
+ list_for_each_safe(pos, tmp, bl_list) {
+ struct ldlm_lock *lock =
+ list_entry(pos, struct ldlm_lock, l_bl_ast);
+
+ list_del_init(&lock->l_bl_ast);
+ LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
+ lock->l_flags &= ~LDLM_FL_AST_SENT;
+ LASSERT(lock->l_bl_ast_run == 0);
+ LASSERT(lock->l_blocking_lock);
+ LDLM_LOCK_RELEASE(lock->l_blocking_lock);
+ lock->l_blocking_lock = NULL;
+ LDLM_LOCK_RELEASE(lock);
+ }
+ EXIT;
+}