LU-4423 lustre: don't declare extern variables in C files.
diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index 44680dd..12ffa08 100644
@@ -136,8 +136,6 @@ const char *ldlm_it2str(enum ldlm_intent_flags it)
 }
 EXPORT_SYMBOL(ldlm_it2str);
 
-extern struct kmem_cache *ldlm_lock_slab;
-
 #ifdef HAVE_SERVER_SUPPORT
 static ldlm_processing_policy ldlm_processing_policy_table[] = {
        [LDLM_PLAIN]    = ldlm_process_plain_lock,
@@ -478,7 +476,7 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
 
         lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
                              LDLM_NSS_LOCKS);
-       INIT_LIST_HEAD(&lock->l_handle.h_link);
+       INIT_LIST_HEAD_RCU(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, &lock_handle_ops);
 
         lu_ref_init(&lock->l_reference);
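
The handle link is now initialized with INIT_LIST_HEAD_RCU() before class_handle_hash() links it into the RCU-protected handle hash, so lockless readers never observe a half-initialized node. Below is a minimal sketch of that pattern with a simplified, hypothetical handle structure (demo_handle, demo_bucket); it is not the actual portals_handle layout.

/*
 * Minimal sketch (hypothetical demo_* names, not the Lustre structures)
 * of initializing a list node with the RCU-safe variant before it is
 * published to a list that lockless readers may traverse.
 */
#include <linux/types.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

struct demo_handle {
	struct list_head	h_link;		/* linked into an RCU-read list */
	__u64			h_cookie;
};

static LIST_HEAD(demo_bucket);
static DEFINE_SPINLOCK(demo_bucket_lock);

static void demo_handle_hash(struct demo_handle *h, __u64 cookie)
{
	h->h_cookie = cookie;
	/* RCU-safe init: both pointers are written with WRITE_ONCE(), so a
	 * concurrent lockless reader never sees a half-built node. */
	INIT_LIST_HEAD_RCU(&h->h_link);

	spin_lock(&demo_bucket_lock);
	list_add_rcu(&h->h_link, &demo_bucket);
	spin_unlock(&demo_bucket_lock);
}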
@@ -665,12 +663,19 @@ static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                 * discard dirty data, rather than writing back. */
                if (ldlm_is_ast_discard_data(new))
                        ldlm_set_discard_data(lock);
-               LASSERT(list_empty(&lock->l_bl_ast));
-               list_add(&lock->l_bl_ast, work_list);
-                LDLM_LOCK_GET(lock);
-                LASSERT(lock->l_blocking_lock == NULL);
-                lock->l_blocking_lock = LDLM_LOCK_GET(new);
-        }
+
+               /* A lock can be converted from a blocking state back to
+                * granted after a lock convert or COS downgrade, but it may
+                * still sit on an older bl_list because that list is managed
+                * only by ldlm_work_bl_ast_lock(); let it be processed there.
+                */
+               if (list_empty(&lock->l_bl_ast)) {
+                       list_add(&lock->l_bl_ast, work_list);
+                       LDLM_LOCK_GET(lock);
+               }
+               LASSERT(lock->l_blocking_lock == NULL);
+               lock->l_blocking_lock = LDLM_LOCK_GET(new);
+       }
 }
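
ldlm_add_bl_work_item() now queues the lock only when l_bl_ast is empty, since a lock convert or COS downgrade may have left it on an older bl_list; re-adding a node that is already linked would corrupt both lists. A minimal sketch of that guard, with hypothetical demo_* names rather than the real ldlm structures:

/*
 * "Add only if not already queued": membership is tested with list_empty()
 * on the node itself, which is only valid because removal always uses
 * list_del_init() to reset the node.
 */
#include <linux/list.h>

struct demo_lock {
	struct list_head	l_bl_ast;	/* empty <=> not queued */
	int			l_refs;
};

static void demo_queue_bl_work(struct demo_lock *lock,
			       struct list_head *work_list)
{
	if (list_empty(&lock->l_bl_ast)) {	/* not on any bl_list yet */
		list_add(&lock->l_bl_ast, work_list);
		lock->l_refs++;			/* pin while queued */
	}
	/* else: already on an older bl_list, it will be handled there */
}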
 
 /**
@@ -1077,7 +1082,7 @@ void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
 {
        struct sl_insert_point prev;
 
-       LASSERT(lock->l_req_mode == lock->l_granted_mode);
+       LASSERT(ldlm_is_granted(lock));
 
        search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
        ldlm_granted_list_add_lock(lock, &prev);
@@ -1139,6 +1144,7 @@ struct lock_match_data {
        enum ldlm_mode          *lmd_mode;
        union ldlm_policy_data  *lmd_policy;
        __u64                    lmd_flags;
+       __u64                    lmd_skip_flags;
        int                      lmd_unref;
 };
 
@@ -1210,6 +1216,10 @@ static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data)
        if (!equi(data->lmd_flags & LDLM_FL_LOCAL_ONLY, ldlm_is_local(lock)))
                return INTERVAL_ITER_CONT;
 
+       /* Filter out locks that have any of the skip flags set */
+       if (data->lmd_skip_flags & lock->l_flags)
+               return INTERVAL_ITER_CONT;
+
        if (data->lmd_flags & LDLM_FL_TEST_LOCK) {
                LDLM_LOCK_GET(lock);
                ldlm_lock_touch_in_lru(lock);
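
The new lmd_skip_flags test is a plain bitwise overlap check: a candidate lock is rejected as soon as any of its l_flags bits appears in the skip mask. A tiny sketch with hypothetical flag values (the real bits are the LDLM_FL_* definitions):

/*
 * Minimal sketch of the skip-flag filter.  DEMO_FL_* values are
 * hypothetical; the real flags live in lustre_dlm_flags.h.
 */
#include <linux/types.h>

#define DEMO_FL_CBPENDING	0x0000000000000004ULL	/* hypothetical */
#define DEMO_FL_CANCELING	0x0000000000000800ULL	/* hypothetical */

static bool demo_lock_skipped(__u64 lock_flags, __u64 skip_flags)
{
	/* any overlapping bit means "skip this lock and keep searching" */
	return (skip_flags & lock_flags) != 0;
}

/* e.g. a caller that does not want locks already scheduled for cancel:
 *	demo_lock_skipped(lock_flags, DEMO_FL_CBPENDING | DEMO_FL_CANCELING)
 */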
@@ -1255,6 +1265,8 @@ static struct ldlm_lock *search_itree(struct ldlm_resource *res,
        };
        int idx;
 
+       data->lmd_lock = NULL;
+
        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                struct ldlm_interval_tree *tree = &res->lr_itree[idx];
 
@@ -1266,8 +1278,11 @@ static struct ldlm_lock *search_itree(struct ldlm_resource *res,
 
                interval_search(tree->lit_root, &ext,
                                itree_overlap_cb, data);
+               if (data->lmd_lock)
+                       return data->lmd_lock;
        }
-       return data->lmd_lock;
+
+       return NULL;
 }
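
search_itree() and search_queue() now clear lmd_lock before scanning, and search_itree() returns as soon as one per-mode interval tree produces a match, rather than finishing all LCK_MODE_NUM trees and reading the slot afterwards. A minimal sketch of that pattern with hypothetical demo_* types:

/*
 * The shared result slot is cleared before the scan so no stale result from
 * a previous call leaks through, and the loop stops at the first match.
 */
#include <linux/stddef.h>

struct demo_match_data {
	void	*lmd_lock;	/* result slot filled by the search callback */
};

#define DEMO_MODE_NUM	8

static void *demo_search_trees(void *trees[DEMO_MODE_NUM],
			       struct demo_match_data *data,
			       void (*search_one)(void *tree,
						  struct demo_match_data *data))
{
	int idx;

	data->lmd_lock = NULL;			/* no stale result */

	for (idx = 0; idx < DEMO_MODE_NUM; idx++) {
		if (!trees[idx])
			continue;
		search_one(trees[idx], data);	/* may set data->lmd_lock */
		if (data->lmd_lock)
			return data->lmd_lock;	/* stop at the first match */
	}

	return NULL;
}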
 
 
@@ -1285,11 +1300,14 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
        struct ldlm_lock *lock;
        int rc;
 
+       data->lmd_lock = NULL;
+
        list_for_each_entry(lock, queue, l_res_link) {
                rc = lock_matches(lock, data);
                if (rc == INTERVAL_ITER_STOP)
                        return data->lmd_lock;
        }
+
        return NULL;
 }
 
@@ -1365,24 +1383,27 @@ EXPORT_SYMBOL(ldlm_lock_allow_match);
  * keep caller code unchanged), the context failure will be discovered by
  * caller sometime later.
  */
-enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
-                              const struct ldlm_res_id *res_id,
-                              enum ldlm_type type,
-                              union ldlm_policy_data *policy,
-                              enum ldlm_mode mode,
-                              struct lustre_handle *lockh, int unref)
+enum ldlm_mode ldlm_lock_match_with_skip(struct ldlm_namespace *ns,
+                                        __u64 flags, __u64 skip_flags,
+                                        const struct ldlm_res_id *res_id,
+                                        enum ldlm_type type,
+                                        union ldlm_policy_data *policy,
+                                        enum ldlm_mode mode,
+                                        struct lustre_handle *lockh, int unref)
 {
        struct lock_match_data data = {
-               .lmd_old        = NULL,
-               .lmd_lock       = NULL,
-               .lmd_mode       = &mode,
-               .lmd_policy     = policy,
-               .lmd_flags      = flags,
-               .lmd_unref      = unref,
+               .lmd_old = NULL,
+               .lmd_lock = NULL,
+               .lmd_mode = &mode,
+               .lmd_policy = policy,
+               .lmd_flags = flags,
+               .lmd_skip_flags = skip_flags,
+               .lmd_unref = unref,
        };
        struct ldlm_resource *res;
        struct ldlm_lock *lock;
-       int rc = 0;
+       int matched;
+
        ENTRY;
 
        if (ns == NULL) {
@@ -1403,98 +1424,78 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
 
        LDLM_RESOURCE_ADDREF(res);
        lock_res(res);
-
        if (res->lr_type == LDLM_EXTENT)
                lock = search_itree(res, &data);
        else
                lock = search_queue(&res->lr_granted, &data);
-       if (lock != NULL)
-               GOTO(out, rc = 1);
-       if (flags & LDLM_FL_BLOCK_GRANTED)
-               GOTO(out, rc = 0);
-       lock = search_queue(&res->lr_waiting, &data);
-       if (lock != NULL)
-               GOTO(out, rc = 1);
-
-        EXIT;
- out:
-        unlock_res(res);
-        LDLM_RESOURCE_DELREF(res);
-        ldlm_resource_putref(res);
+       if (!lock && !(flags & LDLM_FL_BLOCK_GRANTED))
+               lock = search_queue(&res->lr_waiting, &data);
+       matched = lock ? mode : 0;
+       unlock_res(res);
+       LDLM_RESOURCE_DELREF(res);
+       ldlm_resource_putref(res);
 
-        if (lock) {
-                ldlm_lock2handle(lock, lockh);
-                if ((flags & LDLM_FL_LVB_READY) &&
+       if (lock) {
+               ldlm_lock2handle(lock, lockh);
+               if ((flags & LDLM_FL_LVB_READY) &&
                    (!ldlm_is_lvb_ready(lock))) {
                        __u64 wait_flags = LDLM_FL_LVB_READY |
                                LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
-                        struct l_wait_info lwi;
-                        if (lock->l_completion_ast) {
-                                int err = lock->l_completion_ast(lock,
-                                                          LDLM_FL_WAIT_NOREPROC,
-                                                                 NULL);
-                                if (err) {
-                                        if (flags & LDLM_FL_TEST_LOCK)
-                                                LDLM_LOCK_RELEASE(lock);
-                                        else
-                                                ldlm_lock_decref_internal(lock,
-                                                                          mode);
-                                        rc = 0;
-                                        goto out2;
-                                }
-                        }
+                       struct l_wait_info lwi;
+
+                       if (lock->l_completion_ast) {
+                               int err = lock->l_completion_ast(lock,
+                                                       LDLM_FL_WAIT_NOREPROC,
+                                                       NULL);
+                               if (err)
+                                       GOTO(out_fail_match, matched = 0);
+                       }
 
-                        lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
-                                               NULL, LWI_ON_SIGNAL_NOOP, NULL);
+                       lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
+                                              NULL, LWI_ON_SIGNAL_NOOP, NULL);
 
                        /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
-                       l_wait_event(lock->l_waitq,
-                                    lock->l_flags & wait_flags,
+                       l_wait_event(lock->l_waitq, lock->l_flags & wait_flags,
                                     &lwi);
-                       if (!ldlm_is_lvb_ready(lock)) {
-                                if (flags & LDLM_FL_TEST_LOCK)
-                                        LDLM_LOCK_RELEASE(lock);
-                                else
-                                        ldlm_lock_decref_internal(lock, mode);
-                                rc = 0;
-                        }
-                }
-        }
- out2:
-        if (rc) {
-               LDLM_DEBUG(lock, "matched (%llu %llu)",
-                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
-                                res_id->name[2] : policy->l_extent.start,
-                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
-                                res_id->name[3] : policy->l_extent.end);
-
-                /* check user's security context */
-                if (lock->l_conn_export &&
-                    sptlrpc_import_check_ctx(
-                                class_exp2cliimp(lock->l_conn_export))) {
-                        if (!(flags & LDLM_FL_TEST_LOCK))
-                                ldlm_lock_decref_internal(lock, mode);
-                        rc = 0;
-                }
+                       if (!ldlm_is_lvb_ready(lock))
+                               GOTO(out_fail_match, matched = 0);
+               }
+
+               /* check user's security context */
+               if (lock->l_conn_export &&
+                   sptlrpc_import_check_ctx(
+                               class_exp2cliimp(lock->l_conn_export)))
+                       GOTO(out_fail_match, matched = 0);
 
-                if (flags & LDLM_FL_TEST_LOCK)
-                        LDLM_LOCK_RELEASE(lock);
+               LDLM_DEBUG(lock, "matched (%llu %llu)",
+                          (type == LDLM_PLAIN || type == LDLM_IBITS) ?
+                          res_id->name[2] : policy->l_extent.start,
+                          (type == LDLM_PLAIN || type == LDLM_IBITS) ?
+                          res_id->name[3] : policy->l_extent.end);
+
+out_fail_match:
+               if (flags & LDLM_FL_TEST_LOCK)
+                       LDLM_LOCK_RELEASE(lock);
+               else if (!matched)
+                       ldlm_lock_decref_internal(lock, mode);
+       }
 
-        } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
-                LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
+       /* less verbose for test-only */
+       if (!matched && !(flags & LDLM_FL_TEST_LOCK)) {
+               LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
                                  "%llu/%llu (%llu %llu)", ns,
-                                  type, mode, res_id->name[0], res_id->name[1],
-                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
-                                        res_id->name[2] :policy->l_extent.start,
-                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
-                                        res_id->name[3] : policy->l_extent.end);
-        }
+                                 type, mode, res_id->name[0], res_id->name[1],
+                                 (type == LDLM_PLAIN || type == LDLM_IBITS) ?
+                                 res_id->name[2] : policy->l_extent.start,
+                                 (type == LDLM_PLAIN || type == LDLM_IBITS) ?
+                                 res_id->name[3] : policy->l_extent.end);
+       }
        if (data.lmd_old != NULL)
                LDLM_LOCK_PUT(data.lmd_old);
 
-       return rc ? mode : 0;
+       return matched;
 }
-EXPORT_SYMBOL(ldlm_lock_match);
+EXPORT_SYMBOL(ldlm_lock_match_with_skip);
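
With the skip-aware variant exported, the old ldlm_lock_match() entry point presumably becomes a thin wrapper that passes an empty skip mask. The declaration is not part of this hunk (it would live in lustre_dlm.h), so the following is only a sketch of the expected call shape:

/*
 * Hedged sketch of the assumed wrapper; not taken from this hunk.
 */
static inline enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns,
					     __u64 flags,
					     const struct ldlm_res_id *res_id,
					     enum ldlm_type type,
					     union ldlm_policy_data *policy,
					     enum ldlm_mode mode,
					     struct lustre_handle *lockh,
					     int unref)
{
	/* skip_flags == 0 keeps the original "match anything" behaviour */
	return ldlm_lock_match_with_skip(ns, flags, 0, res_id, type, policy,
					 mode, lockh, unref);
}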
 
 enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
                                           __u64 *bits)
@@ -1727,7 +1728,8 @@ restart:
  * set, skip all the enqueueing and delegate lock processing to intent policy
  * function.
  */
-enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
+enum ldlm_error ldlm_lock_enqueue(const struct lu_env *env,
+                                 struct ldlm_namespace *ns,
                                  struct ldlm_lock **lockp,
                                  void *cookie, __u64 *flags)
 {
@@ -1741,8 +1743,8 @@ enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
         /* policies are not executed on the client or during replay */
         if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
             && !local && ns->ns_policy) {
-                rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
-                                   NULL);
+               rc = ns->ns_policy(env, ns, lockp, cookie, lock->l_req_mode,
+                                  *flags, NULL);
                 if (rc == ELDLM_LOCK_REPLACED) {
                         /* The lock that was returned has already been granted,
                          * and placed into lockp.  If it's not the same as the
@@ -1755,7 +1757,7 @@ enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
                         *flags |= LDLM_FL_LOCK_CHANGED;
                         RETURN(0);
                } else if (rc != ELDLM_OK &&
-                          lock->l_req_mode == lock->l_granted_mode) {
+                          ldlm_is_granted(lock)) {
                        LASSERT(*flags & LDLM_FL_RESENT);
                        /* It may happen that ns_policy returns an error in
                         * resend case, object may be unlinked or just some
@@ -1778,7 +1780,7 @@ enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
                 * Take NO_TIMEOUT from the lock as it is inherited through
                 * LDLM_FL_INHERIT_MASK */
                *flags |= LDLM_FL_LOCK_CHANGED;
-               if (lock->l_req_mode != lock->l_granted_mode)
+               if (!ldlm_is_granted(lock))
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                *flags |= lock->l_flags & LDLM_FL_NO_TIMEOUT;
                RETURN(ELDLM_OK);
@@ -1791,8 +1793,8 @@ enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
        if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
                OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
 
-        lock_res_and_lock(lock);
-        if (local && lock->l_req_mode == lock->l_granted_mode) {
+       lock_res_and_lock(lock);
+       if (local && ldlm_is_granted(lock)) {
                 /* The server returned a blocked lock, but it was granted
                  * before we got a chance to actually enqueue it.  We don't
                  * need to do anything else. */
@@ -1989,7 +1991,7 @@ int ldlm_handle_conflict_lock(struct ldlm_lock *lock, __u64 *flags,
                        RETURN(-EAGAIN);
 
                /* lock was granted while resource was unlocked. */
-               if (lock->l_granted_mode == lock->l_req_mode) {
+               if (ldlm_is_granted(lock)) {
                        /* bug 11300: if the lock has been granted,
                         * break earlier because otherwise, we will go
                         * to restart and ldlm_resource_unlink will be
@@ -2015,27 +2017,21 @@ int ldlm_handle_conflict_lock(struct ldlm_lock *lock, __u64 *flags,
  */
 void ldlm_discard_bl_list(struct list_head *bl_list)
 {
-       struct list_head *tmp, *pos;
-        ENTRY;
+       struct ldlm_lock *lock, *tmp;
 
-       list_for_each_safe(pos, tmp, bl_list) {
-                struct ldlm_lock *lock =
-                       list_entry(pos, struct ldlm_lock, l_bl_ast);
+       ENTRY;
 
+       list_for_each_entry_safe(lock, tmp, bl_list, l_bl_ast) {
+               LASSERT(!list_empty(&lock->l_bl_ast));
                list_del_init(&lock->l_bl_ast);
-               LASSERT(ldlm_is_ast_sent(lock));
                ldlm_clear_ast_sent(lock);
                LASSERT(lock->l_bl_ast_run == 0);
-               LASSERT(lock->l_blocking_lock);
-               LDLM_LOCK_RELEASE(lock->l_blocking_lock);
-               lock->l_blocking_lock = NULL;
+               ldlm_clear_blocking_lock(lock);
                LDLM_LOCK_RELEASE(lock);
        }
        EXIT;
 }
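
ldlm_discard_bl_list() is rewritten with list_for_each_entry_safe(), whose second cursor already points at the next entry, so the current one can be unlinked inside the loop body. A minimal sketch of the pattern with a hypothetical demo_item structure:

/*
 * list_for_each_entry_safe() keeps a lookahead cursor ("tmp"), so deleting
 * the current entry does not break the iteration.
 */
#include <linux/list.h>

struct demo_item {
	struct list_head	link;
};

static void demo_discard_all(struct list_head *head)
{
	struct demo_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, head, link) {
		list_del_init(&item->link);	/* safe: tmp already points at the next entry */
		/* ... drop references / free "item" here ... */
	}
}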
 
-#endif
-
 /**
  * Process a call to blocking AST callback for a lock in ast_work list
  */
@@ -2043,9 +2039,11 @@ static int
 ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
 {
        struct ldlm_cb_set_arg *arg = opaq;
-       struct ldlm_lock_desc   d;
-       int                     rc;
-       struct ldlm_lock       *lock;
+       struct ldlm_lock *lock;
+       struct ldlm_lock_desc d;
+       struct ldlm_bl_desc bld;
+       int rc;
+
        ENTRY;
 
        if (list_empty(arg->list))
@@ -2053,16 +2051,23 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
 
        lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
 
-       /* nobody should touch l_bl_ast */
+       /* nobody should touch l_bl_ast, but some locks in the list may have
+        * become granted after a lock convert or COS downgrade; such locks
+        * are simply removed from the list and skipped here.
+        */
        lock_res_and_lock(lock);
        list_del_init(&lock->l_bl_ast);
 
-       LASSERT(ldlm_is_ast_sent(lock));
-       LASSERT(lock->l_bl_ast_run == 0);
-       LASSERT(lock->l_blocking_lock);
-       lock->l_bl_ast_run++;
-       unlock_res_and_lock(lock);
+       /* The lock is not a blocking lock anymore, but it was kept in the
+        * list because it can be managed only here.
+        */
+       if (!ldlm_is_ast_sent(lock)) {
+               unlock_res_and_lock(lock);
+               LDLM_LOCK_RELEASE(lock);
+               RETURN(0);
+       }
 
+       LASSERT(lock->l_blocking_lock);
        ldlm_lock2desc(lock->l_blocking_lock, &d);
        /* copy blocking lock ibits in cancel_bits as well,
         * new client may use them for lock convert and it is
@@ -2072,54 +2077,23 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
        d.l_policy_data.l_inodebits.cancel_bits =
                lock->l_blocking_lock->l_policy_data.l_inodebits.bits;
 
-       rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING);
-       LDLM_LOCK_RELEASE(lock->l_blocking_lock);
-       lock->l_blocking_lock = NULL;
-       LDLM_LOCK_RELEASE(lock);
-
-       RETURN(rc);
-}
-
-/**
- * Process a call to completion AST callback for a lock in ast_work list
- */
-static int
-ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
-{
-       struct ldlm_cb_set_arg  *arg = opaq;
-       int                      rc = 0;
-       struct ldlm_lock        *lock;
-       ldlm_completion_callback completion_callback;
-       ENTRY;
-
-       if (list_empty(arg->list))
-               RETURN(-ENOENT);
-
-       lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
-
-       /* It's possible to receive a completion AST before we've set
-        * the l_completion_ast pointer: either because the AST arrived
-        * before the reply, or simply because there's a small race
-        * window between receiving the reply and finishing the local
-        * enqueue. (bug 842)
-        *
-        * This can't happen with the blocking_ast, however, because we
-        * will never call the local blocking_ast until we drop our
-        * reader/writer reference, which we won't do until we get the
-        * reply and finish enqueueing. */
+       /* The blocking lock is being destroyed here, but some information
+        * about it may still be needed inside l_blocking_ast() below,
+        * e.g. in mdt_blocking_ast(), so save the needed data in bl_desc.
+        */
+       bld.bl_same_client = lock->l_client_cookie ==
+                            lock->l_blocking_lock->l_client_cookie;
+       bld.bl_cos_incompat = ldlm_is_cos_incompat(lock->l_blocking_lock);
+       arg->bl_desc = &bld;
 
-       /* nobody should touch l_cp_ast */
-       lock_res_and_lock(lock);
-       list_del_init(&lock->l_cp_ast);
-       LASSERT(ldlm_is_cp_reqd(lock));
-       /* save l_completion_ast since it can be changed by
-        * mds_intent_policy(), see bug 14225 */
-       completion_callback = lock->l_completion_ast;
-       ldlm_clear_cp_reqd(lock);
+       LASSERT(ldlm_is_ast_sent(lock));
+       LASSERT(lock->l_bl_ast_run == 0);
+       lock->l_bl_ast_run++;
+       ldlm_clear_blocking_lock(lock);
        unlock_res_and_lock(lock);
 
-       if (completion_callback != NULL)
-               rc = completion_callback(lock, 0, (void *)arg);
+       rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING);
+
        LDLM_LOCK_RELEASE(lock);
 
        RETURN(rc);
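
bl_same_client and bl_cos_incompat are captured into a local ldlm_bl_desc before ldlm_clear_blocking_lock() drops the blocking-lock reference, so the blocking AST (e.g. mdt_blocking_ast()) can still consult them. The real struct ldlm_bl_desc is declared elsewhere (lustre_dlm.h) and is not shown in this hunk; the sketch below only assumes the two fields actually set here:

/*
 * Hedged sketch of the descriptor shape implied by the assignments above;
 * demo_bl_desc is an assumption, not the real definition.
 */
struct demo_bl_desc {
	unsigned int	bl_same_client:1;	/* blocked and blocking lock
						 * belong to the same client */
	unsigned int	bl_cos_incompat:1;	/* blocking lock is
						 * COS-incompatible */
};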
@@ -2191,6 +2165,53 @@ int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
 
        RETURN(rc);
 }
+#endif
+
+/**
+ * Process a call to completion AST callback for a lock in ast_work list
+ */
+static int
+ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
+{
+       struct ldlm_cb_set_arg *arg = opaq;
+       struct ldlm_lock *lock;
+       ldlm_completion_callback completion_callback;
+       int rc = 0;
+
+       ENTRY;
+
+       if (list_empty(arg->list))
+               RETURN(-ENOENT);
+
+       lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
+
+       /* It's possible to receive a completion AST before we've set
+        * the l_completion_ast pointer: either because the AST arrived
+        * before the reply, or simply because there's a small race
+        * window between receiving the reply and finishing the local
+        * enqueue. (bug 842)
+        *
+        * This can't happen with the blocking_ast, however, because we
+        * will never call the local blocking_ast until we drop our
+        * reader/writer reference, which we won't do until we get the
+        * reply and finish enqueueing. */
+
+       /* nobody should touch l_cp_ast */
+       lock_res_and_lock(lock);
+       list_del_init(&lock->l_cp_ast);
+       LASSERT(ldlm_is_cp_reqd(lock));
+       /* save l_completion_ast since it can be changed by
+        * mds_intent_policy(), see bug 14225 */
+       completion_callback = lock->l_completion_ast;
+       ldlm_clear_cp_reqd(lock);
+       unlock_res_and_lock(lock);
+
+       if (completion_callback != NULL)
+               rc = completion_callback(lock, 0, (void *)arg);
+       LDLM_LOCK_RELEASE(lock);
+
+       RETURN(rc);
+}
 
 /**
  * Process list of locks in need of ASTs being sent.
@@ -2199,11 +2220,11 @@ int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
  * one.
  */
 int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
-                      ldlm_desc_ast_t ast_type)
+                     ldlm_desc_ast_t ast_type)
 {
        struct ldlm_cb_set_arg *arg;
-       set_producer_func       work_ast_lock;
-       int                     rc;
+       set_producer_func work_ast_lock;
+       int rc;
 
        if (list_empty(rpc_list))
                RETURN(0);
@@ -2216,24 +2237,26 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
        arg->list = rpc_list;
 
        switch (ast_type) {
-               case LDLM_WORK_BL_AST:
-                       arg->type = LDLM_BL_CALLBACK;
-                       work_ast_lock = ldlm_work_bl_ast_lock;
-                       break;
-               case LDLM_WORK_CP_AST:
-                       arg->type = LDLM_CP_CALLBACK;
-                       work_ast_lock = ldlm_work_cp_ast_lock;
-                       break;
-               case LDLM_WORK_REVOKE_AST:
-                       arg->type = LDLM_BL_CALLBACK;
-                       work_ast_lock = ldlm_work_revoke_ast_lock;
-                       break;
-               case LDLM_WORK_GL_AST:
-                       arg->type = LDLM_GL_CALLBACK;
-                       work_ast_lock = ldlm_work_gl_ast_lock;
-                       break;
-               default:
-                       LBUG();
+       case LDLM_WORK_CP_AST:
+               arg->type = LDLM_CP_CALLBACK;
+               work_ast_lock = ldlm_work_cp_ast_lock;
+               break;
+#ifdef HAVE_SERVER_SUPPORT
+       case LDLM_WORK_BL_AST:
+               arg->type = LDLM_BL_CALLBACK;
+               work_ast_lock = ldlm_work_bl_ast_lock;
+               break;
+       case LDLM_WORK_REVOKE_AST:
+               arg->type = LDLM_BL_CALLBACK;
+               work_ast_lock = ldlm_work_revoke_ast_lock;
+               break;
+       case LDLM_WORK_GL_AST:
+               arg->type = LDLM_GL_CALLBACK;
+               work_ast_lock = ldlm_work_gl_ast_lock;
+               break;
+#endif
+       default:
+               LBUG();
        }
 
        /* We create a ptlrpc request set with flow control extension.
@@ -2245,7 +2268,7 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
        if (arg->set == NULL)
                GOTO(out, rc = -ENOMEM);
 
-       ptlrpc_set_wait(arg->set);
+       ptlrpc_set_wait(NULL, arg->set);
        ptlrpc_set_destroy(arg->set);
 
        rc = atomic_read(&arg->restart) ? -ERESTART : 0;
@@ -2422,8 +2445,8 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
         ldlm_resource_unlink_lock(lock);
         ldlm_lock_destroy_nolock(lock);
 
-        if (lock->l_granted_mode == lock->l_req_mode)
-                ldlm_pool_del(&ns->ns_pool, lock);
+       if (ldlm_is_granted(lock))
+               ldlm_pool_del(&ns->ns_pool, lock);
 
         /* Make sure we will not be called again for same lock what is possible
          * if not to zero out lock->l_granted_mode */
@@ -2455,6 +2478,7 @@ int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data)
 EXPORT_SYMBOL(ldlm_lock_set_data);
 
 struct export_cl_data {
+       const struct lu_env     *ecl_env;
        struct obd_export       *ecl_exp;
        int                     ecl_loop;
 };
@@ -2507,10 +2531,17 @@ ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
  */
 int ldlm_export_cancel_blocked_locks(struct obd_export *exp)
 {
+       struct lu_env env;
        struct export_cl_data   ecl = {
                .ecl_exp        = exp,
                .ecl_loop       = 0,
        };
+       int rc;
+
+       rc = lu_env_init(&env, LCT_DT_THREAD);
+       if (rc)
+               RETURN(rc);
+       ecl.ecl_env = &env;
 
        while (!list_empty(&exp->exp_bl_list)) {
                struct ldlm_lock *lock;
@@ -2533,6 +2564,8 @@ int ldlm_export_cancel_blocked_locks(struct obd_export *exp)
                LDLM_LOCK_RELEASE(lock);
        }
 
+       lu_env_fini(&env);
+
        CDEBUG(D_DLMTRACE, "Export %p, canceled %d locks, "
               "left on hash table %d.\n", exp, ecl.ecl_loop,
               atomic_read(&exp->exp_lock_hash->hs_count));
@@ -2547,10 +2580,16 @@ int ldlm_export_cancel_blocked_locks(struct obd_export *exp)
  */
 int ldlm_export_cancel_locks(struct obd_export *exp)
 {
-       struct export_cl_data   ecl = {
-               .ecl_exp        = exp,
-               .ecl_loop       = 0,
-       };
+       struct export_cl_data ecl;
+       struct lu_env env;
+       int rc;
+
+       rc = lu_env_init(&env, LCT_DT_THREAD);
+       if (rc)
+               RETURN(rc);
+       ecl.ecl_env = &env;
+       ecl.ecl_exp = exp;
+       ecl.ecl_loop = 0;
 
        cfs_hash_for_each_empty(exp->exp_lock_hash,
                                ldlm_cancel_locks_for_export_cb, &ecl);
@@ -2564,6 +2603,8 @@ int ldlm_export_cancel_locks(struct obd_export *exp)
            exp->exp_obd->obd_stopping)
                ldlm_reprocess_recovery_done(exp->exp_obd->obd_namespace);
 
+       lu_env_fini(&env);
+
        return ecl.ecl_loop;
 }
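
Both export-cancel paths now bracket the lock walk with lu_env_init()/lu_env_fini() and hand the environment to the iteration via ecl_env. A minimal sketch of that bracket, with demo_walk_locks() as a hypothetical stand-in for the real hash/list iteration:

/* hypothetical iteration helper standing in for cfs_hash_for_each_empty()
 * or the exp_bl_list walk above */
static void demo_walk_locks(struct obd_export *exp, struct export_cl_data *ecl);

static int demo_cancel_with_env(struct obd_export *exp)
{
	struct export_cl_data ecl;
	struct lu_env env;
	int rc;

	rc = lu_env_init(&env, LCT_DT_THREAD);	/* per-call environment */
	if (rc)
		return rc;

	ecl.ecl_env = &env;
	ecl.ecl_exp = exp;
	ecl.ecl_loop = 0;

	demo_walk_locks(exp, &ecl);		/* cancel locks, counting in ecl_loop */

	lu_env_fini(&env);			/* always paired with lu_env_init() */
	return ecl.ecl_loop;
}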
 
@@ -2574,13 +2615,18 @@ int ldlm_export_cancel_locks(struct obd_export *exp)
  * conversion may fail if lock was canceled before downgrade, but it doesn't
  * indicate any problem, because such lock has no reader or writer, and will
  * be released soon.
- * Used by Commit on Sharing (COS) code only for now.
+ *
+ * Used by the Commit on Sharing (COS) code to force a commit of object
+ * changes in case of conflict. The converted lock is treated as a new lock;
+ * all blocking AST state is cleared, so any pending or newly blocked lock on
+ * it will trigger a new blocking_ast call and force a resource object commit.
  *
  * \param lock A lock to convert
  * \param new_mode new lock mode
  */
 void ldlm_lock_mode_downgrade(struct ldlm_lock *lock, enum ldlm_mode new_mode)
 {
+#ifdef HAVE_SERVER_SUPPORT
        ENTRY;
 
        LASSERT(new_mode == LCK_COS);
@@ -2601,14 +2647,20 @@ void ldlm_lock_mode_downgrade(struct ldlm_lock *lock, enum ldlm_mode new_mode)
         * ldlm_grant_lock() called below.
         */
        ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock);
+
+       /* Consider the downgraded lock as a new lock and clear all state
+        * related to previous blocking AST processing.
+        */
+       ldlm_clear_blocking_data(lock);
+
        lock->l_req_mode = new_mode;
        ldlm_grant_lock(lock, NULL);
-
        unlock_res_and_lock(lock);
 
        ldlm_reprocess_all(lock->l_resource);
 
        EXIT;
+#endif
 }
 EXPORT_SYMBOL(ldlm_lock_mode_downgrade);
 
@@ -2645,6 +2697,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
         va_list args;
         struct obd_export *exp = lock->l_export;
        struct ldlm_resource *resource = NULL;
+       struct va_format vaf;
         char *nid = "local";
 
        /* on server-side resource of lock doesn't change */
@@ -2658,6 +2711,8 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
        }
 
         va_start(args, fmt);
+       vaf.fmt = fmt;
+       vaf.va = &args;
 
         if (exp && exp->exp_connection) {
                nid = obd_export_nid2str(exp);
@@ -2667,111 +2722,110 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
         }
 
         if (resource == NULL) {
-                libcfs_debug_vmsg2(msgdata, fmt, args,
-                      " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
-                      "res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s "
-                      "remote: %#llx expref: %d pid: %u timeout: %lld "
-                      "lvb_type: %d\n",
-                       lock,
-                      lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
-                       lock->l_readers, lock->l_writers,
-                       ldlm_lockname[lock->l_granted_mode],
-                       ldlm_lockname[lock->l_req_mode],
-                       lock->l_flags, nid, lock->l_remote_handle.cookie,
-                      exp ? atomic_read(&exp->exp_refcount) : -99,
-                       lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
+               libcfs_debug_msg(msgdata,
+                                "%pV ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
+                                &vaf,
+                                lock,
+                                lock->l_handle.h_cookie,
+                                atomic_read(&lock->l_refc),
+                                lock->l_readers, lock->l_writers,
+                                ldlm_lockname[lock->l_granted_mode],
+                                ldlm_lockname[lock->l_req_mode],
+                                lock->l_flags, nid,
+                                lock->l_remote_handle.cookie,
+                                exp ? atomic_read(&exp->exp_refcount) : -99,
+                                lock->l_pid, lock->l_callback_timeout,
+                                lock->l_lvb_type);
                 va_end(args);
                 return;
         }
 
        switch (resource->lr_type) {
        case LDLM_EXTENT:
-               libcfs_debug_vmsg2(msgdata, fmt, args,
-                       " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
-                       "res: "DLDLMRES" rrc: %d type: %s [%llu->%llu] "
-                       "(req %llu->%llu) flags: %#llx nid: %s remote: "
-                       "%#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
-                       ldlm_lock_to_ns_name(lock), lock,
-                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
-                       lock->l_readers, lock->l_writers,
-                       ldlm_lockname[lock->l_granted_mode],
-                       ldlm_lockname[lock->l_req_mode],
-                       PLDLMRES(resource),
-                       atomic_read(&resource->lr_refcount),
-                       ldlm_typename[resource->lr_type],
-                       lock->l_policy_data.l_extent.start,
-                       lock->l_policy_data.l_extent.end,
-                       lock->l_req_extent.start, lock->l_req_extent.end,
-                       lock->l_flags, nid, lock->l_remote_handle.cookie,
-                       exp ? atomic_read(&exp->exp_refcount) : -99,
-                       lock->l_pid, lock->l_callback_timeout,
-                       lock->l_lvb_type);
+               libcfs_debug_msg(msgdata,
+                                "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s [%llu->%llu] (req %llu->%llu) flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
+                                &vaf,
+                                ldlm_lock_to_ns_name(lock), lock,
+                                lock->l_handle.h_cookie,
+                                atomic_read(&lock->l_refc),
+                                lock->l_readers, lock->l_writers,
+                                ldlm_lockname[lock->l_granted_mode],
+                                ldlm_lockname[lock->l_req_mode],
+                                PLDLMRES(resource),
+                                atomic_read(&resource->lr_refcount),
+                                ldlm_typename[resource->lr_type],
+                                lock->l_policy_data.l_extent.start,
+                                lock->l_policy_data.l_extent.end,
+                                lock->l_req_extent.start, lock->l_req_extent.end,
+                                lock->l_flags, nid,
+                                lock->l_remote_handle.cookie,
+                                exp ? atomic_read(&exp->exp_refcount) : -99,
+                                lock->l_pid, lock->l_callback_timeout,
+                                lock->l_lvb_type);
                break;
 
        case LDLM_FLOCK:
-               libcfs_debug_vmsg2(msgdata, fmt, args,
-                       " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
-                       "res: "DLDLMRES" rrc: %d type: %s pid: %d "
-                       "[%llu->%llu] flags: %#llx nid: %s "
-                       "remote: %#llx expref: %d pid: %u timeout: %lld\n",
-                       ldlm_lock_to_ns_name(lock), lock,
-                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
-                       lock->l_readers, lock->l_writers,
-                       ldlm_lockname[lock->l_granted_mode],
-                       ldlm_lockname[lock->l_req_mode],
-                       PLDLMRES(resource),
-                       atomic_read(&resource->lr_refcount),
-                       ldlm_typename[resource->lr_type],
-                       lock->l_policy_data.l_flock.pid,
-                       lock->l_policy_data.l_flock.start,
-                       lock->l_policy_data.l_flock.end,
-                       lock->l_flags, nid, lock->l_remote_handle.cookie,
-                       exp ? atomic_read(&exp->exp_refcount) : -99,
-                       lock->l_pid, lock->l_callback_timeout);
+               libcfs_debug_msg(msgdata,
+                                "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s pid: %d [%llu->%llu] flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld\n",
+                                &vaf,
+                                ldlm_lock_to_ns_name(lock), lock,
+                                lock->l_handle.h_cookie,
+                                atomic_read(&lock->l_refc),
+                                lock->l_readers, lock->l_writers,
+                                ldlm_lockname[lock->l_granted_mode],
+                                ldlm_lockname[lock->l_req_mode],
+                                PLDLMRES(resource),
+                                atomic_read(&resource->lr_refcount),
+                                ldlm_typename[resource->lr_type],
+                                lock->l_policy_data.l_flock.pid,
+                                lock->l_policy_data.l_flock.start,
+                                lock->l_policy_data.l_flock.end,
+                                lock->l_flags, nid,
+                                lock->l_remote_handle.cookie,
+                                exp ? atomic_read(&exp->exp_refcount) : -99,
+                                lock->l_pid, lock->l_callback_timeout);
                break;
 
        case LDLM_IBITS:
-               libcfs_debug_vmsg2(msgdata, fmt, args,
-                       " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
-                       "res: "DLDLMRES" bits %#llx/%#llx rrc: %d type: %s "
-                       "flags: %#llx nid: %s remote: %#llx expref: %d "
-                       "pid: %u timeout: %lld lvb_type: %d\n",
-                       ldlm_lock_to_ns_name(lock),
-                       lock, lock->l_handle.h_cookie,
-                       atomic_read(&lock->l_refc),
-                       lock->l_readers, lock->l_writers,
-                       ldlm_lockname[lock->l_granted_mode],
-                       ldlm_lockname[lock->l_req_mode],
-                       PLDLMRES(resource),
-                       lock->l_policy_data.l_inodebits.bits,
-                       lock->l_policy_data.l_inodebits.try_bits,
-                       atomic_read(&resource->lr_refcount),
-                       ldlm_typename[resource->lr_type],
-                       lock->l_flags, nid, lock->l_remote_handle.cookie,
-                       exp ? atomic_read(&exp->exp_refcount) : -99,
-                       lock->l_pid, lock->l_callback_timeout,
-                       lock->l_lvb_type);
+               libcfs_debug_msg(msgdata,
+                                "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx/%#llx rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
+                                &vaf,
+                                ldlm_lock_to_ns_name(lock),
+                                lock, lock->l_handle.h_cookie,
+                                atomic_read(&lock->l_refc),
+                                lock->l_readers, lock->l_writers,
+                                ldlm_lockname[lock->l_granted_mode],
+                                ldlm_lockname[lock->l_req_mode],
+                                PLDLMRES(resource),
+                                lock->l_policy_data.l_inodebits.bits,
+                                lock->l_policy_data.l_inodebits.try_bits,
+                                atomic_read(&resource->lr_refcount),
+                                ldlm_typename[resource->lr_type],
+                                lock->l_flags, nid,
+                                lock->l_remote_handle.cookie,
+                                exp ? atomic_read(&exp->exp_refcount) : -99,
+                                lock->l_pid, lock->l_callback_timeout,
+                                lock->l_lvb_type);
                break;
 
        default:
-               libcfs_debug_vmsg2(msgdata, fmt, args,
-                       " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
-                       "res: "DLDLMRES" rrc: %d type: %s flags: %#llx "
-                       "nid: %s remote: %#llx expref: %d pid: %u "
-                       "timeout: %lld lvb_type: %d\n",
-                       ldlm_lock_to_ns_name(lock),
-                       lock, lock->l_handle.h_cookie,
-                       atomic_read(&lock->l_refc),
-                       lock->l_readers, lock->l_writers,
-                       ldlm_lockname[lock->l_granted_mode],
-                       ldlm_lockname[lock->l_req_mode],
-                       PLDLMRES(resource),
-                       atomic_read(&resource->lr_refcount),
-                       ldlm_typename[resource->lr_type],
-                       lock->l_flags, nid, lock->l_remote_handle.cookie,
-                       exp ? atomic_read(&exp->exp_refcount) : -99,
-                       lock->l_pid, lock->l_callback_timeout,
-                       lock->l_lvb_type);
+               libcfs_debug_msg(msgdata,
+                                "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
+                                &vaf,
+                                ldlm_lock_to_ns_name(lock),
+                                lock, lock->l_handle.h_cookie,
+                                atomic_read(&lock->l_refc),
+                                lock->l_readers, lock->l_writers,
+                                ldlm_lockname[lock->l_granted_mode],
+                                ldlm_lockname[lock->l_req_mode],
+                                PLDLMRES(resource),
+                                atomic_read(&resource->lr_refcount),
+                                ldlm_typename[resource->lr_type],
+                                lock->l_flags, nid,
+                                lock->l_remote_handle.cookie,
+                                exp ? atomic_read(&exp->exp_refcount) : -99,
+                                lock->l_pid, lock->l_callback_timeout,
+                                lock->l_lvb_type);
                break;
        }
        va_end(args);
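
_ldlm_lock_debug() now forwards the caller's format string once through a struct va_format and a single %pV conversion instead of the removed libcfs_debug_vmsg2() helper. A minimal sketch of the %pV pattern, assuming plain printk() rather than libcfs_debug_msg():

/*
 * The caller's fmt/args pair is wrapped in a struct va_format and expanded
 * exactly once by "%pV", so the fixed prefix and the caller's message come
 * out as a single line.
 */
#include <linux/kernel.h>
#include <linux/printk.h>

static void demo_lock_debug(const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* "%pV" tells printk to format vaf.fmt with vaf.va recursively */
	printk(KERN_DEBUG "%s: %pV\n", prefix, &vaf);
	va_end(args);
}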