LU-12616 obclass: fix MDS start/stop race
[fs/lustre-release.git] / lustre / osc / osc_lock.c
index 4d2122b..10849d0 100644
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
 
 #define DEBUG_SUBSYSTEM S_OSC
 
-#include <libcfs/libcfs.h>
 /* fid_build_reg_res_name() */
 #include <lustre_fid.h>
+#include <lustre_osc.h>
 
-#include "osc_cl_internal.h"
+#include "osc_internal.h"
 
 /** \addtogroup osc
  *  @{
  */
 
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-static const struct cl_lock_operations osc_lock_ops;
-static const struct cl_lock_operations osc_lock_lockless_ops;
-static void osc_lock_to_lockless(const struct lu_env *env,
-                                 struct osc_lock *ols, int force);
-
-int osc_lock_is_lockless(const struct osc_lock *olck)
-{
-        return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
-}
-
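 (A note on the removal above: with the lockless cl_lock_operations now recorded
 per-lock in ols_lockless_ops, osc_lock_is_lockless() presumably survives as a
 static inline in the shared lustre_osc.h header, roughly as sketched below —
 an assumption based on the later hunks in this file that still call it:

        static inline int osc_lock_is_lockless(const struct osc_lock *ols)
        {
                return ols->ols_cl.cls_ops == ols->ols_lockless_ops;
        }
 )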
 /**
  * Returns a weak pointer to the ldlm lock identified by a handle. Returned
  * pointer cannot be dereferenced, as lock is not protected from concurrent
@@ -126,7 +106,7 @@ static int osc_lock_invariant(struct osc_lock *ols)
 
        if (! ergo(ols->ols_state == OLS_GRANTED,
                   olock != NULL &&
-                  olock->l_req_mode == olock->l_granted_mode &&
+                  ldlm_is_granted(olock) &&
                   ols->ols_hold))
                return 0;
        return 1;
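 (For reference, the two helpers used in this invariant, with their assumed
 definitions: ldlm_is_granted() is a readability wrapper around exactly the
 mode comparison it replaces here, and ergo() is the libcfs logical-implication
 macro:

        static inline bool ldlm_is_granted(struct ldlm_lock *lock)
        {
                return lock->l_req_mode == lock->l_granted_mode;
        }

        #define ergo(a, b) (!(a) || (b))        /* "a implies b" */
 )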
@@ -138,8 +118,7 @@ static int osc_lock_invariant(struct osc_lock *ols)
  *
  */
 
-static void osc_lock_fini(const struct lu_env *env,
-                          struct cl_lock_slice *slice)
+void osc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
 {
        struct osc_lock  *ols = cl2osc_lock(slice);
 
@@ -148,6 +127,7 @@ static void osc_lock_fini(const struct lu_env *env,
 
        OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
 }
+EXPORT_SYMBOL(osc_lock_fini);
 
 static void osc_lock_build_policy(const struct lu_env *env,
                                  const struct cl_lock *lock,
@@ -159,25 +139,6 @@ static void osc_lock_build_policy(const struct lu_env *env,
        policy->l_extent.gid = d->cld_gid;
 }
 
-static __u64 osc_enq2ldlm_flags(__u32 enqflags)
-{
-       __u64 result = 0;
-
-       LASSERT((enqflags & ~CEF_MASK) == 0);
-
-       if (enqflags & CEF_NONBLOCK)
-               result |= LDLM_FL_BLOCK_NOWAIT;
-       if (enqflags & CEF_ASYNC)
-               result |= LDLM_FL_HAS_INTENT;
-       if (enqflags & CEF_DISCARD_DATA)
-               result |= LDLM_FL_AST_DISCARD_DATA;
-       if (enqflags & CEF_PEEK)
-               result |= LDLM_FL_TEST_LOCK;
-       if (enqflags & CEF_LOCK_MATCH)
-               result |= LDLM_FL_MATCH_LOCK;
-       return result;
-}
-
 /**
  * Updates object attributes from a lock value block (lvb) received together
  * with the DLM lock reply from the server. Copy of osc_update_enqueue()
@@ -269,7 +230,7 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
 
        /* Lock must have been granted. */
        lock_res_and_lock(dlmlock);
-       if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
+       if (ldlm_is_granted(dlmlock)) {
                struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
                struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
 
@@ -306,11 +267,10 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
        struct cl_lock_slice    *slice = &oscl->ols_cl;
        struct lu_env           *env;
        int                     rc;
-       __u16                   refcheck;
 
        ENTRY;
 
-       env = cl_env_get(&refcheck);
+       env = cl_env_percpu_get();
        /* should never happen, similar to osc_ldlm_blocking_ast(). */
        LASSERT(!IS_ERR(env));
 
@@ -333,7 +293,7 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
                 * lockless lock.
                 */
                osc_object_set_contended(cl2osc(slice->cls_obj));
-               LASSERT(slice->cls_ops == &osc_lock_ops);
+               LASSERT(slice->cls_ops != oscl->ols_lockless_ops);
 
                /* Change this lock to ldlmlock-less lock. */
                osc_lock_to_lockless(env, oscl, 1);
@@ -345,17 +305,20 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
                                    NULL, &oscl->ols_lvb);
                /* Hide the error. */
                rc = 0;
+       } else if (rc < 0 && oscl->ols_flags & LDLM_FL_NDELAY) {
+               rc = -EWOULDBLOCK;
        }
 
        if (oscl->ols_owner != NULL)
                cl_sync_io_note(env, oscl->ols_owner, rc);
-       cl_env_put(env, &refcheck);
+       cl_env_percpu_put(env);
 
        RETURN(rc);
 }
 
-static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
-                              int errcode)
+static int osc_lock_upcall_speculative(void *cookie,
+                                      struct lustre_handle *lockh,
+                                      int errcode)
 {
        struct osc_object       *osc = cookie;
        struct ldlm_lock        *dlmlock;
@@ -376,9 +339,9 @@ static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
        LASSERT(dlmlock != NULL);
 
        lock_res_and_lock(dlmlock);
-       LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
+       LASSERT(ldlm_is_granted(dlmlock));
 
-       /* there is no osc_lock associated with AGL lock */
+       /* there is no osc_lock associated with speculative locks */
        osc_lock_lvb_update(env, osc, dlmlock, NULL);
 
        unlock_res_and_lock(dlmlock);
@@ -391,7 +354,7 @@ out:
 }
 
 static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
-                         enum cl_lock_mode mode, int discard)
+                         enum cl_lock_mode mode, bool discard)
 {
        struct lu_env           *env;
        __u16                   refcheck;
@@ -414,7 +377,12 @@ static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
                        rc = 0;
        }
 
-       rc2 = osc_lock_discard_pages(env, obj, start, end, mode);
+       /*
+        * Do not try to match other locks with CLM_WRITE since we already
+        * know there're none
+        */
+       rc2 = osc_lock_discard_pages(env, obj, start, end,
+                                    mode == CLM_WRITE || discard);
        if (rc == 0 && rc2 < 0)
                rc = rc2;
 
@@ -432,14 +400,14 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env,
 {
        struct cl_object        *obj = NULL;
        int                     result = 0;
-       int                     discard;
+       bool                    discard;
        enum cl_lock_mode       mode = CLM_READ;
        ENTRY;
 
        LASSERT(flag == LDLM_CB_CANCELING);
 
        lock_res_and_lock(dlmlock);
-       if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
+       if (!ldlm_is_granted(dlmlock)) {
                dlmlock->l_ast_data = NULL;
                unlock_res_and_lock(dlmlock);
                RETURN(0);
@@ -579,7 +547,7 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
        RETURN(result);
 }
 
-static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
+int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
 {
        struct ptlrpc_request   *req  = data;
        struct lu_env           *env;
@@ -641,15 +609,15 @@ out:
        req->rq_status = result;
        RETURN(result);
 }
+EXPORT_SYMBOL(osc_ldlm_glimpse_ast);
 
 static int weigh_cb(const struct lu_env *env, struct cl_io *io,
                    struct osc_page *ops, void *cbdata)
 {
        struct cl_page *page = ops->ops_cl.cpl_page;
 
-       if (cl_page_is_vmlocked(env, page)
-           || PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage)
-          )
+       if (cl_page_is_vmlocked(env, page) || PageDirty(page->cp_vmpage) ||
+           PageWriteback(page->cp_vmpage))
                return CLP_GANG_ABORT;
 
        *(pgoff_t *)cbdata = osc_index(ops) + 1;
@@ -658,12 +626,13 @@ static int weigh_cb(const struct lu_env *env, struct cl_io *io,
 
 static unsigned long osc_lock_weight(const struct lu_env *env,
                                     struct osc_object *oscobj,
-                                    struct ldlm_extent *extent)
+                                    loff_t start, loff_t end)
 {
-       struct cl_io     *io = &osc_env_info(env)->oti_io;
+       struct cl_io *io = osc_env_thread_io(env);
        struct cl_object *obj = cl_object_top(&oscobj->oo_cl);
-       pgoff_t          page_index;
-       int              result;
+       pgoff_t page_index;
+       int result;
+
        ENTRY;
 
        io->ci_obj = obj;
@@ -672,11 +641,10 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
        if (result != 0)
                RETURN(result);
 
-       page_index = cl_index(obj, extent->start);
+       page_index = cl_index(obj, start);
        do {
                result = osc_page_gang_lookup(env, io, oscobj,
-                                             page_index,
-                                             cl_index(obj, extent->end),
+                                             page_index, cl_index(obj, end),
                                              weigh_cb, (void *)&page_index);
                if (result == CLP_GANG_ABORT)
                        break;
@@ -693,12 +661,13 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
  */
 unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
 {
-       struct lu_env           *env;
-       struct osc_object       *obj;
-       struct osc_lock         *oscl;
-       unsigned long            weight;
-       bool                    found = false;
-       __u16                   refcheck;
+       struct lu_env *env;
+       struct osc_object *obj;
+       struct osc_lock *oscl;
+       unsigned long weight;
+       bool found = false;
+       __u16 refcheck;
+
        ENTRY;
 
        might_sleep();
@@ -714,16 +683,24 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
                /* Mostly because lack of memory, do not eliminate this lock */
                RETURN(1);
 
-       LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
+       LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT ||
+               dlmlock->l_resource->lr_type == LDLM_IBITS);
+
+       lock_res_and_lock(dlmlock);
        obj = dlmlock->l_ast_data;
+       if (obj)
+               cl_object_get(osc2cl(obj));
+       unlock_res_and_lock(dlmlock);
+
        if (obj == NULL)
                GOTO(out, weight = 1);
 
        spin_lock(&obj->oo_ol_spin);
        list_for_each_entry(oscl, &obj->oo_ol_list, ols_nextlock_oscobj) {
-               if (oscl->ols_dlmlock != NULL && oscl->ols_dlmlock != dlmlock)
-                       continue;
-               found = true;
+               if (oscl->ols_dlmlock == dlmlock) {
+                       found = true;
+                       break;
+               }
        }
        spin_unlock(&obj->oo_ol_spin);
        if (found) {
@@ -733,13 +710,28 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
                GOTO(out, weight = 1);
        }
 
-       weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent);
+       if (dlmlock->l_resource->lr_type == LDLM_EXTENT)
+               weight = osc_lock_weight(env, obj,
+                                        dlmlock->l_policy_data.l_extent.start,
+                                        dlmlock->l_policy_data.l_extent.end);
+       else if (ldlm_has_dom(dlmlock))
+               weight = osc_lock_weight(env, obj, 0, OBD_OBJECT_EOF);
+       /* The DOM bit can be cancelled at any time; in that case, we know
+        * there are no pages, so just return weight of 0
+        */
+       else
+               weight = 0;
+
        EXIT;
 
 out:
+       if (obj)
+               cl_object_put(env, osc2cl(obj));
+
        cl_env_put(env, &refcheck);
        return weight;
 }
+EXPORT_SYMBOL(osc_ldlm_weigh_ast);
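 (The IBITS branch above relies on ldlm_has_dom() to tell whether the lock
 still carries the Data-on-MDT bit; assuming the usual lustre_dlm.h definition,
 it is roughly:

        static inline bool ldlm_has_dom(struct ldlm_lock *lock)
        {
                return lock->l_resource->lr_type == LDLM_IBITS &&
                       lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_DOM;
        }

 so a DOM lock is weighed over the whole object (0 to OBD_OBJECT_EOF), while an
 IBITS lock whose DOM bit has already been cancelled weighs 0.)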
 
 static void osc_lock_build_einfo(const struct lu_env *env,
                                 const struct cl_lock *lock,
@@ -766,46 +758,46 @@ static void osc_lock_build_einfo(const struct lu_env *env,
  *  Additional policy can be implemented here, e.g., never do lockless-io
  *  for large extents.
  */
-static void osc_lock_to_lockless(const struct lu_env *env,
-                                 struct osc_lock *ols, int force)
+void osc_lock_to_lockless(const struct lu_env *env,
+                         struct osc_lock *ols, int force)
 {
-        struct cl_lock_slice *slice = &ols->ols_cl;
-
-        LASSERT(ols->ols_state == OLS_NEW ||
-                ols->ols_state == OLS_UPCALL_RECEIVED);
-
-        if (force) {
-                ols->ols_locklessable = 1;
-                slice->cls_ops = &osc_lock_lockless_ops;
-        } else {
-                struct osc_io *oio     = osc_env_io(env);
-                struct cl_io  *io      = oio->oi_cl.cis_io;
-                struct cl_object *obj  = slice->cls_obj;
-                struct osc_object *oob = cl2osc(obj);
-                const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
-                struct obd_connect_data *ocd;
-
-                LASSERT(io->ci_lockreq == CILR_MANDATORY ||
-                        io->ci_lockreq == CILR_MAYBE ||
-                        io->ci_lockreq == CILR_NEVER);
-
-                ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
-                ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
-                                (io->ci_lockreq == CILR_MAYBE) &&
-                                (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
-                if (io->ci_lockreq == CILR_NEVER ||
-                        /* lockless IO */
-                    (ols->ols_locklessable && osc_object_is_contended(oob)) ||
-                        /* lockless truncate */
-                    (cl_io_is_trunc(io) &&
-                     (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
-                      osd->od_lockless_truncate)) {
-                        ols->ols_locklessable = 1;
-                        slice->cls_ops = &osc_lock_lockless_ops;
-                }
-        }
-        LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
+       struct cl_lock_slice *slice = &ols->ols_cl;
+       struct osc_io *oio = osc_env_io(env);
+       struct cl_io *io = oio->oi_cl.cis_io;
+       struct cl_object *obj = slice->cls_obj;
+       struct osc_object *oob = cl2osc(obj);
+       const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
+       struct obd_connect_data *ocd;
+
+       LASSERT(ols->ols_state == OLS_NEW ||
+               ols->ols_state == OLS_UPCALL_RECEIVED);
+
+       if (force) {
+               ols->ols_locklessable = 1;
+               slice->cls_ops = ols->ols_lockless_ops;
+       } else {
+               LASSERT(io->ci_lockreq == CILR_MANDATORY ||
+                       io->ci_lockreq == CILR_MAYBE ||
+                       io->ci_lockreq == CILR_NEVER);
+
+               ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
+               ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
+                                       (io->ci_lockreq == CILR_MAYBE) &&
+                                       (ocd->ocd_connect_flags &
+                                        OBD_CONNECT_SRVLOCK);
+               if (io->ci_lockreq == CILR_NEVER ||
+                   /* lockless IO */
+                   (ols->ols_locklessable && osc_object_is_contended(oob)) ||
+                   /* lockless truncate */
+                   (cl_io_is_trunc(io) && osd->od_lockless_truncate &&
+                    (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK))) {
+                       ols->ols_locklessable = 1;
+                       slice->cls_ops = ols->ols_lockless_ops;
+               }
+       }
+       LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
 }
+EXPORT_SYMBOL(osc_lock_to_lockless);
 
 static bool osc_lock_compatible(const struct osc_lock *qing,
                                const struct osc_lock *qed)
@@ -813,7 +805,7 @@ static bool osc_lock_compatible(const struct osc_lock *qing,
        struct cl_lock_descr *qed_descr = &qed->ols_cl.cls_lock->cll_descr;
        struct cl_lock_descr *qing_descr = &qing->ols_cl.cls_lock->cll_descr;
 
-       if (qed->ols_glimpse)
+       if (qed->ols_glimpse || qed->ols_speculative)
                return true;
 
        if (qing_descr->cld_mode == CLM_READ && qed_descr->cld_mode == CLM_READ)
@@ -830,9 +822,8 @@ static bool osc_lock_compatible(const struct osc_lock *qing,
        return false;
 }
 
-static void osc_lock_wake_waiters(const struct lu_env *env,
-                                 struct osc_object *osc,
-                                 struct osc_lock *oscl)
+void osc_lock_wake_waiters(const struct lu_env *env, struct osc_object *osc,
+                          struct osc_lock *oscl)
 {
        spin_lock(&osc->oo_ol_spin);
        list_del_init(&oscl->ols_nextlock_oscobj);
@@ -850,14 +841,16 @@ static void osc_lock_wake_waiters(const struct lu_env *env,
        }
        spin_unlock(&oscl->ols_lock);
 }
+EXPORT_SYMBOL(osc_lock_wake_waiters);
 
-static int osc_lock_enqueue_wait(const struct lu_env *env,
-               struct osc_object *obj, struct osc_lock *oscl)
+int osc_lock_enqueue_wait(const struct lu_env *env, struct osc_object *obj,
+                         struct osc_lock *oscl)
 {
        struct osc_lock         *tmp_oscl;
        struct cl_lock_descr    *need = &oscl->ols_cl.cls_lock->cll_descr;
        struct cl_sync_io       *waiter = &osc_env_info(env)->oti_anchor;
        int rc = 0;
+
        ENTRY;
 
        spin_lock(&obj->oo_ol_spin);
@@ -885,7 +878,7 @@ restart:
                        continue;
 
                /* wait for conflicting lock to be canceled */
-               cl_sync_io_init(waiter, 1, cl_sync_io_end);
+               cl_sync_io_init(waiter, 1);
                oscl->ols_owner = waiter;
 
                spin_lock(&tmp_oscl->ols_lock);
@@ -908,6 +901,7 @@ restart:
 
        RETURN(rc);
 }
+EXPORT_SYMBOL(osc_lock_enqueue_wait);
 
 /**
  * Implementation of cl_lock_operations::clo_enqueue() method for osc
@@ -931,6 +925,7 @@ static int osc_lock_enqueue(const struct lu_env *env,
        struct osc_io                   *oio   = osc_env_io(env);
        struct osc_object               *osc   = cl2osc(slice->cls_obj);
        struct osc_lock                 *oscl  = cl2osc_lock(slice);
+       struct obd_export               *exp   = osc_export(osc);
        struct cl_lock                  *lock  = slice->cls_lock;
        struct ldlm_res_id              *resname = &info->oti_resname;
        union ldlm_policy_data          *policy  = &info->oti_policy;
@@ -947,11 +942,22 @@ static int osc_lock_enqueue(const struct lu_env *env,
        if (oscl->ols_state == OLS_GRANTED)
                RETURN(0);
 
+       if ((oscl->ols_flags & LDLM_FL_NO_EXPANSION) &&
+           !(exp_connect_lockahead_old(exp) || exp_connect_lockahead(exp))) {
+               result = -EOPNOTSUPP;
+               CERROR("%s: server does not support lockahead/locknoexpand: "
+                      "rc = %d\n", exp->exp_obd->obd_name, result);
+               RETURN(result);
+       }
+
        if (oscl->ols_flags & LDLM_FL_TEST_LOCK)
                GOTO(enqueue_base, 0);
 
-       if (oscl->ols_glimpse) {
-               LASSERT(equi(oscl->ols_agl, anchor == NULL));
+       /* For glimpse and/or speculative locks, do not wait for reply from
+        * server on LDLM request */
+       if (oscl->ols_glimpse || oscl->ols_speculative) {
+               /* Speculative and glimpse locks do not have an anchor */
+               LASSERT(equi(oscl->ols_speculative, anchor == NULL));
                async = true;
                GOTO(enqueue_base, 0);
        }
@@ -977,25 +983,31 @@ enqueue_base:
 
        /**
         * DLM lock's ast data must be osc_object;
-        * if glimpse or AGL lock, async of osc_enqueue_base() must be true,
+        * if glimpse or speculative lock, async of osc_enqueue_base()
+        * must be true
+        *
+        * For non-speculative locks:
         * DLM's enqueue callback set to osc_lock_upcall() with cookie as
         * osc_lock.
+        * For speculative locks:
+        * osc_lock_upcall_speculative & cookie is the osc object, since
+        * there is no osc_lock
         */
        ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
        osc_lock_build_policy(env, lock, policy);
-       if (oscl->ols_agl) {
+       if (oscl->ols_speculative) {
                oscl->ols_einfo.ei_cbdata = NULL;
                /* hold a reference for callback */
                cl_object_get(osc2cl(osc));
-               upcall = osc_lock_upcall_agl;
+               upcall = osc_lock_upcall_speculative;
                cookie = osc;
        }
-       result = osc_enqueue_base(osc_export(osc), resname, &oscl->ols_flags,
+       result = osc_enqueue_base(exp, resname, &oscl->ols_flags,
                                  policy, &oscl->ols_lvb,
                                  osc->oo_oinfo->loi_kms_valid,
                                  upcall, cookie,
                                  &oscl->ols_einfo, PTLRPCD_SET, async,
-                                 oscl->ols_agl);
+                                 oscl->ols_speculative);
        if (result == 0) {
                if (osc_lock_is_lockless(oscl)) {
                        oio->oi_lockless = 1;
@@ -1004,9 +1016,12 @@ enqueue_base:
                        LASSERT(oscl->ols_hold);
                        LASSERT(oscl->ols_dlmlock != NULL);
                }
-       } else if (oscl->ols_agl) {
+       } else if (oscl->ols_speculative) {
                cl_object_put(env, osc2cl(osc));
-               result = 0;
+               if (oscl->ols_glimpse) {
+                       /* hide error for AGL request */
+                       result = 0;
+               }
        }
 
 out:
@@ -1064,8 +1079,8 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
  *
  *     - cancels ldlm lock (ldlm_cli_cancel()).
  */
-static void osc_lock_cancel(const struct lu_env *env,
-                            const struct cl_lock_slice *slice)
+void osc_lock_cancel(const struct lu_env *env,
+                    const struct cl_lock_slice *slice)
 {
        struct osc_object *obj  = cl2osc(slice->cls_obj);
        struct osc_lock   *oscl = cl2osc_lock(slice);
@@ -1081,9 +1096,10 @@ static void osc_lock_cancel(const struct lu_env *env,
        osc_lock_wake_waiters(env, obj, oscl);
        EXIT;
 }
+EXPORT_SYMBOL(osc_lock_cancel);
 
-static int osc_lock_print(const struct lu_env *env, void *cookie,
-                         lu_printer_t p, const struct cl_lock_slice *slice)
+int osc_lock_print(const struct lu_env *env, void *cookie,
+                  lu_printer_t p, const struct cl_lock_slice *slice)
 {
        struct osc_lock *lock = cl2osc_lock(slice);
 
@@ -1093,6 +1109,7 @@ static int osc_lock_print(const struct lu_env *env, void *cookie,
        osc_lvb_print(env, cookie, p, &lock->ols_lvb);
        return 0;
 }
+EXPORT_SYMBOL(osc_lock_print);
 
 static const struct cl_lock_operations osc_lock_ops = {
         .clo_fini    = osc_lock_fini,
@@ -1111,7 +1128,7 @@ static void osc_lock_lockless_cancel(const struct lu_env *env,
 
        LASSERT(ols->ols_dlmlock == NULL);
        result = osc_lock_flush(osc, descr->cld_start, descr->cld_end,
-                               descr->cld_mode, 0);
+                               descr->cld_mode, false);
         if (result)
                 CERROR("Pages for lockless lock %p were not purged(%d)\n",
                        ols, result);
@@ -1126,9 +1143,8 @@ static const struct cl_lock_operations osc_lock_lockless_ops = {
         .clo_print     = osc_lock_print
 };
 
-static void osc_lock_set_writer(const struct lu_env *env,
-                               const struct cl_io *io,
-                               struct cl_object *obj, struct osc_lock *oscl)
+void osc_lock_set_writer(const struct lu_env *env, const struct cl_io *io,
+                        struct cl_object *obj, struct osc_lock *oscl)
 {
        struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
        pgoff_t io_start;
@@ -1141,17 +1157,14 @@ static void osc_lock_set_writer(const struct lu_env *env,
                io_start = cl_index(obj, io->u.ci_rw.crw_pos);
                io_end = cl_index(obj, io->u.ci_rw.crw_pos +
                                                io->u.ci_rw.crw_count - 1);
-               if (cl_io_is_append(io)) {
-                       io_start = 0;
-                       io_end = CL_PAGE_EOF;
-               }
        } else {
                LASSERT(cl_io_is_mkwrite(io));
                io_start = io_end = io->u.ci_fault.ft_index;
        }
 
        if (descr->cld_mode >= CLM_WRITE &&
-           descr->cld_start <= io_start && descr->cld_end >= io_end) {
+           (cl_io_is_append(io) ||
+            (descr->cld_start <= io_start && descr->cld_end >= io_end))) {
                struct osc_io *oio = osc_env_io(env);
 
                /* There must be only one lock to match the write region */
@@ -1159,6 +1172,7 @@ static void osc_lock_set_writer(const struct lu_env *env,
                oio->oi_write_osclock = oscl;
        }
 }
+EXPORT_SYMBOL(osc_lock_set_writer);
 
 int osc_lock_init(const struct lu_env *env,
                  struct cl_object *obj, struct cl_lock *lock,
@@ -1176,15 +1190,23 @@ int osc_lock_init(const struct lu_env *env,
        INIT_LIST_HEAD(&oscl->ols_waiting_list);
        INIT_LIST_HEAD(&oscl->ols_wait_entry);
        INIT_LIST_HEAD(&oscl->ols_nextlock_oscobj);
+       oscl->ols_lockless_ops = &osc_lock_lockless_ops;
+
+       /* Speculative lock requests must be either no_expand or glimpse
+        * requests (CEF_GLIMPSE).  Non-glimpse no_expand speculative extent
+        * locks will break ofd_intent_cb (see the comment there). */
+       LASSERT(ergo((enqflags & CEF_SPECULATIVE) != 0,
+               (enqflags & (CEF_LOCK_NO_EXPAND | CEF_GLIMPSE)) != 0));
 
        oscl->ols_flags = osc_enq2ldlm_flags(enqflags);
-       oscl->ols_agl = !!(enqflags & CEF_AGL);
-       if (oscl->ols_agl)
-               oscl->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
+       oscl->ols_speculative = !!(enqflags & CEF_SPECULATIVE);
+
        if (oscl->ols_flags & LDLM_FL_HAS_INTENT) {
                oscl->ols_flags |= LDLM_FL_BLOCK_GRANTED;
                oscl->ols_glimpse = 1;
        }
+       if (io->ci_ndelay && cl_object_same(io->ci_obj, obj))
+               oscl->ols_flags |= LDLM_FL_NDELAY;
        osc_lock_build_einfo(env, lock, cl2osc(obj), &oscl->ols_einfo);
 
        cl_lock_slice_add(lock, &oscl->ols_cl, obj, &osc_lock_ops);
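 (A hypothetical caller sketch, not part of this patch, of flag combinations
 that satisfy the LASSERT in osc_lock_init() above: an AGL-style request
 becomes a speculative glimpse, while a lockahead request is speculative and
 non-expanding:

        /* hypothetical examples only; the CEF_* names are those used in the
         * LASSERT above */
        __u32 agl_flags       = CEF_SPECULATIVE | CEF_GLIMPSE;
        __u32 lockahead_flags = CEF_SPECULATIVE | CEF_LOCK_NO_EXPAND;
 )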
@@ -1208,9 +1230,10 @@ int osc_lock_init(const struct lu_env *env,
  * Finds an existing lock covering given index and optionally different from a
  * given \a except lock.
  */
-struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
-                                      struct osc_object *obj, pgoff_t index,
-                                      enum osc_dap_flags dap_flags)
+struct ldlm_lock *osc_obj_dlmlock_at_pgoff(const struct lu_env *env,
+                                          struct osc_object *obj,
+                                          pgoff_t index,
+                                          enum osc_dap_flags dap_flags)
 {
        struct osc_thread_info *info = osc_env_info(env);
        struct ldlm_res_id *resname = &info->oti_resname;