* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* Implementation of cl_lock for OSC layer.
*
#define DEBUG_SUBSYSTEM S_OSC
-#include <libcfs/libcfs.h>
/* fid_build_reg_res_name() */
#include <lustre_fid.h>
+#include <lustre_osc.h>
-#include "osc_cl_internal.h"
+#include "osc_internal.h"
/** \addtogroup osc
* @{
*/
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-static const struct cl_lock_operations osc_lock_ops;
-static const struct cl_lock_operations osc_lock_lockless_ops;
-static void osc_lock_to_lockless(const struct lu_env *env,
- struct osc_lock *ols, int force);
-
-int osc_lock_is_lockless(const struct osc_lock *olck)
-{
- return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
-}
-
/**
* Returns a weak pointer to the ldlm lock identified by a handle. Returned
* pointer cannot be dereferenced, as lock is not protected from concurrent
/**
* Invariant that has to be true all of the time.
*/
-static int osc_lock_invariant(struct osc_lock *ols)
+static inline int osc_lock_invariant(struct osc_lock *ols)
{
struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
struct ldlm_lock *olock = ols->ols_dlmlock;
if (! ergo(ols->ols_state == OLS_GRANTED,
olock != NULL &&
- olock->l_req_mode == olock->l_granted_mode &&
+ ldlm_is_granted(olock) &&
ols->ols_hold))
return 0;
return 1;
*
*/
-static void osc_lock_fini(const struct lu_env *env,
- struct cl_lock_slice *slice)
+void osc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
{
struct osc_lock *ols = cl2osc_lock(slice);
OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
}
+EXPORT_SYMBOL(osc_lock_fini);
static void osc_lock_build_policy(const struct lu_env *env,
const struct cl_lock *lock,
policy->l_extent.gid = d->cld_gid;
}
-static __u64 osc_enq2ldlm_flags(__u32 enqflags)
-{
- __u64 result = 0;
-
- LASSERT((enqflags & ~CEF_MASK) == 0);
-
- if (enqflags & CEF_NONBLOCK)
- result |= LDLM_FL_BLOCK_NOWAIT;
- if (enqflags & CEF_ASYNC)
- result |= LDLM_FL_HAS_INTENT;
- if (enqflags & CEF_DISCARD_DATA)
- result |= LDLM_FL_AST_DISCARD_DATA;
- if (enqflags & CEF_PEEK)
- result |= LDLM_FL_TEST_LOCK;
- if (enqflags & CEF_LOCK_MATCH)
- result |= LDLM_FL_MATCH_LOCK;
- return result;
-}
-
/**
* Updates object attributes from a lock value block (lvb) received together
* with the DLM lock reply from the server. Copy of osc_update_enqueue()
* logic.
*
- * This can be optimized to not update attributes when lock is a result of a
- * local match.
- *
* Called under lock and resource spin-locks.
*/
-static void osc_lock_lvb_update(const struct lu_env *env,
- struct osc_object *osc,
- struct ldlm_lock *dlmlock,
- struct ost_lvb *lvb)
+void osc_lock_lvb_update(const struct lu_env *env,
+ struct osc_object *osc,
+ struct ldlm_lock *dlmlock,
+ struct ost_lvb *lvb)
{
- struct cl_object *obj = osc2cl(osc);
- struct lov_oinfo *oinfo = osc->oo_oinfo;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- unsigned valid;
+ struct cl_object *obj = osc2cl(osc);
+ struct lov_oinfo *oinfo = osc->oo_oinfo;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+ unsigned valid, setkms = 0;
ENTRY;
if (size > dlmlock->l_policy_data.l_extent.end)
size = dlmlock->l_policy_data.l_extent.end + 1;
if (size >= oinfo->loi_kms) {
- LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu"
- ", kms=%llu", lvb->lvb_size, size);
valid |= CAT_KMS;
attr->cat_kms = size;
- } else {
- LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
- "%llu; leaving kms=%llu, end=%llu",
- lvb->lvb_size, oinfo->loi_kms,
- dlmlock->l_policy_data.l_extent.end);
+ setkms = 1;
}
ldlm_lock_allow_match_locked(dlmlock);
}
+ /* The size should not be less than the kms */
+ if (attr->cat_size < oinfo->loi_kms)
+ attr->cat_size = oinfo->loi_kms;
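+ /* (kms, the "known minimum size", is the client's lower bound on the
+ * file size; a lock on [start, end] only justifies extending it to
+ * end + 1, which is why size was clamped to the lock end above) */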
+
+ LDLM_DEBUG(dlmlock, "acquired size %llu, setting rss=%llu;%s "
+ "kms=%llu, end=%llu", lvb->lvb_size, attr->cat_size,
+ setkms ? "" : " leaving",
+ setkms ? attr->cat_kms : oinfo->loi_kms,
+ dlmlock ? dlmlock->l_policy_data.l_extent.end : -1ull);
+
cl_object_attr_update(env, obj, attr, valid);
cl_object_attr_unlock(obj);
}
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
- struct lustre_handle *lockh, bool lvb_update)
+ struct lustre_handle *lockh)
{
+ struct osc_object *osc = cl2osc(oscl->ols_cl.cls_obj);
struct ldlm_lock *dlmlock;
dlmlock = ldlm_handle2lock_long(lockh, 0);
/* lock reference taken by ldlm_handle2lock_long() is
* owned by osc_lock and released in osc_lock_detach()
*/
- lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
+ lu_ref_add_atomic(&dlmlock->l_reference, "osc_lock", oscl);
oscl->ols_has_ref = 1;
LASSERT(oscl->ols_dlmlock == NULL);
/* Lock must have been granted. */
lock_res_and_lock(dlmlock);
- if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
+ if (ldlm_is_granted(dlmlock)) {
struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
/* extend the lock extent, otherwise there will be a problem when
* we decide whether to grant a lockless lock. */
descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
- descr->cld_start = cl_index(descr->cld_obj, ext->start);
- descr->cld_end = cl_index(descr->cld_obj, ext->end);
+ descr->cld_start = ext->start >> PAGE_SHIFT;
+ descr->cld_end = ext->end >> PAGE_SHIFT;
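+ /* cld_start/cld_end are page indexes while the DLM extent is in
+ * bytes, hence the PAGE_SHIFT conversion (this replaces the old
+ * cl_index() helper) */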
descr->cld_gid = ext->gid;
/* no lvb update for matched lock */
- if (lvb_update) {
+ if (!ldlm_is_lvb_cached(dlmlock)) {
LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
- osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
- dlmlock, NULL);
+ LASSERT(osc == dlmlock->l_ast_data);
+ osc_lock_lvb_update(env, osc, dlmlock, NULL);
+ ldlm_set_lvb_cached(dlmlock);
}
LINVRNT(osc_lock_invariant(oscl));
}
struct cl_lock_slice *slice = &oscl->ols_cl;
struct lu_env *env;
int rc;
- __u16 refcheck;
ENTRY;
- env = cl_env_get(&refcheck);
+ env = cl_env_percpu_get();
/* should never happen, similar to osc_ldlm_blocking_ast(). */
LASSERT(!IS_ERR(env));
}
if (rc == 0)
- osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);
+ osc_lock_granted(env, oscl, lockh);
/* Error handling, some errors are tolerable. */
- if (oscl->ols_locklessable && rc == -EUSERS) {
- /* This is a tolerable error, turn this lock into
- * lockless lock.
- */
- osc_object_set_contended(cl2osc(slice->cls_obj));
- LASSERT(slice->cls_ops == &osc_lock_ops);
-
- /* Change this lock to ldlmlock-less lock. */
- osc_lock_to_lockless(env, oscl, 1);
- oscl->ols_state = OLS_GRANTED;
- rc = 0;
- } else if (oscl->ols_glimpse && rc == -ENAVAIL) {
+ if (oscl->ols_glimpse && rc == -ENAVAIL) {
LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
NULL, &oscl->ols_lvb);
/* Hide the error. */
rc = 0;
+ } else if (rc < 0 && oscl->ols_flags & LDLM_FL_NDELAY) {
+ rc = -EAGAIN;
}
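+ /* (LDLM_FL_NDELAY marks a nonblocking enqueue, set from io->ci_ndelay
+ * in osc_lock_init(); returning -EAGAIN lets the caller retry or fall
+ * back instead of blocking) */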
if (oscl->ols_owner != NULL)
cl_sync_io_note(env, oscl->ols_owner, rc);
- cl_env_put(env, &refcheck);
+ cl_env_percpu_put(env);
RETURN(rc);
}
-static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
- int errcode)
+static int osc_lock_upcall_speculative(void *cookie,
+ struct lustre_handle *lockh,
+ int errcode)
{
struct osc_object *osc = cookie;
struct ldlm_lock *dlmlock;
LASSERT(dlmlock != NULL);
lock_res_and_lock(dlmlock);
- LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
+ LASSERT(ldlm_is_granted(dlmlock));
- /* there is no osc_lock associated with AGL lock */
+ /* there is no osc_lock associated with speculative locks,
+ * thus there is no need to set LDLM_FL_LVB_CACHED */
osc_lock_lvb_update(env, osc, dlmlock, NULL);
unlock_res_and_lock(dlmlock);
}
static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
- enum cl_lock_mode mode, int discard)
+ enum cl_lock_mode mode, bool discard)
{
struct lu_env *env;
__u16 refcheck;
rc = 0;
}
- rc2 = osc_lock_discard_pages(env, obj, start, end, mode);
+ /*
+ * Do not try to match other locks with CLM_WRITE since we already
+ * know there are none
+ */
+ rc2 = osc_lock_discard_pages(env, obj, start, end,
+ mode == CLM_WRITE || discard);
if (rc == 0 && rc2 < 0)
rc = rc2;
{
struct cl_object *obj = NULL;
int result = 0;
- int discard;
+ bool discard;
enum cl_lock_mode mode = CLM_READ;
ENTRY;
LASSERT(flag == LDLM_CB_CANCELING);
lock_res_and_lock(dlmlock);
- if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
+ if (!ldlm_is_granted(dlmlock)) {
dlmlock->l_ast_data = NULL;
unlock_res_and_lock(dlmlock);
RETURN(0);
if (dlmlock->l_ast_data != NULL) {
obj = osc2cl(dlmlock->l_ast_data);
- dlmlock->l_ast_data = NULL;
-
cl_object_get(obj);
}
unlock_res_and_lock(dlmlock);
+ CFS_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_CANCEL, 5);
+
/* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
* the object has been destroyed. */
if (obj != NULL) {
/* Destroy pages covered by the extent of the DLM lock */
result = osc_lock_flush(cl2osc(obj),
- cl_index(obj, extent->start),
- cl_index(obj, extent->end),
+ extent->start >> PAGE_SHIFT,
+ extent->end >> PAGE_SHIFT,
mode, discard);
/* losing a lock, update kms */
lock_res_and_lock(dlmlock);
+ /* clear l_ast_data only after flushing the data, so that a
+ * glimpse AST can still find the lock and the object */
+ dlmlock->l_ast_data = NULL;
cl_object_attr_lock(obj);
/* Must get the value under the lock to avoid race. */
old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
RETURN(result);
}
-static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
+int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
{
struct ptlrpc_request *req = data;
struct lu_env *env;
struct ost_lvb *lvb;
struct req_capsule *cap;
struct cl_object *obj = NULL;
+ struct ldlm_resource *res = dlmlock->l_resource;
+ struct ldlm_match_data matchdata = { 0 };
+ union ldlm_policy_data policy;
+ enum ldlm_mode mode = LCK_PW | LCK_GROUP | LCK_PR;
int result;
__u16 refcheck;
if (IS_ERR(env))
GOTO(out, result = PTR_ERR(env));
+ policy.l_extent.start = 0;
+ policy.l_extent.end = LUSTRE_EOF;
- lock_res_and_lock(dlmlock);
- if (dlmlock->l_ast_data != NULL) {
- obj = osc2cl(dlmlock->l_ast_data);
- cl_object_get(obj);
+ matchdata.lmd_mode = &mode;
+ matchdata.lmd_policy = &policy;
+ matchdata.lmd_flags = LDLM_FL_TEST_LOCK | LDLM_FL_CBPENDING;
+ matchdata.lmd_match = LDLM_MATCH_UNREF | LDLM_MATCH_AST_ANY;
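+ /* the matcher accepts any granted PR/PW/GROUP extent lock on the
+ * resource, even an unreferenced one or one being cancelled, since
+ * any such lock may still carry the l_ast_data we are after */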
+
+ LDLM_LOCK_GET(dlmlock);
+
+ /* If any dlmlock has l_ast_data set, we must find it or we risk
+ * missing a size update done under a different lock.
+ */
+ while (dlmlock) {
+ lock_res_and_lock(dlmlock);
+ if (dlmlock->l_ast_data) {
+ obj = osc2cl(dlmlock->l_ast_data);
+ cl_object_get(obj);
+ }
+ unlock_res_and_lock(dlmlock);
+ LDLM_LOCK_RELEASE(dlmlock);
+
+ dlmlock = NULL;
+
+ if (obj == NULL && res->lr_type == LDLM_EXTENT) {
+ if (CFS_FAIL_CHECK(OBD_FAIL_OSC_NO_SIZE_DATA))
+ break;
+
+ lock_res(res);
+ dlmlock = search_itree(res, &matchdata);
+ unlock_res(res);
+ }
}
- unlock_res_and_lock(dlmlock);
if (obj != NULL) {
/* Do not grab the mutex of cl_lock for glimpse.
req->rq_status = result;
RETURN(result);
}
+EXPORT_SYMBOL(osc_ldlm_glimpse_ast);
-static int weigh_cb(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops, void *cbdata)
+static bool weigh_cb(const struct lu_env *env, struct cl_io *io,
+ void **pvec, int count, void *cbdata)
{
- struct cl_page *page = ops->ops_cl.cpl_page;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct osc_page *ops = pvec[i];
+ struct cl_page *page = ops->ops_cl.cpl_page;
- if (cl_page_is_vmlocked(env, page)
- || PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage)
- )
- return CLP_GANG_ABORT;
+ if (PageLocked(page->cp_vmpage) ||
+ PageDirty(page->cp_vmpage) ||
+ PageWriteback(page->cp_vmpage))
+ return false;
- *(pgoff_t *)cbdata = osc_index(ops) + 1;
- return CLP_GANG_OKAY;
+ *(pgoff_t *)cbdata = osc_index(ops) + 1;
+ }
+ return true;
}
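+/* a false return from weigh_cb aborts the gang lookup: the extent still
+ * covers an in-use (locked, dirty or writeback) page, so the lock must be
+ * weighed as busy rather than being reported cheap to cancel */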
static unsigned long osc_lock_weight(const struct lu_env *env,
struct osc_object *oscobj,
- struct ldlm_extent *extent)
+ loff_t start, loff_t end)
{
- struct cl_io *io = &osc_env_info(env)->oti_io;
+ struct cl_io *io = osc_env_thread_io(env);
struct cl_object *obj = cl_object_top(&oscobj->oo_cl);
- pgoff_t page_index;
- int result;
+ pgoff_t page_index;
+ int result;
+
ENTRY;
io->ci_obj = obj;
io->ci_ignore_layout = 1;
result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
if (result != 0)
- RETURN(result);
+ RETURN(1);
- page_index = cl_index(obj, extent->start);
- do {
- result = osc_page_gang_lookup(env, io, oscobj,
- page_index,
- cl_index(obj, extent->end),
- weigh_cb, (void *)&page_index);
- if (result == CLP_GANG_ABORT)
- break;
- if (result == CLP_GANG_RESCHED)
- cond_resched();
- } while (result != CLP_GANG_OKAY);
+ page_index = start >> PAGE_SHIFT;
+
+ if (!osc_page_gang_lookup(env, io, oscobj,
+ page_index, end >> PAGE_SHIFT,
+ weigh_cb, (void *)&page_index))
+ result = 1;
cl_io_fini(env, io);
- return result == CLP_GANG_ABORT ? 1 : 0;
+ return result;
}
/**
*/
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
{
- struct lu_env *env;
- struct osc_object *obj;
- struct osc_lock *oscl;
- unsigned long weight;
- bool found = false;
- __u16 refcheck;
+ struct lu_env *env;
+ struct osc_object *obj;
+ struct osc_lock *oscl;
+ unsigned long weight;
+ bool found = false;
+ __u16 refcheck;
+
ENTRY;
might_sleep();
/* Mostly because of lack of memory, do not eliminate this lock */
RETURN(1);
- LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
+ LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT ||
+ dlmlock->l_resource->lr_type == LDLM_IBITS);
+
+ lock_res_and_lock(dlmlock);
obj = dlmlock->l_ast_data;
+ if (obj)
+ cl_object_get(osc2cl(obj));
+ unlock_res_and_lock(dlmlock);
+
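+ /* the object reference is taken under the resource lock so that obj
+ * cannot be freed once a concurrent blocking AST clears l_ast_data */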
if (obj == NULL)
- GOTO(out, weight = 1);
+ GOTO(out, weight = 0);
spin_lock(&obj->oo_ol_spin);
list_for_each_entry(oscl, &obj->oo_ol_list, ols_nextlock_oscobj) {
- if (oscl->ols_dlmlock != NULL && oscl->ols_dlmlock != dlmlock)
- continue;
- found = true;
+ if (oscl->ols_dlmlock == dlmlock) {
+ found = true;
+ break;
+ }
}
spin_unlock(&obj->oo_ol_spin);
if (found) {
GOTO(out, weight = 1);
}
- weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent);
+ if (dlmlock->l_resource->lr_type == LDLM_EXTENT)
+ weight = osc_lock_weight(env, obj,
+ dlmlock->l_policy_data.l_extent.start,
+ dlmlock->l_policy_data.l_extent.end);
+ else if (ldlm_has_dom(dlmlock))
+ weight = osc_lock_weight(env, obj, 0, OBD_OBJECT_EOF);
+ /* The DOM bit can be cancelled at any time; in that case, we know
+ * there are no pages, so just return weight of 0
+ */
+ else
+ weight = 0;
+
EXIT;
out:
+ if (obj)
+ cl_object_put(env, osc2cl(obj));
+
cl_env_put(env, &refcheck);
return weight;
}
+EXPORT_SYMBOL(osc_ldlm_weigh_ast);
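+/* weight semantics: a nonzero weight means the DLM lock is still backed by
+ * an osc_lock or by cached pages and should be kept; 0 marks it as cheap
+ * for the LRU to cancel */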
static void osc_lock_build_einfo(const struct lu_env *env,
const struct cl_lock *lock,
* Additional policy can be implemented here, e.g., never do lockless-io
* for large extents.
*/
-static void osc_lock_to_lockless(const struct lu_env *env,
- struct osc_lock *ols, int force)
+void osc_lock_to_lockless(const struct lu_env *env,
+ struct osc_lock *ols, int force)
{
- struct cl_lock_slice *slice = &ols->ols_cl;
-
- LASSERT(ols->ols_state == OLS_NEW ||
- ols->ols_state == OLS_UPCALL_RECEIVED);
-
- if (force) {
- ols->ols_locklessable = 1;
- slice->cls_ops = &osc_lock_lockless_ops;
- } else {
- struct osc_io *oio = osc_env_io(env);
- struct cl_io *io = oio->oi_cl.cis_io;
- struct cl_object *obj = slice->cls_obj;
- struct osc_object *oob = cl2osc(obj);
- const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
- struct obd_connect_data *ocd;
-
- LASSERT(io->ci_lockreq == CILR_MANDATORY ||
- io->ci_lockreq == CILR_MAYBE ||
- io->ci_lockreq == CILR_NEVER);
-
- ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
- ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
- (io->ci_lockreq == CILR_MAYBE) &&
- (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
- if (io->ci_lockreq == CILR_NEVER ||
- /* lockless IO */
- (ols->ols_locklessable && osc_object_is_contended(oob)) ||
- /* lockless truncate */
- (cl_io_is_trunc(io) &&
- (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
- osd->od_lockless_truncate)) {
- ols->ols_locklessable = 1;
- slice->cls_ops = &osc_lock_lockless_ops;
- }
- }
- LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
+ struct cl_lock_slice *slice = &ols->ols_cl;
+ struct osc_io *oio = osc_env_io(env);
+ struct cl_io *io = oio->oi_cl.cis_io;
+ struct cl_object *obj = slice->cls_obj;
+ struct osc_object *oob = cl2osc(obj);
+ struct obd_connect_data *ocd;
+
+ LASSERT(ols->ols_state == OLS_NEW ||
+ ols->ols_state == OLS_UPCALL_RECEIVED);
+
+ if (force) {
+ ols->ols_locklessable = 1;
+ slice->cls_ops = ols->ols_lockless_ops;
+ } else {
+ LASSERT(io->ci_lockreq == CILR_MANDATORY ||
+ io->ci_lockreq == CILR_MAYBE ||
+ io->ci_lockreq == CILR_NEVER);
+
+ ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
+ ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
+ (io->ci_lockreq == CILR_MAYBE) &&
+ (ocd->ocd_connect_flags &
+ OBD_CONNECT_SRVLOCK);
+ if (io->ci_lockreq == CILR_NEVER) {
+ ols->ols_locklessable = 1;
+ slice->cls_ops = ols->ols_lockless_ops;
+ }
+ }
+ LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
}
+EXPORT_SYMBOL(osc_lock_to_lockless);
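+/* a lockless lock attaches no DLM lock at all; IO under it is expected to
+ * be performed with server-side locking instead, which is why the
+ * OBD_CONNECT_SRVLOCK check above is needed */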
static bool osc_lock_compatible(const struct osc_lock *qing,
const struct osc_lock *qed)
struct cl_lock_descr *qed_descr = &qed->ols_cl.cls_lock->cll_descr;
struct cl_lock_descr *qing_descr = &qing->ols_cl.cls_lock->cll_descr;
- if (qed->ols_glimpse)
+ if (qed->ols_glimpse || qed->ols_speculative)
return true;
if (qing_descr->cld_mode == CLM_READ && qed_descr->cld_mode == CLM_READ)
return true;
}
-static void osc_lock_wake_waiters(const struct lu_env *env,
- struct osc_object *osc,
- struct osc_lock *oscl)
+void osc_lock_wake_waiters(const struct lu_env *env, struct osc_object *osc,
+ struct osc_lock *oscl)
{
+ struct osc_lock *scan;
+
spin_lock(&osc->oo_ol_spin);
list_del_init(&oscl->ols_nextlock_oscobj);
spin_unlock(&osc->oo_ol_spin);
spin_lock(&oscl->ols_lock);
- while (!list_empty(&oscl->ols_waiting_list)) {
- struct osc_lock *scan;
-
- scan = list_entry(oscl->ols_waiting_list.next, struct osc_lock,
- ols_wait_entry);
+ while ((scan = list_first_entry_or_null(&oscl->ols_waiting_list,
+ struct osc_lock,
+ ols_wait_entry)) != NULL) {
list_del_init(&scan->ols_wait_entry);
cl_sync_io_note(env, scan->ols_owner, 0);
}
spin_unlock(&oscl->ols_lock);
}
+EXPORT_SYMBOL(osc_lock_wake_waiters);
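+/* enqueues that conflict with an existing lock on the object (see
+ * osc_lock_enqueue_wait() below) park on its ols_waiting_list; each waiter
+ * is resumed here through its cl_sync_io anchor once the lock goes away */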
-static int osc_lock_enqueue_wait(const struct lu_env *env,
- struct osc_object *obj, struct osc_lock *oscl)
+int osc_lock_enqueue_wait(const struct lu_env *env, struct osc_object *obj,
+ struct osc_lock *oscl)
{
struct osc_lock *tmp_oscl;
struct cl_lock_descr *need = &oscl->ols_cl.cls_lock->cll_descr;
struct cl_sync_io *waiter = &osc_env_info(env)->oti_anchor;
int rc = 0;
+
ENTRY;
spin_lock(&obj->oo_ol_spin);
continue;
/* wait for conflicting lock to be canceled */
- cl_sync_io_init(waiter, 1, cl_sync_io_end);
+ cl_sync_io_init(waiter, 1);
oscl->ols_owner = waiter;
spin_lock(&tmp_oscl->ols_lock);
RETURN(rc);
}
+EXPORT_SYMBOL(osc_lock_enqueue_wait);
/**
* Implementation of cl_lock_operations::clo_enqueue() method for osc
struct osc_io *oio = osc_env_io(env);
struct osc_object *osc = cl2osc(slice->cls_obj);
struct osc_lock *oscl = cl2osc_lock(slice);
+ struct obd_export *exp = osc_export(osc);
struct cl_lock *lock = slice->cls_lock;
struct ldlm_res_id *resname = &info->oti_resname;
union ldlm_policy_data *policy = &info->oti_policy;
ENTRY;
LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
- "lock = %p, ols = %p\n", lock, oscl);
+ "lock = %px, ols = %px\n", lock, oscl);
if (oscl->ols_state == OLS_GRANTED)
RETURN(0);
+ if ((oscl->ols_flags & LDLM_FL_NO_EXPANSION) &&
+ !exp_connect_lockahead(exp)) {
+ result = -EOPNOTSUPP;
+ CERROR("%s: server does not support lockahead/locknoexpand: rc = %d\n",
+ exp->exp_obd->obd_name, result);
+ RETURN(result);
+ }
+
if (oscl->ols_flags & LDLM_FL_TEST_LOCK)
GOTO(enqueue_base, 0);
- if (oscl->ols_glimpse) {
- LASSERT(equi(oscl->ols_agl, anchor == NULL));
+ /* For glimpse and/or speculative locks, do not wait for the server's
+ * reply to the LDLM request */
+ if (oscl->ols_glimpse || oscl->ols_speculative) {
+ /* Speculative and glimpse locks do not have an anchor */
+ LASSERT(equi(oscl->ols_speculative, anchor == NULL));
async = true;
GOTO(enqueue_base, 0);
}
/**
* DLM lock's ast data must be osc_object;
- * if glimpse or AGL lock, async of osc_enqueue_base() must be true,
+ * if this is a glimpse or speculative lock, the async argument of
+ * osc_enqueue_base() must be true
+ *
+ * For non-speculative locks:
* DLM's enqueue callback set to osc_lock_upcall() with cookie as
* osc_lock.
+ * For speculative locks:
+ * the upcall is osc_lock_upcall_speculative and the cookie is the
+ * osc object, since there is no osc_lock
*/
ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
osc_lock_build_policy(env, lock, policy);
- if (oscl->ols_agl) {
+ if (oscl->ols_speculative) {
oscl->ols_einfo.ei_cbdata = NULL;
/* hold a reference for callback */
cl_object_get(osc2cl(osc));
- upcall = osc_lock_upcall_agl;
+ upcall = osc_lock_upcall_speculative;
cookie = osc;
}
- result = osc_enqueue_base(osc_export(osc), resname, &oscl->ols_flags,
+ result = osc_enqueue_base(exp, resname, &oscl->ols_flags,
policy, &oscl->ols_lvb,
- osc->oo_oinfo->loi_kms_valid,
upcall, cookie,
&oscl->ols_einfo, PTLRPCD_SET, async,
- oscl->ols_agl);
+ oscl->ols_speculative);
if (result == 0) {
if (osc_lock_is_lockless(oscl)) {
oio->oi_lockless = 1;
} else if (!async) {
+ if (CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_IDLE_RACE)) {
+ CFS_RACE(OBD_FAIL_PTLRPC_IDLE_RACE);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) / 2);
+ }
LASSERT(oscl->ols_state == OLS_GRANTED);
LASSERT(oscl->ols_hold);
LASSERT(oscl->ols_dlmlock != NULL);
}
- } else if (oscl->ols_agl) {
+ } else if (oscl->ols_speculative) {
cl_object_put(env, osc2cl(osc));
- result = 0;
+ if (oscl->ols_glimpse) {
+ /* hide error for AGL request */
+ result = 0;
+ }
}
out:
*
* - cancels ldlm lock (ldlm_cli_cancel()).
*/
-static void osc_lock_cancel(const struct lu_env *env,
- const struct cl_lock_slice *slice)
+void osc_lock_cancel(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
{
struct osc_object *obj = cl2osc(slice->cls_obj);
struct osc_lock *oscl = cl2osc_lock(slice);
osc_lock_wake_waiters(env, obj, oscl);
EXIT;
}
+EXPORT_SYMBOL(osc_lock_cancel);
-static int osc_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_lock_slice *slice)
+int osc_lock_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct cl_lock_slice *slice)
{
struct osc_lock *lock = cl2osc_lock(slice);
osc_lvb_print(env, cookie, p, &lock->ols_lvb);
return 0;
}
+EXPORT_SYMBOL(osc_lock_print);
static const struct cl_lock_operations osc_lock_ops = {
.clo_fini = osc_lock_fini,
{
struct osc_lock *ols = cl2osc_lock(slice);
struct osc_object *osc = cl2osc(slice->cls_obj);
- struct cl_lock_descr *descr = &slice->cls_lock->cll_descr;
- int result;
LASSERT(ols->ols_dlmlock == NULL);
- result = osc_lock_flush(osc, descr->cld_start, descr->cld_end,
- descr->cld_mode, 0);
- if (result)
- CERROR("Pages for lockless lock %p were not purged(%d)\n",
- ols, result);
-
osc_lock_wake_waiters(env, osc, ols);
}
.clo_print = osc_lock_print
};
-static void osc_lock_set_writer(const struct lu_env *env,
- const struct cl_io *io,
- struct cl_object *obj, struct osc_lock *oscl)
+void osc_lock_set_writer(const struct lu_env *env, const struct cl_io *io,
+ struct cl_object *obj, struct osc_lock *oscl)
{
struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
pgoff_t io_start;
return;
if (likely(io->ci_type == CIT_WRITE)) {
- io_start = cl_index(obj, io->u.ci_rw.crw_pos);
- io_end = cl_index(obj, io->u.ci_rw.crw_pos +
- io->u.ci_rw.crw_count - 1);
- if (cl_io_is_append(io)) {
- io_start = 0;
- io_end = CL_PAGE_EOF;
- }
+ io_start = io->u.ci_rw.crw_pos >> PAGE_SHIFT;
+ io_end = (io->u.ci_rw.crw_pos +
+ io->u.ci_rw.crw_bytes - 1) >> PAGE_SHIFT;
} else {
LASSERT(cl_io_is_mkwrite(io));
io_start = io_end = io->u.ci_fault.ft_index;
}
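+ /* an append write does not know its extent in advance, so it is
+ * covered only by a full-file write lock; any CLM_WRITE lock on this
+ * object therefore matches it */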
if (descr->cld_mode >= CLM_WRITE &&
- descr->cld_start <= io_start && descr->cld_end >= io_end) {
+ (cl_io_is_append(io) ||
+ (descr->cld_start <= io_start && descr->cld_end >= io_end))) {
struct osc_io *oio = osc_env_io(env);
/* There must be only one lock to match the write region */
oio->oi_write_osclock = oscl;
}
}
+EXPORT_SYMBOL(osc_lock_set_writer);
+
+void osc_lock_set_reader(const struct lu_env *env, const struct cl_io *io,
+ struct cl_object *obj, struct osc_lock *oscl)
+{
+ struct osc_io *oio = osc_env_io(env);
+
+ if (!cl_object_same(io->ci_obj, obj))
+ return;
+
+ if (oscl->ols_glimpse || osc_lock_is_lockless(oscl))
+ return;
+
+ if (oio->oi_read_osclock == NULL)
+ oio->oi_read_osclock = oscl;
+}
+EXPORT_SYMBOL(osc_lock_set_reader);
int osc_lock_init(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
INIT_LIST_HEAD(&oscl->ols_waiting_list);
INIT_LIST_HEAD(&oscl->ols_wait_entry);
INIT_LIST_HEAD(&oscl->ols_nextlock_oscobj);
+ oscl->ols_lockless_ops = &osc_lock_lockless_ops;
+
+ /* Speculative lock requests must be either no_expand or glimpse
+ * requests (CEF_GLIMPSE); non-glimpse no_expand speculative extent
+ * locks will break ofd_intent_cb (see the comment there). */
+ LASSERT(ergo((enqflags & CEF_SPECULATIVE) != 0,
+ (enqflags & (CEF_LOCK_NO_EXPAND | CEF_GLIMPSE)) != 0));
oscl->ols_flags = osc_enq2ldlm_flags(enqflags);
- oscl->ols_agl = !!(enqflags & CEF_AGL);
- if (oscl->ols_agl)
- oscl->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
+ oscl->ols_speculative = !!(enqflags & CEF_SPECULATIVE);
+
if (oscl->ols_flags & LDLM_FL_HAS_INTENT) {
oscl->ols_flags |= LDLM_FL_BLOCK_GRANTED;
oscl->ols_glimpse = 1;
}
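+ /* io->ci_ndelay requests nonblocking IO: make the enqueue itself
+ * nonblocking too; failures are mapped to -EAGAIN in
+ * osc_lock_upcall() */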
+ if (io->ci_ndelay && cl_object_same(io->ci_obj, obj))
+ oscl->ols_flags |= LDLM_FL_NDELAY;
osc_lock_build_einfo(env, lock, cl2osc(obj), &oscl->ols_einfo);
cl_lock_slice_add(lock, &oscl->ols_cl, obj, &osc_lock_ops);
if (!(enqflags & CEF_MUST))
/* try to convert this lock to a lockless lock */
osc_lock_to_lockless(env, oscl, (enqflags & CEF_NEVER));
- if (oscl->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
- oscl->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io))
osc_lock_set_writer(env, io, obj, oscl);
+ else if (io->ci_type == CIT_READ ||
+ (io->ci_type == CIT_FAULT && !io->u.ci_fault.ft_mkwrite))
+ osc_lock_set_reader(env, io, obj, oscl);
LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %#llx",
lock, oscl, oscl->ols_flags);
* Finds an existing lock covering given index and optionally different from a
* given \a except lock.
*/
-struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
- struct osc_object *obj, pgoff_t index,
- enum osc_dap_flags dap_flags)
+struct ldlm_lock *osc_obj_dlmlock_at_pgoff(const struct lu_env *env,
+ struct osc_object *obj,
+ pgoff_t index,
+ enum osc_dap_flags dap_flags)
{
struct osc_thread_info *info = osc_env_info(env);
struct ldlm_res_id *resname = &info->oti_resname;
struct ldlm_lock *lock = NULL;
enum ldlm_mode mode;
__u64 flags;
+ enum ldlm_match_flags match_flags = 0;
ENTRY;
flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
if (dap_flags & OSC_DAP_FL_TEST_LOCK)
flags |= LDLM_FL_TEST_LOCK;
+
+ if (dap_flags & OSC_DAP_FL_AST)
+ match_flags |= LDLM_MATCH_AST;
+
+ if (dap_flags & OSC_DAP_FL_CANCELING)
+ match_flags |= LDLM_MATCH_UNREF;
+
+ if (dap_flags & OSC_DAP_FL_RIGHT)
+ match_flags |= LDLM_MATCH_RIGHT;
+
/*
* It is fine to match any group lock since there can be only one
* with a unique gid, and it conflicts with all other lock modes too
*/
again:
- mode = osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
- LCK_PR | LCK_PW | LCK_GROUP, &flags, obj, &lockh,
- dap_flags & OSC_DAP_FL_CANCELING);
+ mode = osc_match_base(env, osc_export(obj), resname, LDLM_EXTENT,
+ policy, LCK_PR | LCK_PW | LCK_GROUP, &flags,
+ obj, &lockh, match_flags);
if (mode != 0) {
lock = ldlm_handle2lock(&lockh);
/* RACE: the lock is cancelled so let's try again */