-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: Nikita Danilov <nikita.danilov@sun.com>
*/
-/** \addtogroup osc osc @{ */
-
#define DEBUG_SUBSYSTEM S_OSC
#ifdef __KERNEL__
#include "osc_cl_internal.h"
+/** \addtogroup osc
+ * @{
+ */
+
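+/* Bias temporarily added to ols_pageref by osc_dlm_lock_pageref() to detect
+ * outstanding page references on a lock; see also the LASSERT in
+ * osc_lock_fini(). */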
+#define _PAGEREF_MAGIC (-10000000)
+
/*****************************************************************************
*
* Type conversions.
static const struct cl_lock_operations osc_lock_lockless_ops;
static void osc_lock_to_lockless(const struct lu_env *env,
struct osc_lock *ols, int force);
+static int osc_lock_has_pages(struct osc_lock *olck);
int osc_lock_is_lockless(const struct osc_lock *olck)
{
*/
static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
- struct ldlm_lock *dlmlock;
+ struct ldlm_lock *dlmlock;
- spin_lock(&osc_ast_guard);
- dlmlock = olck->ols_lock;
- if (dlmlock == NULL) {
- spin_unlock(&osc_ast_guard);
+ spin_lock(&osc_ast_guard);
+ dlmlock = olck->ols_lock;
+ if (dlmlock == NULL) {
+ spin_unlock(&osc_ast_guard);
return;
}
* call to osc_lock_detach() */
dlmlock->l_ast_data = NULL;
olck->ols_handle.cookie = 0ULL;
- spin_unlock(&osc_ast_guard);
+ spin_unlock(&osc_ast_guard);
lock_res_and_lock(dlmlock);
if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
struct cl_object *obj = olck->ols_cl.cls_obj;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
+ __u64 old_kms;
+ cl_object_attr_lock(obj);
+ /* Must get the value under the lock to avoid possible races. */
+ old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
/* Update the kms. Need to loop all granted locks.
* Not a problem for the client */
attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
- unlock_res_and_lock(dlmlock);
- cl_object_attr_lock(obj);
cl_object_attr_set(env, obj, attr, CAT_KMS);
cl_object_attr_unlock(obj);
- } else
- unlock_res_and_lock(dlmlock);
+ }
+ unlock_res_and_lock(dlmlock);
/* release a reference taken in osc_lock_upcall0(). */
+ LASSERT(olck->ols_has_ref);
lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
LDLM_LOCK_RELEASE(dlmlock);
+ olck->ols_has_ref = 0;
+}
+
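+/* If this osc_lock still holds a mode reference on its ldlm lock (ols_hold),
+ * release it via osc_cancel_base(). */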
+static int osc_lock_unhold(struct osc_lock *ols)
+{
+ int result = 0;
+
+ if (ols->ols_hold) {
+ ols->ols_hold = 0;
+ result = osc_cancel_base(&ols->ols_handle,
+ ols->ols_einfo.ei_mode);
+ }
+ return result;
}
static int osc_lock_unuse(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
struct osc_lock *ols = cl2osc_lock(slice);
- int result;
- LASSERT(ols->ols_state == OLS_GRANTED ||
- ols->ols_state == OLS_UPCALL_RECEIVED);
LINVRNT(osc_lock_invariant(ols));
- if (ols->ols_glimpse) {
- LASSERT(ols->ols_hold == 0);
+ switch (ols->ols_state) {
+ case OLS_NEW:
+ LASSERT(!ols->ols_hold);
+ LASSERT(ols->ols_agl);
return 0;
+ case OLS_UPCALL_RECEIVED:
+ osc_lock_unhold(ols);
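+ /* fall through */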
+ case OLS_ENQUEUED:
+ LASSERT(!ols->ols_hold);
+ osc_lock_detach(env, ols);
+ ols->ols_state = OLS_NEW;
+ return 0;
+ case OLS_GRANTED:
+ LASSERT(!ols->ols_glimpse);
+ LASSERT(ols->ols_hold);
+ /*
+ * Move lock into OLS_RELEASED state before calling
+ * osc_cancel_base() so that possible synchronous cancellation
+ * (that always happens e.g., for liblustre) sees that lock is
+ * released.
+ */
+ ols->ols_state = OLS_RELEASED;
+ return osc_lock_unhold(ols);
+ default:
+ CERROR("Impossible state: %d\n", ols->ols_state);
+ LBUG();
}
- LASSERT(ols->ols_hold);
-
- /*
- * Move lock into OLS_RELEASED state before calling osc_cancel_base()
- * so that possible synchronous cancellation (that always happens
- * e.g., for liblustre) sees that lock is released.
- */
- ols->ols_state = OLS_RELEASED;
- ols->ols_hold = 0;
- result = osc_cancel_base(&ols->ols_handle, ols->ols_einfo.ei_mode);
- ols->ols_has_ref = 0;
- return result;
}
static void osc_lock_fini(const struct lu_env *env,
* to the lock), before reply from a server was received. In this case
* lock is destroyed immediately after upcall.
*/
- if (ols->ols_hold)
- osc_lock_unuse(env, slice);
- if (ols->ols_lock != NULL)
- osc_lock_detach(env, ols);
+ osc_lock_unhold(ols);
+ LASSERT(ols->ols_lock == NULL);
+ LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
+ cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
}
} else {
/*
* In reality, where ost server expects ->lsm_object_id and
- * ->lsm_object_gr in rename.
+ * ->lsm_object_seq in rename.
*/
- osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr,
+ osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq,
resname);
}
}
const struct cl_lock_descr *d = &lock->cll_descr;
osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
+ policy->l_extent.gid = d->cld_gid;
}
-static int osc_enq2ldlm_flags(__u32 enqflags)
+static __u64 osc_enq2ldlm_flags(__u32 enqflags)
{
- int result = 0;
+ __u64 result = 0;
LASSERT((enqflags & ~CEF_MASK) == 0);
static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
{
- struct osc_lock *olck;
+ struct osc_lock *olck;
- lock_res_and_lock(dlm_lock);
- spin_lock(&osc_ast_guard);
+ lock_res_and_lock(dlm_lock);
+ spin_lock(&osc_ast_guard);
olck = dlm_lock->l_ast_data;
if (olck != NULL) {
struct cl_lock *lock = olck->ols_cl.cls_lock;
} else
olck = NULL;
}
- spin_unlock(&osc_ast_guard);
- unlock_res_and_lock(dlm_lock);
- return olck;
+ spin_unlock(&osc_ast_guard);
+ unlock_res_and_lock(dlm_lock);
+ return olck;
}
static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
ENTRY;
- if (!(olck->ols_flags & LDLM_FL_LVB_READY)) {
- EXIT;
- return;
- }
+ if (!(olck->ols_flags & LDLM_FL_LVB_READY))
+ RETURN_EXIT;
lvb = &olck->ols_lvb;
obj = olck->ols_cl.cls_obj;
LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
ENTRY;
- if (olck->ols_state != OLS_GRANTED) {
+ if (olck->ols_state < OLS_GRANTED) {
lock = olck->ols_cl.cls_lock;
ext = &dlmlock->l_policy_data.l_extent;
descr = &osc_env_info(env)->oti_descr;
descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
descr->cld_start = cl_index(descr->cld_obj, ext->start);
descr->cld_end = cl_index(descr->cld_obj, ext->end);
+ descr->cld_gid = ext->gid;
/*
* tell upper layers the extent of the lock that was actually
* granted
LASSERT(dlmlock != NULL);
lock_res_and_lock(dlmlock);
- spin_lock(&osc_ast_guard);
- LASSERT(dlmlock->l_ast_data == olck);
- LASSERT(olck->ols_lock == NULL);
- olck->ols_lock = dlmlock;
- spin_unlock(&osc_ast_guard);
+ spin_lock(&osc_ast_guard);
+ LASSERT(dlmlock->l_ast_data == olck);
+ LASSERT(olck->ols_lock == NULL);
+ olck->ols_lock = dlmlock;
+ spin_unlock(&osc_ast_guard);
/*
* Lock might be not yet granted. In this case, completion ast
* this.
*/
ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
- olck->ols_hold = olck->ols_has_ref = 1;
+ olck->ols_hold = 1;
/* lock reference taken by ldlm_handle2lock_long() is owned by
* osc_lock and released in osc_lock_detach() */
lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
+ olck->ols_has_ref = 1;
}
/**
*/
static int osc_lock_upcall(void *cookie, int errcode)
{
- struct osc_lock *olck = cookie;
- struct cl_lock_slice *slice = &olck->ols_cl;
- struct cl_lock *lock = slice->cls_lock;
- struct lu_env *env;
-
- int refcheck;
+ struct osc_lock *olck = cookie;
+ struct cl_lock_slice *slice = &olck->ols_cl;
+ struct cl_lock *lock = slice->cls_lock;
+ struct lu_env *env;
+ struct cl_env_nest nest;
ENTRY;
- /*
- * XXX environment should be created in ptlrpcd.
- */
- env = cl_env_get(&refcheck);
+ env = cl_env_nested_get(&nest);
if (!IS_ERR(env)) {
int rc;
} else if (olck->ols_state == OLS_CANCELLED) {
rc = -EIO;
} else {
- CERROR("Impossible state: %i\n", olck->ols_state);
+ CERROR("Impossible state: %d\n", olck->ols_state);
LBUG();
}
if (rc) {
dlmlock = ldlm_handle2lock(&olck->ols_handle);
if (dlmlock != NULL) {
lock_res_and_lock(dlmlock);
- spin_lock(&osc_ast_guard);
- LASSERT(olck->ols_lock == NULL);
- dlmlock->l_ast_data = NULL;
- olck->ols_handle.cookie = 0ULL;
- spin_unlock(&osc_ast_guard);
+ spin_lock(&osc_ast_guard);
+ LASSERT(olck->ols_lock == NULL);
+ dlmlock->l_ast_data = NULL;
+ olck->ols_handle.cookie = 0ULL;
+ spin_unlock(&osc_ast_guard);
+ ldlm_lock_fail_match_locked(dlmlock);
unlock_res_and_lock(dlmlock);
LDLM_LOCK_PUT(dlmlock);
}
rc = 0;
}
- if (rc == 0)
- /* on error, lock was signaled by cl_lock_error() */
+ if (rc == 0) {
+ /* For the AGL case, the RPC sponsor may exit the cl_lock
+ * processing without calling wait() before the related OSC
+ * lock upcall(). So update the lock status according
+ * to the enqueue result inside the AGL upcall(). */
+ if (olck->ols_agl) {
+ lock->cll_flags |= CLF_FROM_UPCALL;
+ cl_wait_try(env, lock);
+ lock->cll_flags &= ~CLF_FROM_UPCALL;
+ if (!olck->ols_glimpse)
+ olck->ols_agl = 0;
+ }
cl_lock_signal(env, lock);
- else
+ /* del user for lock upcall cookie */
+ cl_unuse_try(env, lock);
+ } else {
+ /* del user for lock upcall cookie */
+ cl_lock_user_del(env, lock);
cl_lock_error(env, lock, rc);
+ }
- cl_lock_mutex_put(env, lock);
-
- /* release cookie reference, acquired by osc_lock_enqueue() */
- lu_ref_del(&lock->cll_reference, "upcall", lock);
- cl_lock_put(env, lock);
- cl_env_put(env, &refcheck);
- } else
- /* should never happen, similar to osc_ldlm_blocking_ast(). */
- LBUG();
- RETURN(errcode);
+ /* release cookie reference, acquired by osc_lock_enqueue() */
+ cl_lock_hold_release(env, lock, "upcall", lock);
+ cl_lock_mutex_put(env, lock);
+
+ lu_ref_del(&lock->cll_reference, "upcall", lock);
+ /* This may be the last reference, so it must be dropped
+ * after cl_lock_mutex_put(). */
+ cl_lock_put(env, lock);
+
+ cl_env_nested_put(&nest, env);
+ } else {
+ /* should never happen, similar to osc_ldlm_blocking_ast(). */
+ LBUG();
+ }
+ RETURN(errcode);
}
/**
CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
LASSERT(!osc_lock_is_lockless(olck));
- if (olck->ols_hold)
- /*
- * Lock might be still addref-ed here, if e.g., blocking ast
- * is sent for a failed lock.
- */
- osc_lock_unuse(env, &olck->ols_cl);
+ /*
+ * Lock might be still addref-ed here, if e.g., blocking ast
+ * is sent for a failed lock.
+ */
+ osc_lock_unhold(olck);
if (blocking && olck->ols_state < OLS_BLOCKED)
/*
* new environment has to be created to not corrupt outer context.
*/
env = cl_env_nested_get(&nest);
- if (!IS_ERR(env))
+ if (!IS_ERR(env)) {
result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
- else {
+ cl_env_nested_put(&nest, env);
+ } else {
result = PTR_ERR(env);
/*
* XXX This should never happen, as cl_lock is
else
CERROR("BAST failed: %d\n", result);
}
- cl_env_nested_put(&nest, env);
return result;
}
static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
- int flags, void *data)
+ __u64 flags, void *data)
{
- struct lu_env *env;
- void *env_cookie;
- struct osc_lock *olck;
- struct cl_lock *lock;
- int refcheck;
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct osc_lock *olck;
+ struct cl_lock *lock;
int result;
int dlmrc;
/* first, do dlm part of the work */
dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
/* then, notify cl_lock */
- env_cookie = cl_env_reenter();
- env = cl_env_get(&refcheck);
+ env = cl_env_nested_get(&nest);
if (!IS_ERR(env)) {
olck = osc_ast_data_get(dlmlock);
if (olck != NULL) {
LASSERT(dlmlock->l_lvb_data != NULL);
lock_res_and_lock(dlmlock);
olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
- if (olck->ols_lock == NULL)
+ if (olck->ols_lock == NULL) {
/*
* upcall (osc_lock_upcall()) hasn't yet been
* called. Do nothing now, upcall will bind
* and ldlm_lock are always bound when
* osc_lock is in OLS_GRANTED state.
*/
- ;
- else if (dlmlock->l_granted_mode != LCK_MINMODE)
+ } else if (dlmlock->l_granted_mode ==
+ dlmlock->l_req_mode) {
osc_lock_granted(env, olck, dlmlock, dlmrc);
+ }
unlock_res_and_lock(dlmlock);
- if (dlmrc != 0)
+
+ if (dlmrc != 0) {
+ CL_LOCK_DEBUG(D_ERROR, env, lock,
+ "dlmlock returned %d\n", dlmrc);
cl_lock_error(env, lock, dlmrc);
+ }
cl_lock_mutex_put(env, lock);
osc_ast_data_put(env, olck);
result = 0;
} else
result = -ELDLM_NO_LOCK_DATA;
- cl_env_put(env, &refcheck);
+ cl_env_nested_put(&nest, env);
} else
result = PTR_ERR(env);
- cl_env_reexit(env_cookie);
return dlmrc ?: result;
}
struct osc_lock *olck;
struct cl_lock *lock;
struct cl_object *obj;
+ struct cl_env_nest nest;
struct lu_env *env;
struct ost_lvb *lvb;
struct req_capsule *cap;
int result;
- int refcheck;
LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
- env = cl_env_get(&refcheck);
+ env = cl_env_nested_get(&nest);
if (!IS_ERR(env)) {
- /*
- * osc_ast_data_get() has to go after environment is
+ /* osc_ast_data_get() has to go after environment is
* allocated, because osc_ast_data() acquires a
* reference to a lock, and it can only be released in
* environment.
olck = osc_ast_data_get(dlmlock);
if (olck != NULL) {
lock = olck->ols_cl.cls_lock;
- cl_lock_mutex_get(env, lock);
+ /* Do not grab the mutex of cl_lock for glimpse.
+ * See LU-1274 for details.
+ * BTW, it's okay for cl_lock to be cancelled during
+ * this period because the server can handle this race.
+ * See ldlm_server_glimpse_ast() for details.
+ * cl_lock_mutex_get(env, lock); */
cap = &req->rq_pill;
req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
obj = lock->cll_descr.cld_obj;
result = cl_object_glimpse(env, obj, lvb);
}
- cl_lock_mutex_put(env, lock);
osc_ast_data_put(env, olck);
} else {
/*
lustre_pack_reply(req, 1, NULL, NULL);
result = -ELDLM_NO_LOCK_DATA;
}
- cl_env_put(env, &refcheck);
+ cl_env_nested_put(&nest, env);
} else
result = PTR_ERR(env);
req->rq_status = result;
*/
static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
{
+ struct cl_env_nest nest;
struct lu_env *env;
- int refcheck;
- void *cookie;
struct osc_lock *lock;
struct cl_lock *cll;
unsigned long weight;
ENTRY;
- might_sleep();
- cookie = cl_env_reenter();
+ cfs_might_sleep();
/*
* osc_ldlm_weigh_ast has a complex context since it might be called
* because of lock canceling, or from user's input. We have to make
* the upper context because cl_lock_put don't modify environment
* variables. But in case of ..
*/
- env = cl_env_get(&refcheck);
- if (IS_ERR(env)) {
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
/* Mostly because lack of memory, tend to eliminate this lock*/
- cl_env_reexit(cookie);
RETURN(0);
- }
LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
lock = osc_ast_data_get(dlmlock);
EXIT;
out:
- cl_env_put(env, &refcheck);
- cl_env_reexit(cookie);
+ cl_env_nested_put(&nest, env);
return weight;
}
}
/**
- * Cancels \a conflict lock and waits until it reached CLS_FREEING state. This
- * is called as a part of enqueuing to cancel conflicting locks early.
- *
- * \retval 0: success, \a conflict was cancelled and destroyed.
- *
- * \retval CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was
- * released in the process. Repeat enqueing.
- *
- * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and
- * either \a lock is non-blocking, or current thread
- * holds other locks, that prevent it from waiting
- * for cancel to complete.
- *
- * \retval -ve: other error, including -EINTR.
- *
- */
-static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock *conflict, int canwait)
-{
- int rc;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(cl_lock_is_mutexed(conflict));
-
- rc = 0;
- if (conflict->cll_state != CLS_FREEING) {
- cl_lock_cancel(env, conflict);
- cl_lock_delete(env, conflict);
- if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) {
- rc = -EWOULDBLOCK;
- if (cl_lock_nr_mutexed(env) > 2)
- /*
- * If mutices of locks other than @lock and
- * @scan are held by the current thread, it
- * cannot wait on @scan state change in a
- * dead-lock safe matter, so simply skip early
- * cancellation in this case.
- *
- * This means that early cancellation doesn't
- * work when there is even slight mutex
- * contention, as top-lock's mutex is usually
- * held at this time.
- */
- ;
- else if (canwait) {
- /* Waiting for @scan to be destroyed */
- cl_lock_mutex_put(env, lock);
- do {
- rc = cl_lock_state_wait(env, conflict);
- } while (!rc &&
- conflict->cll_state < CLS_FREEING);
- /* mutex was released, repeat enqueue. */
- rc = rc ?: CLO_REPEAT;
- cl_lock_mutex_get(env, lock);
- }
- }
- LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING));
- CDEBUG(D_INFO, "lock %p was %s freed now, rc (%d)\n",
- conflict, rc ? "not":"", rc);
- }
- return rc;
-}
-
-/**
* Determine if the lock should be converted into a lockless lock.
*
* Steps to check:
struct osc_lock *ols, int force)
{
struct cl_lock_slice *slice = &ols->ols_cl;
- struct cl_lock *lock = slice->cls_lock;
LASSERT(ols->ols_state == OLS_NEW ||
ols->ols_state == OLS_UPCALL_RECEIVED);
if (force) {
ols->ols_locklessable = 1;
- LASSERT(cl_lock_is_mutexed(lock));
slice->cls_ops = &osc_lock_lockless_ops;
} else {
struct osc_io *oio = osc_env_io(env);
io->ci_lockreq == CILR_NEVER);
ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
- ols->ols_locklessable = (io->ci_type != CIT_TRUNC) &&
+ ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
(io->ci_lockreq == CILR_MAYBE) &&
(ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
if (io->ci_lockreq == CILR_NEVER ||
/* lockless IO */
(ols->ols_locklessable && osc_object_is_contended(oob)) ||
/* lockless truncate */
- (io->ci_type == CIT_TRUNC &&
+ (cl_io_is_trunc(io) &&
(ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
osd->od_lockless_truncate)) {
ols->ols_locklessable = 1;
LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
}
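+/* The already enqueued lock (qed) is compatible with the lock being enqueued
+ * (qing) if qed is a glimpse lock and either it has already received its
+ * upcall or qing is a read lock; otherwise both locks must be read locks. */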
+static int osc_lock_compatible(const struct osc_lock *qing,
+ const struct osc_lock *qed)
+{
+ enum cl_lock_mode qing_mode;
+ enum cl_lock_mode qed_mode;
+
+ qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
+ if (qed->ols_glimpse &&
+ (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
+ return 1;
+
+ qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
+ return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
+}
+
/**
* Cancel all conflicting locks and wait for them to be destroyed.
*
struct cl_lock *lock = olck->ols_cl.cls_lock;
struct cl_lock_descr *descr = &lock->cll_descr;
struct cl_object_header *hdr = cl_object_header(descr->cld_obj);
- struct cl_lock_closure *closure = &osc_env_info(env)->oti_closure;
struct cl_lock *scan;
- struct cl_lock *temp;
+ struct cl_lock *conflict= NULL;
int lockless = osc_lock_is_lockless(olck);
int rc = 0;
- int canwait;
- int stop;
ENTRY;
LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_QUEUING);
- /*
- * XXX This function could be sped up if we had asynchronous
- * cancellation.
- */
+ /* let a glimpse lock enqueue anyway, because we actually
+ * don't need to cancel any conflicting locks. */
+ if (olck->ols_glimpse)
+ return 0;
+
+ spin_lock(&hdr->coh_lock_guard);
+ cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
+ struct cl_lock_descr *cld = &scan->cll_descr;
+ const struct osc_lock *scan_ols;
- canwait =
- !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) &&
- cl_lock_nr_mutexed(env) == 1;
- cl_lock_closure_init(env, closure, lock, canwait);
- spin_lock(&hdr->coh_lock_guard);
- list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) {
if (scan == lock)
- continue;
+ break;
if (scan->cll_state < CLS_QUEUING ||
scan->cll_state == CLS_FREEING ||
- scan->cll_descr.cld_start > descr->cld_end ||
- scan->cll_descr.cld_end < descr->cld_start)
+ cld->cld_start > descr->cld_end ||
+ cld->cld_end < descr->cld_start)
continue;
/* overlapped and living locks. */
- /* A tricky case for lockless pages:
- * We need to cancel the compatible locks if we're enqueuing
+
+ /* We're not supposed to give up group lock. */
+ if (scan->cll_descr.cld_mode == CLM_GROUP) {
+ LASSERT(descr->cld_mode != CLM_GROUP ||
+ descr->cld_gid != scan->cll_descr.cld_gid);
+ continue;
+ }
+
+ scan_ols = osc_lock_at(scan);
+
+ /* We need to cancel the compatible locks if we're enqueuing
* a lockless lock, for example:
* imagine that client has PR lock on [0, 1000], and thread T0
* is doing lockless IO in [500, 1500] region. Concurrent
* thread T1 can see lockless data in [500, 1000], which is
- * wrong, because these data are possibly stale.
- */
- if (!lockless && cl_lock_compatible(scan, lock))
+ * wrong, because these data are possibly stale. */
+ if (!lockless && osc_lock_compatible(olck, scan_ols))
continue;
- /* Now @scan is conflicting with @lock, this means current
- * thread have to sleep for @scan being destroyed. */
cl_lock_get_trust(scan);
- if (&temp->cll_linkage != &hdr->coh_locks)
- cl_lock_get_trust(temp);
- spin_unlock(&hdr->coh_lock_guard);
- lu_ref_add(&scan->cll_reference, "cancel-wait", lock);
-
- LASSERT(list_empty(&closure->clc_list));
- rc = cl_lock_closure_build(env, scan, closure);
- if (rc == 0) {
- rc = osc_lock_cancel_wait(env, lock, scan, canwait);
- cl_lock_disclosure(env, closure);
- if (rc == -EWOULDBLOCK)
- rc = 0;
- }
- if (rc == CLO_REPEAT && !canwait)
- /* cannot wait... no early cancellation. */
- rc = 0;
-
- lu_ref_del(&scan->cll_reference, "cancel-wait", lock);
- cl_lock_put(env, scan);
- spin_lock(&hdr->coh_lock_guard);
- /*
- * Lock list could have been modified, while spin-lock was
- * released. Check that it is safe to continue.
- */
- stop = list_empty(&temp->cll_linkage);
- if (&temp->cll_linkage != &hdr->coh_locks)
- cl_lock_put(env, temp);
- if (stop || rc != 0)
- break;
+ conflict = scan;
+ break;
}
- spin_unlock(&hdr->coh_lock_guard);
- cl_lock_closure_fini(closure);
- RETURN(rc);
-}
+ spin_unlock(&hdr->coh_lock_guard);
-/**
- * Deadlock avoidance for osc_lock_enqueue(). Consider following scenario:
- *
- * - Thread0: obtains PR:[0, 10]. Lock is busy.
- *
- * - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
- * PR:[0, 10], but cancellation of busy lock is postponed.
- *
- * - Thread0: enqueue PR:[30, 40]. Lock is locally matched to
- * PW:[5, 50], and thread0 waits for the lock completion never
- * releasing PR:[0, 10]---deadlock.
- *
- * The second PR lock can be glimpse (it is to deal with that situation that
- * ll_glimpse_size() has second argument, preventing local match of
- * not-yet-granted locks, see bug 10295). Similar situation is possible in the
- * case of memory mapped user level buffer.
- *
- * To prevent this we can detect a situation when current "thread" or "io"
- * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
- * the ols->ols_flags, or prevent local match with PW locks.
- */
-static int osc_deadlock_is_possible(const struct lu_env *env,
- struct cl_lock *lock)
-{
- struct cl_object *obj;
- struct cl_object_header *head;
- struct cl_lock *scan;
- struct osc_io *oio;
-
- int result;
-
- ENTRY;
-
- LASSERT(cl_lock_is_mutexed(lock));
-
- oio = osc_env_io(env);
- obj = lock->cll_descr.cld_obj;
- head = cl_object_header(obj);
-
- result = 0;
- spin_lock(&head->coh_lock_guard);
- list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
- if (scan != lock) {
- struct osc_lock *oscan;
-
- oscan = osc_lock_at(scan);
- LASSERT(oscan != NULL);
- if (oscan->ols_owner == oio) {
- result = 1;
- break;
- }
+ if (conflict) {
+ if (lock->cll_descr.cld_mode == CLM_GROUP) {
+ /* we want a group lock but a previous lock request
+ * conflicts; do not wait, just return 0 so the
+ * request is sent to the server
+ */
+ CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
+ "with %p, no wait, send to server\n",
+ lock, conflict);
+ cl_lock_put(env, conflict);
+ rc = 0;
+ } else {
+ CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
+ "will wait\n",
+ lock, conflict);
+ LASSERT(lock->cll_conflict == NULL);
+ lu_ref_add(&conflict->cll_reference, "cancel-wait",
+ lock);
+ lock->cll_conflict = conflict;
+ rc = CLO_WAIT;
}
}
- spin_unlock(&head->coh_lock_guard);
- RETURN(result);
+ RETURN(rc);
}
/**
* Implementation of cl_lock_operations::clo_enqueue() method for osc
* layer. This initiates ldlm enqueue:
*
- * - checks for possible dead-lock conditions (osc_deadlock_is_possible());
- *
* - cancels conflicting locks early (osc_lock_enqueue_wait());
*
* - calls osc_enqueue_base() to do actual enqueue.
*/
static int osc_lock_enqueue(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *_, __u32 enqflags)
+ struct cl_io *unused, __u32 enqflags)
{
struct osc_lock *ols = cl2osc_lock(slice);
struct cl_lock *lock = ols->ols_cl.cls_lock;
- struct osc_object *obj = cl2osc(slice->cls_obj);
- struct osc_thread_info *info = osc_env_info(env);
- struct ldlm_res_id *resname = &info->oti_resname;
- ldlm_policy_data_t *policy = &info->oti_policy;
- struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
int result;
ENTRY;
LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_QUEUING);
- LASSERT(ols->ols_state == OLS_NEW);
+ LASSERTF(ols->ols_state == OLS_NEW,
+ "Impossible state: %d\n", ols->ols_state);
- osc_lock_build_res(env, obj, resname);
- osc_lock_build_policy(env, lock, policy);
- ols->ols_flags = osc_enq2ldlm_flags(enqflags);
- if (osc_deadlock_is_possible(env, lock))
- ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
- if (ols->ols_flags & LDLM_FL_HAS_INTENT)
- ols->ols_glimpse = 1;
+ LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
+ "lock = %p, ols = %p\n", lock, ols);
result = osc_lock_enqueue_wait(env, ols);
if (result == 0) {
- if (!(enqflags & CEF_MUST))
- /* try to convert this lock to a lockless lock */
- osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
if (!osc_lock_is_lockless(ols)) {
- if (ols->ols_locklessable)
- ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
-
- /* a reference for lock, passed as an upcall cookie */
- cl_lock_get(lock);
- lu_ref_add(&lock->cll_reference, "upcall", lock);
+ struct osc_object *obj = cl2osc(slice->cls_obj);
+ struct osc_thread_info *info = osc_env_info(env);
+ struct ldlm_res_id *resname = &info->oti_resname;
+ ldlm_policy_data_t *policy = &info->oti_policy;
+ struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
+
+ /* the lock will be passed as the upcall cookie;
+ * hold a reference to prevent it from being freed. */
+ cl_lock_hold_add(env, lock, "upcall", lock);
+ /* also add a user for the lock */
+ cl_lock_user_add(env, lock);
ols->ols_state = OLS_ENQUEUED;
/*
* ldlm_lock_match(LDLM_FL_LVB_READY) waits for
* LDLM_CP_CALLBACK.
*/
+ osc_lock_build_res(env, obj, resname);
+ osc_lock_build_policy(env, lock, policy);
result = osc_enqueue_base(osc_export(obj), resname,
&ols->ols_flags, policy,
&ols->ols_lvb,
obj->oo_oinfo->loi_kms_valid,
osc_lock_upcall,
ols, einfo, &ols->ols_handle,
- PTLRPCD_SET, 1);
+ PTLRPCD_SET, 1, ols->ols_agl);
if (result != 0) {
- lu_ref_del(&lock->cll_reference,
- "upcall", lock);
- cl_lock_put(env, lock);
+ cl_lock_user_del(env, lock);
+ cl_lock_unhold(env, lock, "upcall", lock);
+ if (unlikely(result == -ECANCELED)) {
+ ols->ols_state = OLS_NEW;
+ result = 0;
+ }
}
} else {
ols->ols_state = OLS_GRANTED;
+ ols->ols_owner = osc_env_io(env);
}
}
LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
struct cl_lock *lock = olck->ols_cl.cls_lock;
LINVRNT(osc_lock_invariant(olck));
- if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
- return 0;
+
+ if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
+ if (olck->ols_flags & LDLM_FL_LVB_READY) {
+ return 0;
+ } else if (olck->ols_agl) {
+ if (lock->cll_flags & CLF_FROM_UPCALL)
+ /* We are called from the enqueue RPC reply upcall
+ * just to update the state. Do not re-enqueue. */
+ return -ENAVAIL;
+ else
+ olck->ols_state = OLS_NEW;
+ } else {
+ LASSERT(lock->cll_error);
+ return lock->cll_error;
+ }
+ }
+
+ if (olck->ols_state == OLS_NEW) {
+ int rc;
+
+ LASSERT(olck->ols_agl);
+ olck->ols_agl = 0;
+ rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
+ if (rc != 0)
+ return rc;
+ else
+ return CLO_REENQUEUED;
+ }
LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
lock->cll_error == 0, olck->ols_lock != NULL));
int rc;
LASSERT(!olck->ols_hold);
+
/*
* Atomically check for LDLM_FL_CBPENDING and addref a lock if this
* flag is not set. This protects us from a concurrent blocking ast.
*/
rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
if (rc == 0) {
- olck->ols_hold = olck->ols_has_ref = 1;
+ olck->ols_hold = 1;
olck->ols_state = OLS_GRANTED;
} else {
struct cl_lock *lock;
* cl_lock mutex.
*/
lock = slice->cls_lock;
- LASSERT(lock->cll_state == CLS_CACHED);
+ LASSERT(lock->cll_state == CLS_INTRANSIT);
LASSERT(lock->cll_users > 0);
- LASSERT(olck->ols_lock->l_flags & LDLM_FL_CBPENDING);
/* set a flag for osc_dlm_blocking_ast0() to signal the
* lock.*/
olck->ols_ast_wait = 1;
static int osc_lock_flush(struct osc_lock *ols, int discard)
{
- struct cl_lock *lock = ols->ols_cl.cls_lock;
- struct cl_env_nest nest;
- struct lu_env *env;
- int result = 0;
+ struct cl_lock *lock = ols->ols_cl.cls_lock;
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ int result = 0;
+ ENTRY;
+
+ env = cl_env_nested_get(&nest);
+ if (!IS_ERR(env)) {
+ struct osc_object *obj = cl2osc(ols->ols_cl.cls_obj);
+ struct cl_lock_descr *descr = &lock->cll_descr;
+ int rc = 0;
+
+ if (descr->cld_mode >= CLM_WRITE) {
+ result = osc_cache_writeback_range(env, obj,
+ descr->cld_start, descr->cld_end,
+ 1, discard);
+ CDEBUG(D_DLMTRACE, "write out %d pages for lock %p.\n",
+ result, lock);
+ if (result > 0)
+ result = 0;
+ }
+
+ rc = cl_lock_discard_pages(env, lock);
+ if (result == 0 && rc < 0)
+ result = rc;
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- result = cl_lock_page_out(env, lock, discard);
cl_env_nested_put(&nest, env);
} else
result = PTR_ERR(env);
- if (result == 0)
+ if (result == 0) {
ols->ols_flush = 1;
- return result;
+ LINVRNT(!osc_lock_has_pages(ols));
+ }
+ RETURN(result);
}
/**
if (dlmlock != NULL) {
int do_cancel;
- discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
- result = osc_lock_flush(olck, discard);
- if (olck->ols_hold)
- osc_lock_unuse(env, slice);
+ discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
+ if (olck->ols_state >= OLS_GRANTED)
+ result = osc_lock_flush(olck, discard);
+ osc_lock_unhold(olck);
lock_res_and_lock(dlmlock);
/* Now that we're the only user of dlm read/write reference,
lock, result);
}
olck->ols_state = OLS_CANCELLED;
+ olck->ols_flags &= ~LDLM_FL_LVB_READY;
osc_lock_detach(env, olck);
}
-void cl_lock_page_list_fixup(const struct lu_env *env,
- struct cl_io *io, struct cl_lock *lock,
- struct cl_page_list *queue);
-
#ifdef INVARIANT_CHECK
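+/* cl_page_gang_lookup() callback: a page still covered by the lock being
+ * checked is only acceptable when that lock is a read lock and some other
+ * lock also covers the page; otherwise report the page and abort the scan. */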
+static int check_cb(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, void *cbdata)
+{
+ struct cl_lock *lock = cbdata;
+
+ if (lock->cll_descr.cld_mode == CLM_READ) {
+ struct cl_lock *tmp;
+ tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj,
+ page, lock, 1, 0);
+ if (tmp != NULL) {
+ cl_lock_put(env, tmp);
+ return CLP_GANG_OKAY;
+ }
+ }
+
+ CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
+ CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
+ return CLP_GANG_ABORT;
+}
+
/**
* Returns true iff there are pages under \a olck not protected by other
* locks.
struct cl_lock_descr *descr;
struct cl_object *obj;
struct osc_object *oob;
- struct cl_page_list *plist;
- struct cl_page *page;
struct cl_env_nest nest;
struct cl_io *io;
struct lu_env *env;
int result;
env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- obj = olck->ols_cl.cls_obj;
- oob = cl2osc(obj);
- io = &oob->oo_debug_io;
- lock = olck->ols_cl.cls_lock;
- descr = &lock->cll_descr;
- plist = &osc_env_info(env)->oti_plist;
- cl_page_list_init(plist);
-
- mutex_lock(&oob->oo_debug_mutex);
-
- io->ci_obj = cl_object_top(obj);
- cl_io_init(env, io, CIT_MISC, io->ci_obj);
- cl_page_gang_lookup(env, obj, io,
- descr->cld_start, descr->cld_end, plist);
- cl_lock_page_list_fixup(env, io, lock, plist);
- if (plist->pl_nr > 0) {
- CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
- cl_page_list_for_each(page, plist)
- CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
- }
- result = plist->pl_nr > 0;
- cl_page_list_disown(env, io, plist);
- cl_page_list_fini(env, plist);
- cl_io_fini(env, io);
- mutex_unlock(&oob->oo_debug_mutex);
- cl_env_nested_put(&nest, env);
- } else
- result = 0;
- return result;
+ if (IS_ERR(env))
+ return 0;
+
+ obj = olck->ols_cl.cls_obj;
+ oob = cl2osc(obj);
+ io = &oob->oo_debug_io;
+ lock = olck->ols_cl.cls_lock;
+ descr = &lock->cll_descr;
+
+ mutex_lock(&oob->oo_debug_mutex);
+
+ io->ci_obj = cl_object_top(obj);
+ io->ci_ignore_layout = 1;
+ cl_io_init(env, io, CIT_MISC, io->ci_obj);
+ do {
+ result = cl_page_gang_lookup(env, obj, io,
+ descr->cld_start, descr->cld_end,
+ check_cb, (void *)lock);
+ if (result == CLP_GANG_ABORT)
+ break;
+ if (result == CLP_GANG_RESCHED)
+ cfs_cond_resched();
+ } while (result != CLP_GANG_OKAY);
+ cl_io_fini(env, io);
+ mutex_unlock(&oob->oo_debug_mutex);
+ cl_env_nested_put(&nest, env);
+
+ return (result == CLP_GANG_ABORT);
}
#else
-# define osc_lock_has_pages(olck) (0)
+static int osc_lock_has_pages(struct osc_lock *olck)
+{
+ return 0;
+}
#endif /* INVARIANT_CHECK */
static void osc_lock_delete(const struct lu_env *env,
struct osc_lock *olck;
olck = cl2osc_lock(slice);
+ if (olck->ols_glimpse) {
+ LASSERT(!olck->ols_hold);
+ LASSERT(!olck->ols_lock);
+ return;
+ }
+
LINVRNT(osc_lock_invariant(olck));
LINVRNT(!osc_lock_has_pages(olck));
- if (olck->ols_hold)
- osc_lock_unuse(env, slice);
+ osc_lock_unhold(olck);
osc_lock_detach(env, olck);
}
enum cl_lock_state state)
{
struct osc_lock *lock = cl2osc_lock(slice);
- struct osc_io *oio = osc_env_io(env);
/*
* XXX multiple io contexts can use the lock at the same time.
*/
LINVRNT(osc_lock_invariant(lock));
if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
+ struct osc_io *oio = osc_env_io(env);
+
LASSERT(lock->ols_owner == NULL);
lock->ols_owner = oio;
} else if (state != CLS_HELD)
/*
* XXX print ldlm lock and einfo properly.
*/
- (*p)(env, cookie, "%p %08x "LPU64" %d %p ",
+ (*p)(env, cookie, "%p %#16llx "LPX64" %d %p ",
lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
lock->ols_state, lock->ols_owner);
osc_lvb_print(env, cookie, p, &lock->ols_lvb);
return 0;
}
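+/* Decide whether this osc_lock can be matched against a new lock request
+ * (\a need) instead of enqueuing a fresh lock. */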
+static int osc_lock_fits_into(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ const struct cl_lock_descr *need,
+ const struct cl_io *io)
+{
+ struct osc_lock *ols = cl2osc_lock(slice);
+
+ if (need->cld_enq_flags & CEF_NEVER)
+ return 0;
+
+ if (ols->ols_state >= OLS_CANCELLED)
+ return 0;
+
+ if (need->cld_mode == CLM_PHANTOM) {
+ if (ols->ols_agl)
+ return !(ols->ols_state > OLS_RELEASED);
+
+ /*
+ * Note: a QUEUED lock can't be matched here, otherwise
+ * it might cause deadlocks. Consider a read process:
+ * P1: enqueues a read lock, creating sublock1.
+ * P2: enqueues a write lock, creating sublock2
+ * (which conflicts with sublock1).
+ * P1: the read lock is granted.
+ * P1: enqueues a glimpse lock (while holding sublock1
+ * for read), matches sublock2 and waits for it to be
+ * granted. But sublock2 can never be granted, because
+ * P1 will not release sublock1. Bang!
+ */
+ if (ols->ols_state < OLS_GRANTED ||
+ ols->ols_state > OLS_RELEASED)
+ return 0;
+ } else if (need->cld_enq_flags & CEF_MUST) {
+ /*
+ * If the lock has never been enqueued, it can't be matched,
+ * because the enqueue process brings in a lot of information
+ * that is used to determine things such as lockless,
+ * CEF_MUST, etc.
+ */
+ if (ols->ols_state < OLS_UPCALL_RECEIVED &&
+ ols->ols_locklessable)
+ return 0;
+ }
+ return 1;
+}
+
static const struct cl_lock_operations osc_lock_ops = {
.clo_fini = osc_lock_fini,
.clo_enqueue = osc_lock_enqueue,
.clo_state = osc_lock_state,
.clo_cancel = osc_lock_cancel,
.clo_weigh = osc_lock_weigh,
- .clo_print = osc_lock_print
+ .clo_print = osc_lock_print,
+ .clo_fits_into = osc_lock_fits_into,
};
-static int osc_lock_lockless_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *_, __u32 enqflags)
-{
- LBUG();
- return 0;
-}
-
static int osc_lock_lockless_unuse(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
enum cl_lock_state state)
{
struct osc_lock *lock = cl2osc_lock(slice);
- struct osc_io *oio = osc_env_io(env);
LINVRNT(osc_lock_invariant(lock));
if (state == CLS_HELD) {
- LASSERT(lock->ols_owner == NULL);
+ struct osc_io *oio = osc_env_io(env);
+
+ LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
lock->ols_owner = oio;
/* set the io to be lockless if this lock is for io's
* host object */
if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
oio->oi_lockless = 1;
- } else
- lock->ols_owner = NULL;
+ }
}
static int osc_lock_lockless_fits_into(const struct lu_env *env,
const struct cl_lock_descr *need,
const struct cl_io *io)
{
- return 0;
+ struct osc_lock *lock = cl2osc_lock(slice);
+
+ if (!(need->cld_enq_flags & CEF_NEVER))
+ return 0;
+
+ /* lockless lock should only be used by its owning io. b22147 */
+ return (lock->ols_owner == osc_env_io(env));
}
static const struct cl_lock_operations osc_lock_lockless_ops = {
.clo_fini = osc_lock_fini,
- .clo_enqueue = osc_lock_lockless_enqueue,
+ .clo_enqueue = osc_lock_enqueue,
.clo_wait = osc_lock_lockless_wait,
.clo_unuse = osc_lock_lockless_unuse,
.clo_state = osc_lock_lockless_state,
int osc_lock_init(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *_)
+ const struct cl_io *unused)
{
struct osc_lock *clk;
int result;
OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
if (clk != NULL) {
- osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
- clk->ols_state = OLS_NEW;
- cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
+ __u32 enqflags = lock->cll_descr.cld_enq_flags;
+
+ osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
+ cfs_atomic_set(&clk->ols_pageref, 0);
+ clk->ols_state = OLS_NEW;
+
+ clk->ols_flags = osc_enq2ldlm_flags(enqflags);
+ clk->ols_agl = !!(enqflags & CEF_AGL);
+ if (clk->ols_agl)
+ clk->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
+ if (clk->ols_flags & LDLM_FL_HAS_INTENT)
+ clk->ols_glimpse = 1;
+
+ cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
+
+ if (!(enqflags & CEF_MUST))
+ /* try to convert this lock to a lockless lock */
+ osc_lock_to_lockless(env, clk, (enqflags & CEF_NEVER));
+ if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
+ clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
+
result = 0;
} else
result = -ENOMEM;
return result;
}
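+/* Returns 1 if the osc_lock attached to \a dlm still has outstanding page
+ * references (and so should not be cancelled), 0 otherwise. */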
+int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
+{
+ struct osc_lock *olock;
+ int rc = 0;
+
+ spin_lock(&osc_ast_guard);
+ olock = dlm->l_ast_data;
+ /*
+ * there's a very rare race with osc_page_addref_lock(), but it
+ * doesn't matter: in the worst case we fail to cancel a lock
+ * that we actually could, which does no harm.
+ */
+ if (olock != NULL &&
+ cfs_atomic_add_return(_PAGEREF_MAGIC,
+ &olock->ols_pageref) != _PAGEREF_MAGIC) {
+ cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
+ rc = 1;
+ }
+ spin_unlock(&osc_ast_guard);
+ return rc;
+}
/** @} osc */