diff --git a/lustre/osc/osc_lock.c b/lustre/osc/osc_lock.c
index 467295f..459944a 100644
--- a/lustre/osc/osc_lock.c
+++ b/lustre/osc/osc_lock.c
@@ -27,7 +27,7 @@
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2014, Intel Corporation.
 */
 /*
 * This file is part of Lustre, http://www.lustre.org/
@@ -36,26 +36,21 @@
 * Implementation of cl_lock for OSC layer.
 *
 * Author: Nikita Danilov
+ * Author: Jinshan Xiong
 */
 #define DEBUG_SUBSYSTEM S_OSC
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-#else
-# include <liblustre.h>
-#endif
+#include <libcfs/libcfs.h>
 /* fid_build_reg_res_name() */
 #include <lustre_fid.h>
 #include "osc_cl_internal.h"
-/** \addtogroup osc
- * @{
+/** \addtogroup osc
+ * @{
 */
-#define _PAGEREF_MAGIC (-10000000)
-
 /*****************************************************************************
 *
 * Type conversions.
@@ -66,7 +61,6 @@ static const struct cl_lock_operations osc_lock_ops;
 static const struct cl_lock_operations osc_lock_lockless_ops;
 static void osc_lock_to_lockless(const struct lu_env *env,
 struct osc_lock *ols, int force);
-static int osc_lock_has_pages(struct osc_lock *olck);
 int osc_lock_is_lockless(const struct osc_lock *olck)
 {
@@ -93,35 +87,49 @@ static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
 */
 static int osc_lock_invariant(struct osc_lock *ols)
 {
- struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
- struct ldlm_lock *olock = ols->ols_lock;
- int handle_used = lustre_handle_is_used(&ols->ols_handle);
-
- return
- ergo(osc_lock_is_lockless(ols),
- ols->ols_locklessable && ols->ols_lock == NULL) ||
- (ergo(olock != NULL, handle_used) &&
- ergo(olock != NULL,
- olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
- /*
- * Check that ->ols_handle and ->ols_lock are consistent, but
- * take into account that they are set at the different time.
- */
- ergo(handle_used,
- ergo(lock != NULL && olock != NULL, lock == olock) &&
- ergo(lock == NULL, olock == NULL)) &&
- ergo(ols->ols_state == OLS_CANCELLED,
- olock == NULL && !handle_used) &&
- /*
- * DLM lock is destroyed only after we have seen cancellation
- * ast.
- */
- ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
- !olock->l_destroyed) &&
- ergo(ols->ols_state == OLS_GRANTED,
- olock != NULL &&
- olock->l_req_mode == olock->l_granted_mode &&
- ols->ols_hold));
+ struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
+ struct ldlm_lock *olock = ols->ols_dlmlock;
+ int handle_used = lustre_handle_is_used(&ols->ols_handle);
+
+ if (ergo(osc_lock_is_lockless(ols),
+ ols->ols_locklessable && ols->ols_dlmlock == NULL))
+ return 1;
+
+ /*
+ * If all the following "ergo"s are true, return 1, otherwise 0
+ */
+ if (! ergo(olock != NULL, handle_used))
+ return 0;
+
+ if (! ergo(olock != NULL,
+ olock->l_handle.h_cookie == ols->ols_handle.cookie))
+ return 0;
+
+ if (! ergo(handle_used,
+ ergo(lock != NULL && olock != NULL, lock == olock) &&
+ ergo(lock == NULL, olock == NULL)))
+ return 0;
+ /*
+ * Check that ->ols_handle and ->ols_dlmlock are consistent, but
+ * take into account that they are set at the different time.
+ */
+ if (!
ergo(ols->ols_state == OLS_CANCELLED, + olock == NULL && !handle_used)) + return 0; + /* + * DLM lock is destroyed only after we have seen cancellation + * ast. + */ + if (! ergo(olock != NULL && ols->ols_state < OLS_CANCELLED, + !ldlm_is_destroyed(olock))) + return 0; + + if (! ergo(ols->ols_state == OLS_GRANTED, + olock != NULL && + olock->l_req_mode == olock->l_granted_mode && + ols->ols_hold)) + return 0; + return 1; } /***************************************************************************** @@ -130,136 +138,15 @@ static int osc_lock_invariant(struct osc_lock *ols) * */ -/** - * Breaks a link between osc_lock and dlm_lock. - */ -static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) -{ - struct ldlm_lock *dlmlock; - - cfs_spin_lock(&osc_ast_guard); - dlmlock = olck->ols_lock; - if (dlmlock == NULL) { - cfs_spin_unlock(&osc_ast_guard); - return; - } - - olck->ols_lock = NULL; - /* wb(); --- for all who checks (ols->ols_lock != NULL) before - * call to osc_lock_detach() */ - dlmlock->l_ast_data = NULL; - olck->ols_handle.cookie = 0ULL; - cfs_spin_unlock(&osc_ast_guard); - - lock_res_and_lock(dlmlock); - if (dlmlock->l_granted_mode == dlmlock->l_req_mode) { - struct cl_object *obj = olck->ols_cl.cls_obj; - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - __u64 old_kms; - - cl_object_attr_lock(obj); - /* Must get the value under the lock to avoid possible races. */ - old_kms = cl2osc(obj)->oo_oinfo->loi_kms; - /* Update the kms. Need to loop all granted locks. - * Not a problem for the client */ - attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms); - - cl_object_attr_set(env, obj, attr, CAT_KMS); - cl_object_attr_unlock(obj); - } - unlock_res_and_lock(dlmlock); - - /* release a reference taken in osc_lock_upcall0(). */ - LASSERT(olck->ols_has_ref); - lu_ref_del(&dlmlock->l_reference, "osc_lock", olck); - LDLM_LOCK_RELEASE(dlmlock); - olck->ols_has_ref = 0; -} - -static int osc_lock_unhold(struct osc_lock *ols) -{ - int result = 0; - - if (ols->ols_hold) { - ols->ols_hold = 0; - result = osc_cancel_base(&ols->ols_handle, - ols->ols_einfo.ei_mode); - } - return result; -} - -static int osc_lock_unuse(const struct lu_env *env, - const struct cl_lock_slice *slice) -{ - struct osc_lock *ols = cl2osc_lock(slice); - - LINVRNT(osc_lock_invariant(ols)); - - switch (ols->ols_state) { - case OLS_NEW: - LASSERT(!ols->ols_hold); - LASSERT(ols->ols_agl); - return 0; - case OLS_ENQUEUED: - case OLS_UPCALL_RECEIVED: - LASSERT(!ols->ols_hold); - ols->ols_state = OLS_NEW; - return 0; - case OLS_GRANTED: - LASSERT(!ols->ols_glimpse); - LASSERT(ols->ols_hold); - /* - * Move lock into OLS_RELEASED state before calling - * osc_cancel_base() so that possible synchronous cancellation - * (that always happens e.g., for liblustre) sees that lock is - * released. - */ - ols->ols_state = OLS_RELEASED; - return osc_lock_unhold(ols); - default: - CERROR("Impossible state: %d\n", ols->ols_state); - LBUG(); - } -} - static void osc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice) { - struct osc_lock *ols = cl2osc_lock(slice); - - LINVRNT(osc_lock_invariant(ols)); - /* - * ->ols_hold can still be true at this point if, for example, a - * thread that requested a lock was killed (and released a reference - * to the lock), before reply from a server was received. In this case - * lock is destroyed immediately after upcall. 
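 *
 * For reference, osc_lock_invariant() above leans on the libcfs logical
 * helpers; a minimal sketch of their usual definitions (verify against
 * libcfs_private.h in your tree):
 *
 *     #define ergo(a, b) (!(a) || (b))     (implication: a implies b)
 *     #define equi(a, b) (!!(a) == !!(b))  (equivalence: a iff b)
 *
 * so each "if (! ergo(A, B)) return 0;" above rejects exactly the states
 * where A holds but B does not.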
- */ - osc_lock_unhold(ols); - LASSERT(ols->ols_lock == NULL); - LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 || - cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC); - - OBD_SLAB_FREE_PTR(ols, osc_lock_kmem); -} + struct osc_lock *ols = cl2osc_lock(slice); -void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj, - struct ldlm_res_id *resname) -{ - const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu); - if (0) { - /* - * In the perfect world of the future, where ost servers talk - * idif-fids... - */ - fid_build_reg_res_name(fid, resname); - } else { - /* - * In reality, where ost server expects ->lsm_object_id and - * ->lsm_object_seq in rename. - */ - osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq, - resname); - } + LINVRNT(osc_lock_invariant(ols)); + LASSERT(ols->ols_dlmlock == NULL); + + OBD_SLAB_FREE_PTR(ols, osc_lock_kmem); } static void osc_lock_build_policy(const struct lu_env *env, @@ -272,64 +159,21 @@ static void osc_lock_build_policy(const struct lu_env *env, policy->l_extent.gid = d->cld_gid; } -static int osc_enq2ldlm_flags(__u32 enqflags) +static __u64 osc_enq2ldlm_flags(__u32 enqflags) { - int result = 0; - - LASSERT((enqflags & ~CEF_MASK) == 0); - - if (enqflags & CEF_NONBLOCK) - result |= LDLM_FL_BLOCK_NOWAIT; - if (enqflags & CEF_ASYNC) - result |= LDLM_FL_HAS_INTENT; - if (enqflags & CEF_DISCARD_DATA) - result |= LDLM_AST_DISCARD_DATA; - return result; -} - -/** - * Global spin-lock protecting consistency of ldlm_lock::l_ast_data - * pointers. Initialized in osc_init(). - */ -cfs_spinlock_t osc_ast_guard; - -static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock) -{ - struct osc_lock *olck; - - lock_res_and_lock(dlm_lock); - cfs_spin_lock(&osc_ast_guard); - olck = dlm_lock->l_ast_data; - if (olck != NULL) { - struct cl_lock *lock = olck->ols_cl.cls_lock; - /* - * If osc_lock holds a reference on ldlm lock, return it even - * when cl_lock is in CLS_FREEING state. This way - * - * osc_ast_data_get(dlmlock) == NULL - * - * guarantees that all osc references on dlmlock were - * released. osc_dlm_blocking_ast0() relies on that. - */ - if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) { - cl_lock_get_trust(lock); - lu_ref_add_atomic(&lock->cll_reference, - "ast", cfs_current()); - } else - olck = NULL; - } - cfs_spin_unlock(&osc_ast_guard); - unlock_res_and_lock(dlm_lock); - return olck; -} - -static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck) -{ - struct cl_lock *lock; - - lock = olck->ols_cl.cls_lock; - lu_ref_del(&lock->cll_reference, "ast", cfs_current()); - cl_lock_put(env, lock); + __u64 result = 0; + + LASSERT((enqflags & ~CEF_MASK) == 0); + + if (enqflags & CEF_NONBLOCK) + result |= LDLM_FL_BLOCK_NOWAIT; + if (enqflags & CEF_ASYNC) + result |= LDLM_FL_HAS_INTENT; + if (enqflags & CEF_DISCARD_DATA) + result |= LDLM_FL_AST_DISCARD_DATA; + if (enqflags & CEF_PEEK) + result |= LDLM_FL_TEST_LOCK; + return result; } /** @@ -342,38 +186,34 @@ static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck) * * Called under lock and resource spin-locks. 
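 *
 * The LVB-to-attribute conversion used below is essentially a field copy;
 * a minimal sketch of cl_lvb2attr() as assumed here:
 *
 *     attr->cat_size   = lvb->lvb_size;
 *     attr->cat_mtime  = lvb->lvb_mtime;
 *     attr->cat_atime  = lvb->lvb_atime;
 *     attr->cat_ctime  = lvb->lvb_ctime;
 *     attr->cat_blocks = lvb->lvb_blocks;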
*/ -static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck, - int rc) +static void osc_lock_lvb_update(const struct lu_env *env, + struct osc_object *osc, + struct ldlm_lock *dlmlock, + struct ost_lvb *lvb) { - struct ost_lvb *lvb; - struct cl_object *obj; - struct lov_oinfo *oinfo; - struct cl_attr *attr; - unsigned valid; - - ENTRY; + struct cl_object *obj = osc2cl(osc); + struct lov_oinfo *oinfo = osc->oo_oinfo; + struct cl_attr *attr = &osc_env_info(env)->oti_attr; + unsigned valid; - if (!(olck->ols_flags & LDLM_FL_LVB_READY)) - RETURN_EXIT; + ENTRY; - lvb = &olck->ols_lvb; - obj = olck->ols_cl.cls_obj; - oinfo = cl2osc(obj)->oo_oinfo; - attr = &osc_env_info(env)->oti_attr; - valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE; - cl_lvb2attr(attr, lvb); + valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE; + if (lvb == NULL) { + LASSERT(dlmlock != NULL); + lvb = dlmlock->l_lvb_data; + } + cl_lvb2attr(attr, lvb); - cl_object_attr_lock(obj); - if (rc == 0) { - struct ldlm_lock *dlmlock; - __u64 size; + cl_object_attr_lock(obj); + if (dlmlock != NULL) { + __u64 size; - dlmlock = olck->ols_lock; - LASSERT(dlmlock != NULL); + check_res_locked(dlmlock->l_resource); - /* re-grab LVB from a dlm lock under DLM spin-locks. */ - *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data; + LASSERT(lvb == dlmlock->l_lvb_data); size = lvb->lvb_size; + /* Extend KMS up to the end of this lock and no further * A lock on [x,y] means a KMS of up to y + 1 bytes! */ if (size > dlmlock->l_policy_data.l_extent.end) @@ -389,246 +229,195 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck, lvb->lvb_size, oinfo->loi_kms, dlmlock->l_policy_data.l_extent.end); } - ldlm_lock_allow_match_locked(dlmlock); - } else if (rc == -ENAVAIL && olck->ols_glimpse) { - CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving" - " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms); - } else - valid = 0; + ldlm_lock_allow_match_locked(dlmlock); + } - if (valid != 0) - cl_object_attr_set(env, obj, attr, valid); + cl_object_attr_update(env, obj, attr, valid); + cl_object_attr_unlock(obj); - cl_object_attr_unlock(obj); + EXIT; +} - EXIT; +static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl, + struct lustre_handle *lockh, bool lvb_update) +{ + struct ldlm_lock *dlmlock; + + dlmlock = ldlm_handle2lock_long(lockh, 0); + LASSERT(dlmlock != NULL); + + /* lock reference taken by ldlm_handle2lock_long() is + * owned by osc_lock and released in osc_lock_detach() + */ + lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl); + oscl->ols_has_ref = 1; + + LASSERT(oscl->ols_dlmlock == NULL); + oscl->ols_dlmlock = dlmlock; + + /* This may be a matched lock for glimpse request, do not hold + * lock reference in that case. */ + if (!oscl->ols_glimpse) { + /* hold a refc for non glimpse lock which will + * be released in osc_lock_cancel() */ + lustre_handle_copy(&oscl->ols_handle, lockh); + ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode); + oscl->ols_hold = 1; + } + + /* Lock must have been granted. */ + lock_res_and_lock(dlmlock); + if (dlmlock->l_granted_mode == dlmlock->l_req_mode) { + struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent; + struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr; + + /* extend the lock extent, otherwise it will have problem when + * we decide whether to grant a lockless lock. 
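 *
 * For example (assuming 4KiB pages): if [4096, 8191] was requested but
 * the server granted [0, 65535], the descriptor must reflect the granted
 * extent, i.e.
 *
 *     descr->cld_start = cl_index(obj, 0);      (page 0)
 *     descr->cld_end   = cl_index(obj, 65535);  (page 15)
 *
 * rather than pages 1..1 of the original request.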
*/ + descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode); + descr->cld_start = cl_index(descr->cld_obj, ext->start); + descr->cld_end = cl_index(descr->cld_obj, ext->end); + descr->cld_gid = ext->gid; + + /* no lvb update for matched lock */ + if (lvb_update) { + LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY); + osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj), + dlmlock, NULL); + } + LINVRNT(osc_lock_invariant(oscl)); + } + unlock_res_and_lock(dlmlock); + + LASSERT(oscl->ols_state != OLS_GRANTED); + oscl->ols_state = OLS_GRANTED; } /** - * Called when a lock is granted, from an upcall (when server returned a - * granted lock), or from completion AST, when server returned a blocked lock. - * - * Called under lock and resource spin-locks, that are released temporarily - * here. + * Lock upcall function that is executed either when a reply to ENQUEUE rpc is + * received from a server, or after osc_enqueue_base() matched a local DLM + * lock. */ -static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck, - struct ldlm_lock *dlmlock, int rc) +static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh, + int errcode) { - struct ldlm_extent *ext; - struct cl_lock *lock; - struct cl_lock_descr *descr; + struct osc_lock *oscl = cookie; + struct cl_lock_slice *slice = &oscl->ols_cl; + struct lu_env *env; + struct cl_env_nest nest; + int rc; - LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode); + ENTRY; - ENTRY; - if (olck->ols_state < OLS_GRANTED) { - lock = olck->ols_cl.cls_lock; - ext = &dlmlock->l_policy_data.l_extent; - descr = &osc_env_info(env)->oti_descr; - descr->cld_obj = lock->cll_descr.cld_obj; - - /* XXX check that ->l_granted_mode is valid. */ - descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode); - descr->cld_start = cl_index(descr->cld_obj, ext->start); - descr->cld_end = cl_index(descr->cld_obj, ext->end); - descr->cld_gid = ext->gid; - /* - * tell upper layers the extent of the lock that was actually - * granted - */ - olck->ols_state = OLS_GRANTED; - osc_lock_lvb_update(env, olck, rc); - - /* release DLM spin-locks to allow cl_lock_{modify,signal}() - * to take a semaphore on a parent lock. This is safe, because - * spin-locks are needed to protect consistency of - * dlmlock->l_*_mode and LVB, and we have finished processing - * them. */ - unlock_res_and_lock(dlmlock); - cl_lock_modify(env, lock, descr); - cl_lock_signal(env, lock); - LINVRNT(osc_lock_invariant(olck)); - lock_res_and_lock(dlmlock); - } - EXIT; + env = cl_env_nested_get(&nest); + /* should never happen, similar to osc_ldlm_blocking_ast(). */ + LASSERT(!IS_ERR(env)); + + rc = ldlm_error2errno(errcode); + if (oscl->ols_state == OLS_ENQUEUED) { + oscl->ols_state = OLS_UPCALL_RECEIVED; + } else if (oscl->ols_state == OLS_CANCELLED) { + rc = -EIO; + } else { + CERROR("Impossible state: %d\n", oscl->ols_state); + LBUG(); + } + + if (rc == 0) + osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK); + + /* Error handling, some errors are tolerable. */ + if (oscl->ols_locklessable && rc == -EUSERS) { + /* This is a tolerable error, turn this lock into + * lockless lock. + */ + osc_object_set_contended(cl2osc(slice->cls_obj)); + LASSERT(slice->cls_ops == &osc_lock_ops); + + /* Change this lock to ldlmlock-less lock. 
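 *
 * (sketch of this fallback: -EUSERS from the server means the extent is
 * contended; instead of failing the IO, the lock is converted so that
 *
 *     ols->ols_locklessable == 1;
 *     slice->cls_ops == &osc_lock_lockless_ops;
 *
 * and the IO proceeds without a client-held DLM lock, relying on
 * server-side locking)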
*/ + osc_lock_to_lockless(env, oscl, 1); + oscl->ols_state = OLS_GRANTED; + rc = 0; + } else if (oscl->ols_glimpse && rc == -ENAVAIL) { + LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY); + osc_lock_lvb_update(env, cl2osc(slice->cls_obj), + NULL, &oscl->ols_lvb); + /* Hide the error. */ + rc = 0; + } + + if (oscl->ols_owner != NULL) + cl_sync_io_note(env, oscl->ols_owner, rc); + cl_env_nested_put(&nest, env); + + RETURN(rc); } -static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck) - +static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh, + int errcode) { - struct ldlm_lock *dlmlock; - - ENTRY; + struct osc_object *osc = cookie; + struct ldlm_lock *dlmlock; + struct lu_env *env; + struct cl_env_nest nest; + ENTRY; - dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0); - LASSERT(dlmlock != NULL); - - lock_res_and_lock(dlmlock); - cfs_spin_lock(&osc_ast_guard); - LASSERT(dlmlock->l_ast_data == olck); - LASSERT(olck->ols_lock == NULL); - olck->ols_lock = dlmlock; - cfs_spin_unlock(&osc_ast_guard); - - /* - * Lock might be not yet granted. In this case, completion ast - * (osc_ldlm_completion_ast()) comes later and finishes lock - * granting. - */ - if (dlmlock->l_granted_mode == dlmlock->l_req_mode) - osc_lock_granted(env, olck, dlmlock, 0); - unlock_res_and_lock(dlmlock); - - /* - * osc_enqueue_interpret() decrefs asynchronous locks, counter - * this. - */ - ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode); - olck->ols_hold = 1; - - /* lock reference taken by ldlm_handle2lock_long() is owned by - * osc_lock and released in osc_lock_detach() */ - lu_ref_add(&dlmlock->l_reference, "osc_lock", olck); - olck->ols_has_ref = 1; -} + env = cl_env_nested_get(&nest); + LASSERT(!IS_ERR(env)); -/** - * Lock upcall function that is executed either when a reply to ENQUEUE rpc is - * received from a server, or after osc_enqueue_base() matched a local DLM - * lock. - */ -static int osc_lock_upcall(void *cookie, int errcode) -{ - struct osc_lock *olck = cookie; - struct cl_lock_slice *slice = &olck->ols_cl; - struct cl_lock *lock = slice->cls_lock; - struct lu_env *env; - struct cl_env_nest nest; + if (errcode == ELDLM_LOCK_MATCHED) + GOTO(out, errcode = ELDLM_OK); - ENTRY; - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - int rc; - - cl_lock_mutex_get(env, lock); - - LASSERT(lock->cll_state >= CLS_QUEUING); - if (olck->ols_state == OLS_ENQUEUED) { - olck->ols_state = OLS_UPCALL_RECEIVED; - rc = ldlm_error2errno(errcode); - } else if (olck->ols_state == OLS_CANCELLED) { - rc = -EIO; - } else { - CERROR("Impossible state: %d\n", olck->ols_state); - LBUG(); - } - if (rc) { - struct ldlm_lock *dlmlock; - - dlmlock = ldlm_handle2lock(&olck->ols_handle); - if (dlmlock != NULL) { - lock_res_and_lock(dlmlock); - cfs_spin_lock(&osc_ast_guard); - LASSERT(olck->ols_lock == NULL); - dlmlock->l_ast_data = NULL; - olck->ols_handle.cookie = 0ULL; - cfs_spin_unlock(&osc_ast_guard); - ldlm_lock_fail_match_locked(dlmlock); - unlock_res_and_lock(dlmlock); - LDLM_LOCK_PUT(dlmlock); - } - } else { - if (olck->ols_glimpse) - olck->ols_glimpse = 0; - osc_lock_upcall0(env, olck); - } + if (errcode != ELDLM_OK) + GOTO(out, errcode); - /* Error handling, some errors are tolerable. */ - if (olck->ols_locklessable && rc == -EUSERS) { - /* This is a tolerable error, turn this lock into - * lockless lock. - */ - osc_object_set_contended(cl2osc(slice->cls_obj)); - LASSERT(slice->cls_ops == &osc_lock_ops); - - /* Change this lock to ldlmlock-less lock. 
*/ - osc_lock_to_lockless(env, olck, 1); - olck->ols_state = OLS_GRANTED; - rc = 0; - } else if (olck->ols_glimpse && rc == -ENAVAIL) { - osc_lock_lvb_update(env, olck, rc); - cl_lock_delete(env, lock); - /* Hide the error. */ - rc = 0; - } + dlmlock = ldlm_handle2lock(lockh); + LASSERT(dlmlock != NULL); - if (rc == 0) { - /* For AGL case, the RPC sponsor may exits the cl_lock - * processing without wait() called before related OSC - * lock upcall(). So update the lock status according - * to the enqueue result inside AGL upcall(). */ - if (olck->ols_agl) { - lock->cll_flags |= CLF_FROM_UPCALL; - cl_wait_try(env, lock); - lock->cll_flags &= ~CLF_FROM_UPCALL; - if (!olck->ols_glimpse) - olck->ols_agl = 0; - } - cl_lock_signal(env, lock); - /* del user for lock upcall cookie */ - cl_unuse_try(env, lock); - } else { - /* del user for lock upcall cookie */ - cl_lock_user_del(env, lock); - cl_lock_error(env, lock, rc); - } + lock_res_and_lock(dlmlock); + LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode); - cl_lock_mutex_put(env, lock); + /* there is no osc_lock associated with AGL lock */ + osc_lock_lvb_update(env, osc, dlmlock, NULL); - /* release cookie reference, acquired by osc_lock_enqueue() */ - lu_ref_del(&lock->cll_reference, "upcall", lock); - cl_lock_put(env, lock); + unlock_res_and_lock(dlmlock); + LDLM_LOCK_PUT(dlmlock); - cl_env_nested_put(&nest, env); - } else - /* should never happen, similar to osc_ldlm_blocking_ast(). */ - LBUG(); - RETURN(errcode); +out: + cl_object_put(env, osc2cl(osc)); + cl_env_nested_put(&nest, env); + RETURN(ldlm_error2errno(errcode)); } -/** - * Core of osc_dlm_blocking_ast() logic. - */ -static void osc_lock_blocking(const struct lu_env *env, - struct ldlm_lock *dlmlock, - struct osc_lock *olck, int blocking) +static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end, + enum cl_lock_mode mode, int discard) { - struct cl_lock *lock = olck->ols_cl.cls_lock; - - LASSERT(olck->ols_lock == dlmlock); - CLASSERT(OLS_BLOCKED < OLS_CANCELLED); - LASSERT(!osc_lock_is_lockless(olck)); - - /* - * Lock might be still addref-ed here, if e.g., blocking ast - * is sent for a failed lock. - */ - osc_lock_unhold(olck); - - if (blocking && olck->ols_state < OLS_BLOCKED) - /* - * Move osc_lock into OLS_BLOCKED before canceling the lock, - * because it recursively re-enters osc_lock_blocking(), with - * the state set to OLS_CANCELLED. - */ - olck->ols_state = OLS_BLOCKED; - /* - * cancel and destroy lock at least once no matter how blocking ast is - * entered (see comment above osc_ldlm_blocking_ast() for use - * cases). cl_lock_cancel() and cl_lock_delete() are idempotent. - */ - cl_lock_cancel(env, lock); - cl_lock_delete(env, lock); + struct lu_env *env; + struct cl_env_nest nest; + int rc = 0; + int rc2 = 0; + + ENTRY; + + env = cl_env_nested_get(&nest); + if (IS_ERR(env)) + RETURN(PTR_ERR(env)); + + if (mode == CLM_WRITE) { + rc = osc_cache_writeback_range(env, obj, start, end, 1, + discard); + CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n", + obj, start, end, rc, + discard ? 
"discarded" : "written back"); + if (rc > 0) + rc = 0; + } + + rc2 = osc_lock_discard_pages(env, obj, start, end, mode); + if (rc == 0 && rc2 < 0) + rc = rc2; + + cl_env_nested_put(&nest, env); + RETURN(rc); } /** @@ -639,66 +428,63 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env, struct ldlm_lock *dlmlock, void *data, int flag) { - struct osc_lock *olck; - struct cl_lock *lock; - int result; - int cancel; - - LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING); - - cancel = 0; - olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { - lock = olck->ols_cl.cls_lock; - cl_lock_mutex_get(env, lock); - LINVRNT(osc_lock_invariant(olck)); - if (olck->ols_ast_wait) { - /* wake up osc_lock_use() */ - cl_lock_signal(env, lock); - olck->ols_ast_wait = 0; - } - /* - * Lock might have been canceled while this thread was - * sleeping for lock mutex, but olck is pinned in memory. - */ - if (olck == dlmlock->l_ast_data) { - /* - * NOTE: DLM sends blocking AST's for failed locks - * (that are still in pre-OLS_GRANTED state) - * too, and they have to be canceled otherwise - * DLM lock is never destroyed and stuck in - * the memory. - * - * Alternatively, ldlm_cli_cancel() can be - * called here directly for osc_locks with - * ols_state < OLS_GRANTED to maintain an - * invariant that ->clo_cancel() is only called - * for locks that were granted. - */ - LASSERT(data == olck); - osc_lock_blocking(env, dlmlock, - olck, flag == LDLM_CB_BLOCKING); - } else - cancel = 1; - cl_lock_mutex_put(env, lock); - osc_ast_data_put(env, olck); - } else - /* - * DLM lock exists, but there is no cl_lock attached to it. - * This is a `normal' race. cl_object and its cl_lock's can be - * removed by memory pressure, together with all pages. - */ - cancel = (flag == LDLM_CB_BLOCKING); - - if (cancel) { - struct lustre_handle *lockh; - - lockh = &osc_env_info(env)->oti_handle; - ldlm_lock2handle(dlmlock, lockh); - result = ldlm_cli_cancel(lockh); - } else - result = 0; - return result; + struct cl_object *obj = NULL; + int result = 0; + int discard; + enum cl_lock_mode mode = CLM_READ; + ENTRY; + + LASSERT(flag == LDLM_CB_CANCELING); + + lock_res_and_lock(dlmlock); + if (dlmlock->l_granted_mode != dlmlock->l_req_mode) { + dlmlock->l_ast_data = NULL; + unlock_res_and_lock(dlmlock); + RETURN(0); + } + + discard = ldlm_is_discard_data(dlmlock); + if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP)) + mode = CLM_WRITE; + + if (dlmlock->l_ast_data != NULL) { + obj = osc2cl(dlmlock->l_ast_data); + dlmlock->l_ast_data = NULL; + + cl_object_get(obj); + } + + unlock_res_and_lock(dlmlock); + + /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or + * the object has been destroyed. */ + if (obj != NULL) { + struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent; + struct cl_attr *attr = &osc_env_info(env)->oti_attr; + __u64 old_kms; + + /* Destroy pages covered by the extent of the DLM lock */ + result = osc_lock_flush(cl2osc(obj), + cl_index(obj, extent->start), + cl_index(obj, extent->end), + mode, discard); + + /* losing a lock, update kms */ + lock_res_and_lock(dlmlock); + cl_object_attr_lock(obj); + /* Must get the value under the lock to avoid race. */ + old_kms = cl2osc(obj)->oo_oinfo->loi_kms; + /* Update the kms. Need to loop all granted locks. 
+ * Not a problem for the client */ + attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms); + + cl_object_attr_update(env, obj, attr, CAT_KMS); + cl_object_attr_unlock(obj); + unlock_res_and_lock(dlmlock); + + cl_object_put(env, obj); + } + RETURN(result); } /** @@ -726,7 +512,7 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env, * * - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify * us that dlmlock conflicts with another lock that some client is - * enqueing. Lock is canceled. + * enqueuing. Lock is canceled. * * - cl_lock_cancel() is called. osc_lock_cancel() calls * ldlm_cli_cancel() that calls @@ -747,125 +533,75 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock, struct ldlm_lock_desc *new, void *data, int flag) { - struct lu_env *env; - struct cl_env_nest nest; - int result; - - /* - * This can be called in the context of outer IO, e.g., - * - * cl_enqueue()->... - * ->osc_enqueue_base()->... - * ->ldlm_prep_elc_req()->... - * ->ldlm_cancel_callback()->... - * ->osc_ldlm_blocking_ast() - * - * new environment has to be created to not corrupt outer context. - */ - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - result = osc_dlm_blocking_ast0(env, dlmlock, data, flag); - cl_env_nested_put(&nest, env); - } else { - result = PTR_ERR(env); - /* - * XXX This should never happen, as cl_lock is - * stuck. Pre-allocated environment a la vvp_inode_fini_env - * should be used. - */ - LBUG(); - } - if (result != 0) { - if (result == -ENODATA) - result = 0; - else - CERROR("BAST failed: %d\n", result); - } - return result; -} + int result = 0; + ENTRY; -static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock, - int flags, void *data) -{ - struct cl_env_nest nest; - struct lu_env *env; - struct osc_lock *olck; - struct cl_lock *lock; - int result; - int dlmrc; - - /* first, do dlm part of the work */ - dlmrc = ldlm_completion_ast_async(dlmlock, flags, data); - /* then, notify cl_lock */ - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { - lock = olck->ols_cl.cls_lock; - cl_lock_mutex_get(env, lock); - /* - * ldlm_handle_cp_callback() copied LVB from request - * to lock->l_lvb_data, store it in osc_lock. - */ - LASSERT(dlmlock->l_lvb_data != NULL); - lock_res_and_lock(dlmlock); - olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data; - if (olck->ols_lock == NULL) { - /* - * upcall (osc_lock_upcall()) hasn't yet been - * called. Do nothing now, upcall will bind - * olck to dlmlock and signal the waiters. - * - * This maintains an invariant that osc_lock - * and ldlm_lock are always bound when - * osc_lock is in OLS_GRANTED state. - */ - } else if (dlmlock->l_granted_mode == - dlmlock->l_req_mode) { - osc_lock_granted(env, olck, dlmlock, dlmrc); - } - unlock_res_and_lock(dlmlock); + switch (flag) { + case LDLM_CB_BLOCKING: { + struct lustre_handle lockh; + + ldlm_lock2handle(dlmlock, &lockh); + result = ldlm_cli_cancel(&lockh, LCF_ASYNC); + if (result == -ENODATA) + result = 0; + break; + } + case LDLM_CB_CANCELING: { + struct lu_env *env; + struct cl_env_nest nest; + + /* + * This can be called in the context of outer IO, e.g., + * + * osc_enqueue_base()->... + * ->ldlm_prep_elc_req()->... + * ->ldlm_cancel_callback()->... + * ->osc_ldlm_blocking_ast() + * + * new environment has to be created to not corrupt outer + * context. 
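 *
 * With that environment in hand, osc_dlm_blocking_ast0() flushes or
 * discards the pages under the lock and then shrinks the known minimum
 * size. A sketch with made-up numbers: losing a lock on [0, EOF] while
 * another granted lock still covers bytes [0, 1MiB - 1] gives
 *
 *     attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
 *     (old_kms == 4MiB results in cat_kms == 1MiB)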
+ */ + env = cl_env_nested_get(&nest); + if (IS_ERR(env)) { + result = PTR_ERR(env); + break; + } - if (dlmrc != 0) { - CL_LOCK_DEBUG(D_ERROR, env, lock, - "dlmlock returned %d\n", dlmrc); - cl_lock_error(env, lock, dlmrc); - } - cl_lock_mutex_put(env, lock); - osc_ast_data_put(env, olck); - result = 0; - } else - result = -ELDLM_NO_LOCK_DATA; - cl_env_nested_put(&nest, env); - } else - result = PTR_ERR(env); - return dlmrc ?: result; + result = osc_dlm_blocking_ast0(env, dlmlock, data, flag); + cl_env_nested_put(&nest, env); + break; + } + default: + LBUG(); + } + RETURN(result); } static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) { - struct ptlrpc_request *req = data; - struct osc_lock *olck; - struct cl_lock *lock; - struct cl_object *obj; - struct cl_env_nest nest; - struct lu_env *env; - struct ost_lvb *lvb; - struct req_capsule *cap; - int result; - - LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK); - - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - /* osc_ast_data_get() has to go after environment is - * allocated, because osc_ast_data() acquires a - * reference to a lock, and it can only be released in - * environment. - */ - olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { - lock = olck->ols_cl.cls_lock; + struct ptlrpc_request *req = data; + struct cl_env_nest nest; + struct lu_env *env; + struct ost_lvb *lvb; + struct req_capsule *cap; + int result; + + ENTRY; + + LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK); + + env = cl_env_nested_get(&nest); + if (!IS_ERR(env)) { + struct cl_object *obj = NULL; + + lock_res_and_lock(dlmlock); + if (dlmlock->l_ast_data != NULL) { + obj = osc2cl(dlmlock->l_ast_data); + cl_object_get(obj); + } + unlock_res_and_lock(dlmlock); + + if (obj != NULL) { /* Do not grab the mutex of cl_lock for glimpse. * See LU-1274 for details. * BTW, it's okay for cl_lock to be cancelled during @@ -879,10 +615,14 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) result = req_capsule_server_pack(cap); if (result == 0) { lvb = req_capsule_server_get(cap, &RMF_DLM_LVB); - obj = lock->cll_descr.cld_obj; result = cl_object_glimpse(env, obj, lvb); } - osc_ast_data_put(env, olck); + if (!exp_connect_lvb_type(req->rq_export)) + req_capsule_shrink(&req->rq_pill, + &RMF_DLM_LVB, + sizeof(struct ost_lvb_v1), + RCL_SERVER); + cl_object_put(env, obj); } else { /* * These errors are normal races, so we don't want to @@ -893,94 +633,122 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) result = -ELDLM_NO_LOCK_DATA; } cl_env_nested_put(&nest, env); - } else - result = PTR_ERR(env); - req->rq_status = result; - return result; + } else + result = PTR_ERR(env); + req->rq_status = result; + RETURN(result); } -static unsigned long osc_lock_weigh(const struct lu_env *env, - const struct cl_lock_slice *slice) +static int weigh_cb(const struct lu_env *env, struct cl_io *io, + struct osc_page *ops, void *cbdata) { - /* - * don't need to grab coh_page_guard since we don't care the exact # - * of pages.. 
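 *
 * (the per-page scan that replaces this counter lives in weigh_cb()
 * below and follows the usual gang-lookup callback contract:
 *
 *     CLP_GANG_OKAY    - keep scanning
 *     CLP_GANG_RESCHED - yield the CPU, then resume the scan
 *     CLP_GANG_ABORT   - stop, a locked/dirty/writeback page was found)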
- */ - return cl_object_header(slice->cls_obj)->coh_pages; + struct cl_page *page = ops->ops_cl.cpl_page; + + if (cl_page_is_vmlocked(env, page) + || PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage) + ) + return CLP_GANG_ABORT; + + *(pgoff_t *)cbdata = osc_index(ops) + 1; + return CLP_GANG_OKAY; +} + +static unsigned long osc_lock_weight(const struct lu_env *env, + struct osc_object *oscobj, + struct ldlm_extent *extent) +{ + struct cl_io *io = &osc_env_info(env)->oti_io; + struct cl_object *obj = cl_object_top(&oscobj->oo_cl); + pgoff_t page_index; + int result; + ENTRY; + + io->ci_obj = obj; + io->ci_ignore_layout = 1; + result = cl_io_init(env, io, CIT_MISC, io->ci_obj); + if (result != 0) + RETURN(result); + + page_index = cl_index(obj, extent->start); + do { + result = osc_page_gang_lookup(env, io, oscobj, + page_index, + cl_index(obj, extent->end), + weigh_cb, (void *)&page_index); + if (result == CLP_GANG_ABORT) + break; + if (result == CLP_GANG_RESCHED) + cond_resched(); + } while (result != CLP_GANG_OKAY); + cl_io_fini(env, io); + + return result == CLP_GANG_ABORT ? 1 : 0; } /** * Get the weight of dlm lock for early cancellation. - * - * XXX: it should return the pages covered by this \a dlmlock. */ -static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock) +unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock) { - struct cl_env_nest nest; - struct lu_env *env; - struct osc_lock *lock; - struct cl_lock *cll; - unsigned long weight; - ENTRY; - - cfs_might_sleep(); - /* - * osc_ldlm_weigh_ast has a complex context since it might be called - * because of lock canceling, or from user's input. We have to make - * a new environment for it. Probably it is implementation safe to use - * the upper context because cl_lock_put don't modify environment - * variables. But in case of .. - */ - env = cl_env_nested_get(&nest); - if (IS_ERR(env)) - /* Mostly because lack of memory, tend to eliminate this lock*/ - RETURN(0); - - LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT); - lock = osc_ast_data_get(dlmlock); - if (lock == NULL) { - /* cl_lock was destroyed because of memory pressure. - * It is much reasonable to assign this type of lock - * a lower cost. - */ - GOTO(out, weight = 0); - } + struct cl_env_nest nest; + struct lu_env *env; + struct osc_object *obj; + struct osc_lock *oscl; + unsigned long weight; + bool found = false; + ENTRY; - cll = lock->ols_cl.cls_lock; - cl_lock_mutex_get(env, cll); - weight = cl_lock_weigh(env, cll); - cl_lock_mutex_put(env, cll); - osc_ast_data_put(env, lock); - EXIT; + might_sleep(); + /* + * osc_ldlm_weigh_ast has a complex context since it might be called + * because of lock canceling, or from user's input. We have to make + * a new environment for it. Probably it is implementation safe to use + * the upper context because cl_lock_put don't modify environment + * variables. But just in case .. + */ + env = cl_env_nested_get(&nest); + if (IS_ERR(env)) + /* Mostly because lack of memory, do not eliminate this lock */ + RETURN(1); + + LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT); + obj = dlmlock->l_ast_data; + if (obj == NULL) + GOTO(out, weight = 1); + + spin_lock(&obj->oo_ol_spin); + list_for_each_entry(oscl, &obj->oo_ol_list, ols_nextlock_oscobj) { + if (oscl->ols_dlmlock != NULL && oscl->ols_dlmlock != dlmlock) + continue; + found = true; + } + spin_unlock(&obj->oo_ol_spin); + if (found) { + /* + * If the lock is being used by an IO, definitely not cancel it. 
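 *
 * (the weight returned by this AST feeds the early-cancel policy; as
 * used here:
 *
 *     weight == 0  - no busy pages under the lock, cheap to cancel
 *     weight == 1  - lock is in use, or we could not tell, so keep it)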
+ */ + GOTO(out, weight = 1); + } + + weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent); + EXIT; out: - cl_env_nested_put(&nest, env); - return weight; + cl_env_nested_put(&nest, env); + return weight; } static void osc_lock_build_einfo(const struct lu_env *env, - const struct cl_lock *clock, - struct osc_lock *lock, - struct ldlm_enqueue_info *einfo) + const struct cl_lock *lock, + struct osc_object *osc, + struct ldlm_enqueue_info *einfo) { - enum cl_lock_mode mode; - - mode = clock->cll_descr.cld_mode; - if (mode == CLM_PHANTOM) - /* - * For now, enqueue all glimpse locks in read mode. In the - * future, client might choose to enqueue LCK_PW lock for - * glimpse on a file opened for write. - */ - mode = CLM_READ; - - einfo->ei_type = LDLM_EXTENT; - einfo->ei_mode = osc_cl_lock2ldlm(mode); - einfo->ei_cb_bl = osc_ldlm_blocking_ast; - einfo->ei_cb_cp = osc_ldlm_completion_ast; - einfo->ei_cb_gl = osc_ldlm_glimpse_ast; - einfo->ei_cb_wg = osc_ldlm_weigh_ast; - einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */ + einfo->ei_type = LDLM_EXTENT; + einfo->ei_mode = osc_cl_lock2ldlm(lock->cll_descr.cld_mode); + einfo->ei_cb_bl = osc_ldlm_blocking_ast; + einfo->ei_cb_cp = ldlm_completion_ast; + einfo->ei_cb_gl = osc_ldlm_glimpse_ast; + einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */ } /** @@ -999,14 +767,12 @@ static void osc_lock_to_lockless(const struct lu_env *env, struct osc_lock *ols, int force) { struct cl_lock_slice *slice = &ols->ols_cl; - struct cl_lock *lock = slice->cls_lock; LASSERT(ols->ols_state == OLS_NEW || ols->ols_state == OLS_UPCALL_RECEIVED); if (force) { ols->ols_locklessable = 1; - LASSERT(cl_lock_is_mutexed(lock)); slice->cls_ops = &osc_lock_lockless_ops; } else { struct osc_io *oio = osc_env_io(env); @@ -1038,114 +804,100 @@ static void osc_lock_to_lockless(const struct lu_env *env, LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols))); } -static int osc_lock_compatible(const struct osc_lock *qing, - const struct osc_lock *qed) +static bool osc_lock_compatible(const struct osc_lock *qing, + const struct osc_lock *qed) { - enum cl_lock_mode qing_mode; - enum cl_lock_mode qed_mode; - - qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode; - if (qed->ols_glimpse && - (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ)) - return 1; - - qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode; - return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ)); -} - -/** - * Cancel all conflicting locks and wait for them to be destroyed. - * - * This function is used for two purposes: - * - * - early cancel all conflicting locks before starting IO, and - * - * - guarantee that pages added to the page cache by lockless IO are never - * covered by locks other than lockless IO lock, and, hence, are not - * visible to other threads. 
- */ -static int osc_lock_enqueue_wait(const struct lu_env *env, - const struct osc_lock *olck) -{ - struct cl_lock *lock = olck->ols_cl.cls_lock; - struct cl_lock_descr *descr = &lock->cll_descr; - struct cl_object_header *hdr = cl_object_header(descr->cld_obj); - struct cl_lock *scan; - struct cl_lock *conflict= NULL; - int lockless = osc_lock_is_lockless(olck); - int rc = 0; - ENTRY; + struct cl_lock_descr *qed_descr = &qed->ols_cl.cls_lock->cll_descr; + struct cl_lock_descr *qing_descr = &qing->ols_cl.cls_lock->cll_descr; - LASSERT(cl_lock_is_mutexed(lock)); + if (qed->ols_glimpse) + return true; - /* make it enqueue anyway for glimpse lock, because we actually - * don't need to cancel any conflicting locks. */ - if (olck->ols_glimpse) - return 0; + if (qing_descr->cld_mode == CLM_READ && qed_descr->cld_mode == CLM_READ) + return true; - cfs_spin_lock(&hdr->coh_lock_guard); - cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) { - struct cl_lock_descr *cld = &scan->cll_descr; - const struct osc_lock *scan_ols; + if (qed->ols_state < OLS_GRANTED) + return true; - if (scan == lock) - break; + if (qed_descr->cld_mode >= qing_descr->cld_mode && + qed_descr->cld_start <= qing_descr->cld_start && + qed_descr->cld_end >= qing_descr->cld_end) + return true; - if (scan->cll_state < CLS_QUEUING || - scan->cll_state == CLS_FREEING || - cld->cld_start > descr->cld_end || - cld->cld_end < descr->cld_start) - continue; - - /* overlapped and living locks. */ + return false; +} - /* We're not supposed to give up group lock. */ - if (scan->cll_descr.cld_mode == CLM_GROUP) { - LASSERT(descr->cld_mode != CLM_GROUP || - descr->cld_gid != scan->cll_descr.cld_gid); - continue; - } +static void osc_lock_wake_waiters(const struct lu_env *env, + struct osc_object *osc, + struct osc_lock *oscl) +{ + spin_lock(&osc->oo_ol_spin); + list_del_init(&oscl->ols_nextlock_oscobj); + spin_unlock(&osc->oo_ol_spin); - scan_ols = osc_lock_at(scan); + spin_lock(&oscl->ols_lock); + while (!list_empty(&oscl->ols_waiting_list)) { + struct osc_lock *scan; - /* We need to cancel the compatible locks if we're enqueuing - * a lockless lock, for example: - * imagine that client has PR lock on [0, 1000], and thread T0 - * is doing lockless IO in [500, 1500] region. Concurrent - * thread T1 can see lockless data in [500, 1000], which is - * wrong, because these data are possibly stale. 
*/ - if (!lockless && osc_lock_compatible(olck, scan_ols)) - continue; + scan = list_entry(oscl->ols_waiting_list.next, struct osc_lock, + ols_wait_entry); + list_del_init(&scan->ols_wait_entry); - cl_lock_get_trust(scan); - conflict = scan; - break; - } - cfs_spin_unlock(&hdr->coh_lock_guard); + cl_sync_io_note(env, scan->ols_owner, 0); + } + spin_unlock(&oscl->ols_lock); +} - if (conflict) { - if (lock->cll_descr.cld_mode == CLM_GROUP) { - /* we want a group lock but a previous lock request - * conflicts, we do not wait but return 0 so the - * request is send to the server - */ - CDEBUG(D_DLMTRACE, "group lock %p is conflicted " - "with %p, no wait, send to server\n", - lock, conflict); - cl_lock_put(env, conflict); - rc = 0; - } else { - CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, " - "will wait\n", - lock, conflict); - LASSERT(lock->cll_conflict == NULL); - lu_ref_add(&conflict->cll_reference, "cancel-wait", - lock); - lock->cll_conflict = conflict; - rc = CLO_WAIT; - } - } - RETURN(rc); +static void osc_lock_enqueue_wait(const struct lu_env *env, + struct osc_object *obj, + struct osc_lock *oscl) +{ + struct osc_lock *tmp_oscl; + struct cl_lock_descr *need = &oscl->ols_cl.cls_lock->cll_descr; + struct cl_sync_io *waiter = &osc_env_info(env)->oti_anchor; + + spin_lock(&obj->oo_ol_spin); + list_add_tail(&oscl->ols_nextlock_oscobj, &obj->oo_ol_list); + +restart: + list_for_each_entry(tmp_oscl, &obj->oo_ol_list, + ols_nextlock_oscobj) { + struct cl_lock_descr *descr; + + if (tmp_oscl == oscl) + break; + + descr = &tmp_oscl->ols_cl.cls_lock->cll_descr; + if (descr->cld_start > need->cld_end || + descr->cld_end < need->cld_start) + continue; + + /* We're not supposed to give up group lock */ + if (descr->cld_mode == CLM_GROUP) + break; + + if (!osc_lock_is_lockless(oscl) && + osc_lock_compatible(oscl, tmp_oscl)) + continue; + + /* wait for conflicting lock to be canceled */ + cl_sync_io_init(waiter, 1, cl_sync_io_end); + oscl->ols_owner = waiter; + + spin_lock(&tmp_oscl->ols_lock); + /* add oscl into tmp's ols_waiting list */ + list_add_tail(&oscl->ols_wait_entry, + &tmp_oscl->ols_waiting_list); + spin_unlock(&tmp_oscl->ols_lock); + + spin_unlock(&obj->oo_ol_spin); + (void)cl_sync_io_wait(env, waiter, 0); + + spin_lock(&obj->oo_ol_spin); + oscl->ols_owner = NULL; + goto restart; + } + spin_unlock(&obj->oo_ol_spin); } /** @@ -1163,199 +915,125 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, * This function does not wait for the network communication to complete. 
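 *
 * The ordering relies on the cl_sync_io handshake used above: the
 * enqueuing thread parks itself with
 *
 *     cl_sync_io_init(waiter, 1, cl_sync_io_end);
 *     oscl->ols_owner = waiter;
 *     ...
 *     (void)cl_sync_io_wait(env, waiter, 0);
 *
 * and cancellation of the conflicting lock releases it through
 * osc_lock_wake_waiters(), i.e. cl_sync_io_note(env, scan->ols_owner, 0).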
*/ static int osc_lock_enqueue(const struct lu_env *env, - const struct cl_lock_slice *slice, - struct cl_io *unused, __u32 enqflags) -{ - struct osc_lock *ols = cl2osc_lock(slice); - struct cl_lock *lock = ols->ols_cl.cls_lock; - int result; - ENTRY; - - LASSERT(cl_lock_is_mutexed(lock)); - LASSERTF(ols->ols_state == OLS_NEW, - "Impossible state: %d\n", ols->ols_state); - - ols->ols_flags = osc_enq2ldlm_flags(enqflags); - if (enqflags & CEF_AGL) { - ols->ols_flags |= LDLM_FL_BLOCK_NOWAIT; - ols->ols_agl = 1; - } - if (ols->ols_flags & LDLM_FL_HAS_INTENT) - ols->ols_glimpse = 1; - if (!osc_lock_is_lockless(ols) && !(enqflags & CEF_MUST)) - /* try to convert this lock to a lockless lock */ - osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER)); - - result = osc_lock_enqueue_wait(env, ols); - if (result == 0) { - if (!osc_lock_is_lockless(ols)) { - struct osc_object *obj = cl2osc(slice->cls_obj); - struct osc_thread_info *info = osc_env_info(env); - struct ldlm_res_id *resname = &info->oti_resname; - ldlm_policy_data_t *policy = &info->oti_policy; - struct ldlm_enqueue_info *einfo = &ols->ols_einfo; - - if (ols->ols_locklessable) - ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION; - - /* a reference for lock, passed as an upcall cookie */ - cl_lock_get(lock); - lu_ref_add(&lock->cll_reference, "upcall", lock); - /* a user for lock also */ - cl_lock_user_add(env, lock); - ols->ols_state = OLS_ENQUEUED; - - /* - * XXX: this is possible blocking point as - * ldlm_lock_match(LDLM_FL_LVB_READY) waits for - * LDLM_CP_CALLBACK. - */ - osc_lock_build_res(env, obj, resname); - osc_lock_build_policy(env, lock, policy); - result = osc_enqueue_base(osc_export(obj), resname, - &ols->ols_flags, policy, - &ols->ols_lvb, - obj->oo_oinfo->loi_kms_valid, - osc_lock_upcall, - ols, einfo, &ols->ols_handle, - PTLRPCD_SET, 1, ols->ols_agl); - if (result != 0) { - cl_lock_user_del(env, lock); - lu_ref_del(&lock->cll_reference, - "upcall", lock); - cl_lock_put(env, lock); - if (unlikely(result == -ECANCELED)) { - ols->ols_state = OLS_NEW; - result = 0; - } - } - } else { - ols->ols_state = OLS_GRANTED; - ols->ols_owner = osc_env_io(env); - } - } - LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols))); - RETURN(result); -} - -static int osc_lock_wait(const struct lu_env *env, - const struct cl_lock_slice *slice) + const struct cl_lock_slice *slice, + struct cl_io *unused, struct cl_sync_io *anchor) { - struct osc_lock *olck = cl2osc_lock(slice); - struct cl_lock *lock = olck->ols_cl.cls_lock; - - LINVRNT(osc_lock_invariant(olck)); - - if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) { - if (olck->ols_flags & LDLM_FL_LVB_READY) { - return 0; - } else if (olck->ols_agl) { - if (lock->cll_flags & CLF_FROM_UPCALL) - /* It is from enqueue RPC reply upcall for - * updating state. Do not re-enqueue. 
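 *
 * (contract sketch for the new enqueue path above: once the request
 * completes, osc_enqueue_base() invokes
 *
 *     upcall(cookie, lockh, errcode);
 *
 * where upcall/cookie are osc_lock_upcall()/osc_lock for regular locks,
 * or osc_lock_upcall_agl()/osc_object for AGL locks, the latter holding
 * an extra cl_object reference until the callback runs)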
*/ - return -ENAVAIL; - else - olck->ols_state = OLS_NEW; - } else { - LASSERT(lock->cll_error); - return lock->cll_error; - } - } - - if (olck->ols_state == OLS_NEW) { - int rc; + struct osc_thread_info *info = osc_env_info(env); + struct osc_io *oio = osc_env_io(env); + struct osc_object *osc = cl2osc(slice->cls_obj); + struct osc_lock *oscl = cl2osc_lock(slice); + struct cl_lock *lock = slice->cls_lock; + struct ldlm_res_id *resname = &info->oti_resname; + ldlm_policy_data_t *policy = &info->oti_policy; + osc_enqueue_upcall_f upcall = osc_lock_upcall; + void *cookie = oscl; + bool async = false; + int result; - LASSERT(olck->ols_agl); - - rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST); - if (rc != 0) - return rc; - else - return CLO_REENQUEUED; - } + ENTRY; - LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED && - lock->cll_error == 0, olck->ols_lock != NULL)); + LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ), + "lock = %p, ols = %p\n", lock, oscl); + + if (oscl->ols_state == OLS_GRANTED) + RETURN(0); + + if (oscl->ols_flags & LDLM_FL_TEST_LOCK) + GOTO(enqueue_base, 0); + + if (oscl->ols_glimpse) { + LASSERT(equi(oscl->ols_agl, anchor == NULL)); + async = true; + GOTO(enqueue_base, 0); + } + + osc_lock_enqueue_wait(env, osc, oscl); + + /* we can grant lockless lock right after all conflicting locks + * are canceled. */ + if (osc_lock_is_lockless(oscl)) { + oscl->ols_state = OLS_GRANTED; + oio->oi_lockless = 1; + RETURN(0); + } + +enqueue_base: + oscl->ols_state = OLS_ENQUEUED; + if (anchor != NULL) { + atomic_inc(&anchor->csi_sync_nr); + oscl->ols_owner = anchor; + } + + /** + * DLM lock's ast data must be osc_object; + * if glimpse or AGL lock, async of osc_enqueue_base() must be true, + * DLM's enqueue callback set to osc_lock_upcall() with cookie as + * osc_lock. + */ + ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname); + osc_lock_build_einfo(env, lock, osc, &oscl->ols_einfo); + osc_lock_build_policy(env, lock, policy); + if (oscl->ols_agl) { + oscl->ols_einfo.ei_cbdata = NULL; + /* hold a reference for callback */ + cl_object_get(osc2cl(osc)); + upcall = osc_lock_upcall_agl; + cookie = osc; + } + result = osc_enqueue_base(osc_export(osc), resname, &oscl->ols_flags, + policy, &oscl->ols_lvb, + osc->oo_oinfo->loi_kms_valid, + upcall, cookie, + &oscl->ols_einfo, PTLRPCD_SET, async, + oscl->ols_agl); + if (result != 0) { + oscl->ols_state = OLS_CANCELLED; + osc_lock_wake_waiters(env, osc, oscl); + + /* hide error for AGL lock. */ + if (oscl->ols_agl) { + cl_object_put(env, osc2cl(osc)); + result = 0; + } - return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT; + if (anchor != NULL) + cl_sync_io_note(env, anchor, result); + } else { + if (osc_lock_is_lockless(oscl)) { + oio->oi_lockless = 1; + } else if (!async) { + LASSERT(oscl->ols_state == OLS_GRANTED); + LASSERT(oscl->ols_hold); + LASSERT(oscl->ols_dlmlock != NULL); + } + } + RETURN(result); } /** - * An implementation of cl_lock_operations::clo_use() method that pins cached - * lock. + * Breaks a link between osc_lock and dlm_lock. */ -static int osc_lock_use(const struct lu_env *env, - const struct cl_lock_slice *slice) +static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) { - struct osc_lock *olck = cl2osc_lock(slice); - int rc; - - LASSERT(!olck->ols_hold); - - /* - * Atomically check for LDLM_FL_CBPENDING and addref a lock if this - * flag is not set. This protects us from a concurrent blocking ast. 
- */ - rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode); - if (rc == 0) { - olck->ols_hold = 1; - olck->ols_state = OLS_GRANTED; - } else { - struct cl_lock *lock; - - /* - * Lock is being cancelled somewhere within - * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already - * set, but osc_ldlm_blocking_ast() hasn't yet acquired - * cl_lock mutex. - */ - lock = slice->cls_lock; - LASSERT(lock->cll_state == CLS_INTRANSIT); - LASSERT(lock->cll_users > 0); - /* set a flag for osc_dlm_blocking_ast0() to signal the - * lock.*/ - olck->ols_ast_wait = 1; - rc = CLO_WAIT; - } - return rc; -} + struct ldlm_lock *dlmlock; -static int osc_lock_flush(struct osc_lock *ols, int discard) -{ - struct cl_lock *lock = ols->ols_cl.cls_lock; - struct cl_env_nest nest; - struct lu_env *env; - int result = 0; - ENTRY; + dlmlock = olck->ols_dlmlock; + if (dlmlock == NULL) + return; - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - struct osc_object *obj = cl2osc(ols->ols_cl.cls_obj); - struct cl_lock_descr *descr = &lock->cll_descr; - int rc = 0; - - if (descr->cld_mode >= CLM_WRITE) { - result = osc_cache_writeback_range(env, obj, - descr->cld_start, descr->cld_end, - 1, discard); - CDEBUG(D_DLMTRACE, "write out %d pages for lock %p.\n", - result, lock); - if (result > 0) - result = 0; - } + if (olck->ols_hold) { + olck->ols_hold = 0; + osc_cancel_base(&olck->ols_handle, olck->ols_einfo.ei_mode); + olck->ols_handle.cookie = 0ULL; + } - rc = cl_lock_discard_pages(env, lock); - if (result == 0 && rc < 0) - result = rc; + olck->ols_dlmlock = NULL; - cl_env_nested_put(&nest, env); - } else - result = PTR_ERR(env); - if (result == 0) { - ols->ols_flush = 1; - LINVRNT(!osc_lock_has_pages(ols)); - } - RETURN(result); + /* release a reference taken in osc_lock_upcall(). */ + LASSERT(olck->ols_has_ref); + lu_ref_del(&dlmlock->l_reference, "osc_lock", olck); + LDLM_LOCK_RELEASE(dlmlock); + olck->ols_has_ref = 0; } /** @@ -1375,363 +1053,183 @@ static int osc_lock_flush(struct osc_lock *ols, int discard) static void osc_lock_cancel(const struct lu_env *env, const struct cl_lock_slice *slice) { - struct cl_lock *lock = slice->cls_lock; - struct osc_lock *olck = cl2osc_lock(slice); - struct ldlm_lock *dlmlock = olck->ols_lock; - int result = 0; - int discard; - - LASSERT(cl_lock_is_mutexed(lock)); - LINVRNT(osc_lock_invariant(olck)); - - if (dlmlock != NULL) { - int do_cancel; - - discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA); - result = osc_lock_flush(olck, discard); - osc_lock_unhold(olck); - - lock_res_and_lock(dlmlock); - /* Now that we're the only user of dlm read/write reference, - * mostly the ->l_readers + ->l_writers should be zero. - * However, there is a corner case. 
- * See bug 18829 for details.*/ - do_cancel = (dlmlock->l_readers == 0 && - dlmlock->l_writers == 0); - dlmlock->l_flags |= LDLM_FL_CBPENDING; - unlock_res_and_lock(dlmlock); - if (do_cancel) - result = ldlm_cli_cancel(&olck->ols_handle); - if (result < 0) - CL_LOCK_DEBUG(D_ERROR, env, lock, - "lock %p cancel failure with error(%d)\n", - lock, result); - } - olck->ols_state = OLS_CANCELLED; - olck->ols_flags &= ~LDLM_FL_LVB_READY; - osc_lock_detach(env, olck); -} - -#ifdef INVARIANT_CHECK -static int check_cb(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, void *cbdata) -{ - struct cl_lock *lock = cbdata; - - if (lock->cll_descr.cld_mode == CLM_READ) { - struct cl_lock *tmp; - tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj, - page, lock, 1, 0); - if (tmp != NULL) { - cl_lock_put(env, tmp); - return CLP_GANG_OKAY; - } - } - - CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n"); - CL_PAGE_DEBUG(D_ERROR, env, page, "\n"); - return CLP_GANG_ABORT; -} - -/** - * Returns true iff there are pages under \a olck not protected by other - * locks. - */ -static int osc_lock_has_pages(struct osc_lock *olck) -{ - struct cl_lock *lock; - struct cl_lock_descr *descr; - struct cl_object *obj; - struct osc_object *oob; - struct cl_env_nest nest; - struct cl_io *io; - struct lu_env *env; - int result; - - env = cl_env_nested_get(&nest); - if (IS_ERR(env)) - return 0; - - obj = olck->ols_cl.cls_obj; - oob = cl2osc(obj); - io = &oob->oo_debug_io; - lock = olck->ols_cl.cls_lock; - descr = &lock->cll_descr; - - cfs_mutex_lock(&oob->oo_debug_mutex); - - io->ci_obj = cl_object_top(obj); - io->ci_ignore_layout = 1; - cl_io_init(env, io, CIT_MISC, io->ci_obj); - do { - result = cl_page_gang_lookup(env, obj, io, - descr->cld_start, descr->cld_end, - check_cb, (void *)lock); - if (result == CLP_GANG_ABORT) - break; - if (result == CLP_GANG_RESCHED) - cfs_cond_resched(); - } while (result != CLP_GANG_OKAY); - cl_io_fini(env, io); - cfs_mutex_unlock(&oob->oo_debug_mutex); - cl_env_nested_put(&nest, env); - - return (result == CLP_GANG_ABORT); -} -#else -static int osc_lock_has_pages(struct osc_lock *olck) -{ - return 0; -} -#endif /* INVARIANT_CHECK */ - -static void osc_lock_delete(const struct lu_env *env, - const struct cl_lock_slice *slice) -{ - struct osc_lock *olck; + struct osc_object *obj = cl2osc(slice->cls_obj); + struct osc_lock *oscl = cl2osc_lock(slice); - olck = cl2osc_lock(slice); - if (olck->ols_glimpse) { - LASSERT(!olck->ols_hold); - LASSERT(!olck->ols_lock); - return; - } + ENTRY; - LINVRNT(osc_lock_invariant(olck)); - LINVRNT(!osc_lock_has_pages(olck)); + LINVRNT(osc_lock_invariant(oscl)); - osc_lock_unhold(olck); - osc_lock_detach(env, olck); -} + osc_lock_detach(env, oscl); + oscl->ols_state = OLS_CANCELLED; + oscl->ols_flags &= ~LDLM_FL_LVB_READY; -/** - * Implements cl_lock_operations::clo_state() method for osc layer. - * - * Maintains osc_lock::ols_owner field. - * - * This assumes that lock always enters CLS_HELD (from some other state) in - * the same IO context as one that requested the lock. This should not be a - * problem, because context is by definition shared by all activity pertaining - * to the same high-level IO. - */ -static void osc_lock_state(const struct lu_env *env, - const struct cl_lock_slice *slice, - enum cl_lock_state state) -{ - struct osc_lock *lock = cl2osc_lock(slice); - - /* - * XXX multiple io contexts can use the lock at the same time. 
 static int osc_lock_print(const struct lu_env *env, void *cookie,
-			  lu_printer_t p, const struct cl_lock_slice *slice)
-{
-	struct osc_lock *lock = cl2osc_lock(slice);
-
-	/*
-	 * XXX print ldlm lock and einfo properly.
-	 */
-	(*p)(env, cookie, "%p %08x "LPX64" %d %p ",
-	     lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
-	     lock->ols_state, lock->ols_owner);
-	osc_lvb_print(env, cookie, p, &lock->ols_lvb);
-	return 0;
-}
-
-static int osc_lock_fits_into(const struct lu_env *env,
-			      const struct cl_lock_slice *slice,
-			      const struct cl_lock_descr *need,
-			      const struct cl_io *io)
+			  lu_printer_t p, const struct cl_lock_slice *slice)
 {
-	struct osc_lock *ols = cl2osc_lock(slice);
+	struct osc_lock *lock = cl2osc_lock(slice);
 
-	if (need->cld_enq_flags & CEF_NEVER)
-		return 0;
-
-	if (ols->ols_state >= OLS_CANCELLED)
-		return 0;
-
-	if (need->cld_mode == CLM_PHANTOM) {
-		if (ols->ols_agl)
-			return !(ols->ols_state > OLS_RELEASED);
-
-		/*
-		 * Note: the QUEUED lock can't be matched here, otherwise
-		 * it might cause the deadlocks.
-		 * In read_process,
-		 * P1: enqueued read lock, create sublock1
-		 * P2: enqueued write lock, create sublock2(conflicted
-		 *     with sublock1).
-		 * P1: Grant read lock.
-		 * P1: enqueued glimpse lock(with holding sublock1_read),
-		 *     matched with sublock2, waiting sublock2 to be granted.
-		 *     But sublock2 can not be granted, because P1
-		 *     will not release sublock1. Bang!
-		 */
-		if (ols->ols_state < OLS_GRANTED ||
-		    ols->ols_state > OLS_RELEASED)
-			return 0;
-	} else if (need->cld_enq_flags & CEF_MUST) {
-		/*
-		 * If the lock hasn't ever enqueued, it can't be matched
-		 * because enqueue process brings in many information
-		 * which can be used to determine things such as lockless,
-		 * CEF_MUST, etc.
-		 */
-		if (ols->ols_state < OLS_UPCALL_RECEIVED &&
-		    ols->ols_locklessable)
-			return 0;
-	}
-	return 1;
+	(*p)(env, cookie, "%p "LPX64" "LPX64" %d %p ",
+	     lock->ols_dlmlock, lock->ols_flags, lock->ols_handle.cookie,
+	     lock->ols_state, lock->ols_owner);
+	osc_lvb_print(env, cookie, p, &lock->ols_lvb);
+	return 0;
 }
 
 static const struct cl_lock_operations osc_lock_ops = {
         .clo_fini    = osc_lock_fini,
         .clo_enqueue = osc_lock_enqueue,
-        .clo_wait    = osc_lock_wait,
-        .clo_unuse   = osc_lock_unuse,
-        .clo_use     = osc_lock_use,
-        .clo_delete  = osc_lock_delete,
-        .clo_state   = osc_lock_state,
         .clo_cancel  = osc_lock_cancel,
-        .clo_weigh   = osc_lock_weigh,
         .clo_print   = osc_lock_print,
-        .clo_fits_into = osc_lock_fits_into,
 };
 
-static int osc_lock_lockless_unuse(const struct lu_env *env,
-				   const struct cl_lock_slice *slice)
-{
-	struct osc_lock *ols = cl2osc_lock(slice);
-	struct cl_lock *lock = slice->cls_lock;
-
-	LASSERT(ols->ols_state == OLS_GRANTED);
-	LINVRNT(osc_lock_invariant(ols));
-
-	cl_lock_cancel(env, lock);
-	cl_lock_delete(env, lock);
-	return 0;
-}
-
 static void osc_lock_lockless_cancel(const struct lu_env *env,
-				     const struct cl_lock_slice *slice)
+				     const struct cl_lock_slice *slice)
 {
-	struct osc_lock *ols = cl2osc_lock(slice);
-	int result;
-
-	result = osc_lock_flush(ols, 0);
+	struct osc_lock *ols = cl2osc_lock(slice);
+	struct osc_object *osc = cl2osc(slice->cls_obj);
+	struct cl_lock_descr *descr = &slice->cls_lock->cll_descr;
+	int result;
+
+	LASSERT(ols->ols_dlmlock == NULL);
+	result = osc_lock_flush(osc, descr->cld_start, descr->cld_end,
+				descr->cld_mode, 0);
 	if (result)
 		CERROR("Pages for lockless lock %p were not purged(%d)\n",
 		       ols, result);
-	ols->ols_state = OLS_CANCELLED;
-}
-
-static int osc_lock_lockless_wait(const struct lu_env *env,
-				  const struct cl_lock_slice *slice)
-{
-	struct osc_lock *olck = cl2osc_lock(slice);
-	struct cl_lock *lock = olck->ols_cl.cls_lock;
-
-	LINVRNT(osc_lock_invariant(olck));
-	LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
-
-	return lock->cll_error;
-}
-
-static void osc_lock_lockless_state(const struct lu_env *env,
-				    const struct cl_lock_slice *slice,
-				    enum cl_lock_state state)
-{
-	struct osc_lock *lock = cl2osc_lock(slice);
-
-	LINVRNT(osc_lock_invariant(lock));
-	if (state == CLS_HELD) {
-		struct osc_io *oio = osc_env_io(env);
-
-		LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
-		lock->ols_owner = oio;
-
-		/* set the io to be lockless if this lock is for io's
-		 * host object */
-		if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
-			oio->oi_lockless = 1;
-	}
-}
-
-static int osc_lock_lockless_fits_into(const struct lu_env *env,
-				       const struct cl_lock_slice *slice,
-				       const struct cl_lock_descr *need,
-				       const struct cl_io *io)
-{
-	struct osc_lock *lock = cl2osc_lock(slice);
-
-	if (!(need->cld_enq_flags & CEF_NEVER))
-		return 0;
-
-	/* lockless lock should only be used by its owning io. b22147 */
-	return (lock->ols_owner == osc_env_io(env));
+	osc_lock_wake_waiters(env, osc, ols);
 }
 
 static const struct cl_lock_operations osc_lock_lockless_ops = {
         .clo_fini      = osc_lock_fini,
         .clo_enqueue   = osc_lock_enqueue,
-        .clo_wait      = osc_lock_lockless_wait,
-        .clo_unuse     = osc_lock_lockless_unuse,
-        .clo_state     = osc_lock_lockless_state,
-        .clo_fits_into = osc_lock_lockless_fits_into,
         .clo_cancel    = osc_lock_lockless_cancel,
         .clo_print     = osc_lock_print
 };
 
-int osc_lock_init(const struct lu_env *env,
-		  struct cl_object *obj, struct cl_lock *lock,
-		  const struct cl_io *unused)
+static void osc_lock_set_writer(const struct lu_env *env,
+				const struct cl_io *io,
+				struct cl_object *obj, struct osc_lock *oscl)
 {
-	struct osc_lock *clk;
-	int result;
-
-	OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
-	if (clk != NULL) {
-		osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
-		cfs_atomic_set(&clk->ols_pageref, 0);
-		clk->ols_state = OLS_NEW;
-		cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
-		result = 0;
-	} else
-		result = -ENOMEM;
-	return result;
+	struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
+	pgoff_t io_start;
+	pgoff_t io_end;
+
+	if (!cl_object_same(io->ci_obj, obj))
+		return;
+
+	if (likely(io->ci_type == CIT_WRITE)) {
+		io_start = cl_index(obj, io->u.ci_rw.crw_pos);
+		io_end = cl_index(obj, io->u.ci_rw.crw_pos +
+				  io->u.ci_rw.crw_count - 1);
+		if (cl_io_is_append(io)) {
+			io_start = 0;
+			io_end = CL_PAGE_EOF;
+		}
+	} else {
+		LASSERT(cl_io_is_mkwrite(io));
+		io_start = io_end = io->u.ci_fault.ft_index;
+	}
+
+	if (descr->cld_mode >= CLM_WRITE &&
+	    descr->cld_start <= io_start && descr->cld_end >= io_end) {
+		struct osc_io *oio = osc_env_io(env);
+
+		/* There must be only one lock to match the write region */
+		LASSERT(oio->oi_write_osclock == NULL);
+		oio->oi_write_osclock = oscl;
+	}
 }
 
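
/*
 * Illustrative sketch (not Lustre code): osc_lock_set_writer() in the
 * hunk above converts the IO's byte range into page indexes -- cl_index()
 * is essentially a byte-offset-to-page-index shift -- and records the
 * lock only if its extent fully covers that range.  The same arithmetic
 * is shown below with hypothetical names and an assumed 4KiB page size.
 */
#include <stdio.h>

#define PAGE_SHIFT	12	/* assume 4KiB pages */
typedef unsigned long pgoff_t;

/* analogue of cl_index(): byte offset -> page index */
static pgoff_t byte_to_index(unsigned long long pos)
{
	return (pgoff_t)(pos >> PAGE_SHIFT);
}

int main(void)
{
	unsigned long long crw_pos = 6000;	/* write starts mid-page 1 */
	unsigned long long crw_count = 9000;	/* ...and ends mid-page 3 */

	/* the end index is taken from the last byte actually written,
	 * hence the "- 1" (mirrors the computation above) */
	pgoff_t io_start = byte_to_index(crw_pos);
	pgoff_t io_end = byte_to_index(crw_pos + crw_count - 1);

	/* a lock with extent [cld_start, cld_end] covers this write iff
	 * cld_start <= io_start && cld_end >= io_end */
	printf("write covers pages [%lu, %lu]\n", io_start, io_end);
	return 0;
}
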
- */ - if (olock != NULL && - cfs_atomic_add_return(_PAGEREF_MAGIC, - &olock->ols_pageref) != _PAGEREF_MAGIC) { - cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref); - rc = 1; - } - cfs_spin_unlock(&osc_ast_guard); - return rc; + struct osc_lock *oscl; + __u32 enqflags = lock->cll_descr.cld_enq_flags; + + OBD_SLAB_ALLOC_PTR_GFP(oscl, osc_lock_kmem, GFP_NOFS); + if (oscl == NULL) + return -ENOMEM; + + oscl->ols_state = OLS_NEW; + spin_lock_init(&oscl->ols_lock); + INIT_LIST_HEAD(&oscl->ols_waiting_list); + INIT_LIST_HEAD(&oscl->ols_wait_entry); + INIT_LIST_HEAD(&oscl->ols_nextlock_oscobj); + + oscl->ols_flags = osc_enq2ldlm_flags(enqflags); + oscl->ols_agl = !!(enqflags & CEF_AGL); + if (oscl->ols_agl) + oscl->ols_flags |= LDLM_FL_BLOCK_NOWAIT; + if (oscl->ols_flags & LDLM_FL_HAS_INTENT) { + oscl->ols_flags |= LDLM_FL_BLOCK_GRANTED; + oscl->ols_glimpse = 1; + } + + cl_lock_slice_add(lock, &oscl->ols_cl, obj, &osc_lock_ops); + + if (!(enqflags & CEF_MUST)) + /* try to convert this lock to a lockless lock */ + osc_lock_to_lockless(env, oscl, (enqflags & CEF_NEVER)); + if (oscl->ols_locklessable && !(enqflags & CEF_DISCARD_DATA)) + oscl->ols_flags |= LDLM_FL_DENY_ON_CONTENTION; + + if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io)) + osc_lock_set_writer(env, io, obj, oscl); + + LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags "LPX64"\n", + lock, oscl, oscl->ols_flags); + + return 0; } +/** + * Finds an existing lock covering given index and optionally different from a + * given \a except lock. + */ +struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env, + struct osc_object *obj, pgoff_t index, + enum osc_dap_flags dap_flags) +{ + struct osc_thread_info *info = osc_env_info(env); + struct ldlm_res_id *resname = &info->oti_resname; + ldlm_policy_data_t *policy = &info->oti_policy; + struct lustre_handle lockh; + struct ldlm_lock *lock = NULL; + ldlm_mode_t mode; + __u64 flags; + + ENTRY; + + ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname); + osc_index2policy(policy, osc2cl(obj), index, index); + policy->l_extent.gid = LDLM_GID_ANY; + + flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING; + if (dap_flags & OSC_DAP_FL_TEST_LOCK) + flags |= LDLM_FL_TEST_LOCK; + /* + * It is fine to match any group lock since there could be only one + * with a uniq gid and it conflicts with all other lock modes too + */ +again: + mode = ldlm_lock_match(osc_export(obj)->exp_obd->obd_namespace, + flags, resname, LDLM_EXTENT, policy, + LCK_PR | LCK_PW | LCK_GROUP, &lockh, + dap_flags & OSC_DAP_FL_CANCELING); + if (mode != 0) { + lock = ldlm_handle2lock(&lockh); + /* RACE: the lock is cancelled so let's try again */ + if (unlikely(lock == NULL)) + goto again; + } + + RETURN(lock); +} /** @} osc */