X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fosc%2Fosc_lock.c;h=459944abe4dfa552280258274efe4948b03733a1;hp=ddf9a34439567aed25c67d613150fa6a8d65a6cb;hb=f625f670afbe954030ff81f0f8522137d6cdd335;hpb=15385c3b934b511a1452327c701fbb6adad71416 diff --git a/lustre/osc/osc_lock.c b/lustre/osc/osc_lock.c index ddf9a34..459944a 100644 --- a/lustre/osc/osc_lock.c +++ b/lustre/osc/osc_lock.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -26,8 +24,10 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2014, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -36,22 +36,21 @@ * Implementation of cl_lock for OSC layer. * * Author: Nikita Danilov + * Author: Jinshan Xiong */ -/** \addtogroup osc osc @{ */ - #define DEBUG_SUBSYSTEM S_OSC -#ifdef __KERNEL__ -# include -#else -# include -#endif +#include /* fid_build_reg_res_name() */ #include #include "osc_cl_internal.h" +/** \addtogroup osc + * @{ + */ + /***************************************************************************** * * Type conversions. @@ -88,35 +87,49 @@ static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle) */ static int osc_lock_invariant(struct osc_lock *ols) { - struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle); - struct ldlm_lock *olock = ols->ols_lock; - int handle_used = lustre_handle_is_used(&ols->ols_handle); - - return - ergo(osc_lock_is_lockless(ols), - ols->ols_locklessable && ols->ols_lock == NULL) || - (ergo(olock != NULL, handle_used) && - ergo(olock != NULL, - olock->l_handle.h_cookie == ols->ols_handle.cookie) && - /* - * Check that ->ols_handle and ->ols_lock are consistent, but - * take into account that they are set at the different time. - */ - ergo(handle_used, - ergo(lock != NULL && olock != NULL, lock == olock) && - ergo(lock == NULL, olock == NULL)) && - ergo(ols->ols_state == OLS_CANCELLED, - olock == NULL && !handle_used) && - /* - * DLM lock is destroyed only after we have seen cancellation - * ast. - */ - ergo(olock != NULL && ols->ols_state < OLS_CANCELLED, - !olock->l_destroyed) && - ergo(ols->ols_state == OLS_GRANTED, - olock != NULL && - olock->l_req_mode == olock->l_granted_mode && - ols->ols_hold)); + struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle); + struct ldlm_lock *olock = ols->ols_dlmlock; + int handle_used = lustre_handle_is_used(&ols->ols_handle); + + if (ergo(osc_lock_is_lockless(ols), + ols->ols_locklessable && ols->ols_dlmlock == NULL)) + return 1; + + /* + * If all the following "ergo"s are true, return 1, otherwise 0 + */ + if (! ergo(olock != NULL, handle_used)) + return 0; + + if (! ergo(olock != NULL, + olock->l_handle.h_cookie == ols->ols_handle.cookie)) + return 0; + + if (! ergo(handle_used, + ergo(lock != NULL && olock != NULL, lock == olock) && + ergo(lock == NULL, olock == NULL))) + return 0; + /* + * Check that ->ols_handle and ->ols_dlmlock are consistent, but + * take into account that they are set at the different time. + */ + if (! ergo(ols->ols_state == OLS_CANCELLED, + olock == NULL && !handle_used)) + return 0; + /* + * DLM lock is destroyed only after we have seen cancellation + * ast. + */ + if (! 
ergo(olock != NULL && ols->ols_state < OLS_CANCELLED, + !ldlm_is_destroyed(olock))) + return 0; + + if (! ergo(ols->ols_state == OLS_GRANTED, + olock != NULL && + olock->l_req_mode == olock->l_granted_mode && + ols->ols_hold)) + return 0; + return 1; } /***************************************************************************** @@ -125,114 +138,15 @@ static int osc_lock_invariant(struct osc_lock *ols) * */ -/** - * Breaks a link between osc_lock and dlm_lock. - */ -static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) -{ - struct ldlm_lock *dlmlock; - - spin_lock(&osc_ast_guard); - dlmlock = olck->ols_lock; - if (dlmlock == NULL) { - spin_unlock(&osc_ast_guard); - return; - } - - olck->ols_lock = NULL; - /* wb(); --- for all who checks (ols->ols_lock != NULL) before - * call to osc_lock_detach() */ - dlmlock->l_ast_data = NULL; - olck->ols_handle.cookie = 0ULL; - spin_unlock(&osc_ast_guard); - - lock_res_and_lock(dlmlock); - if (dlmlock->l_granted_mode == dlmlock->l_req_mode) { - struct cl_object *obj = olck->ols_cl.cls_obj; - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms; - - /* Update the kms. Need to loop all granted locks. - * Not a problem for the client */ - attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms); - unlock_res_and_lock(dlmlock); - - cl_object_attr_lock(obj); - cl_object_attr_set(env, obj, attr, CAT_KMS); - cl_object_attr_unlock(obj); - } else - unlock_res_and_lock(dlmlock); - - /* release a reference taken in osc_lock_upcall0(). */ - lu_ref_del(&dlmlock->l_reference, "osc_lock", olck); - LDLM_LOCK_RELEASE(dlmlock); -} - -static int osc_lock_unuse(const struct lu_env *env, - const struct cl_lock_slice *slice) -{ - struct osc_lock *ols = cl2osc_lock(slice); - int result; - - LASSERT(ols->ols_state == OLS_GRANTED || - ols->ols_state == OLS_UPCALL_RECEIVED); - LINVRNT(osc_lock_invariant(ols)); - - if (ols->ols_glimpse) { - LASSERT(ols->ols_hold == 0); - return 0; - } - LASSERT(ols->ols_hold); - - /* - * Move lock into OLS_RELEASED state before calling osc_cancel_base() - * so that possible synchronous cancellation (that always happens - * e.g., for liblustre) sees that lock is released. - */ - ols->ols_state = OLS_RELEASED; - ols->ols_hold = 0; - result = osc_cancel_base(&ols->ols_handle, ols->ols_einfo.ei_mode); - ols->ols_has_ref = 0; - return result; -} - static void osc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice) { - struct osc_lock *ols = cl2osc_lock(slice); - - LINVRNT(osc_lock_invariant(ols)); - /* - * ->ols_hold can still be true at this point if, for example, a - * thread that requested a lock was killed (and released a reference - * to the lock), before reply from a server was received. In this case - * lock is destroyed immediately after upcall. - */ - if (ols->ols_hold) - osc_lock_unuse(env, slice); - LASSERT(ols->ols_lock == NULL); - - OBD_SLAB_FREE_PTR(ols, osc_lock_kmem); -} + struct osc_lock *ols = cl2osc_lock(slice); -void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj, - struct ldlm_res_id *resname) -{ - const struct lu_fid *fid = lu_object_fid(&obj->oo_cl.co_lu); - if (0) { - /* - * In the perfect world of the future, where ost servers talk - * idif-fids... - */ - fid_build_reg_res_name(fid, resname); - } else { - /* - * In reality, where ost server expects ->lsm_object_id and - * ->lsm_object_gr in rename. 
- */ - osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr, - resname); - } + LINVRNT(osc_lock_invariant(ols)); + LASSERT(ols->ols_dlmlock == NULL); + + OBD_SLAB_FREE_PTR(ols, osc_lock_kmem); } static void osc_lock_build_policy(const struct lu_env *env, @@ -245,64 +159,21 @@ static void osc_lock_build_policy(const struct lu_env *env, policy->l_extent.gid = d->cld_gid; } -static int osc_enq2ldlm_flags(__u32 enqflags) -{ - int result = 0; - - LASSERT((enqflags & ~CEF_MASK) == 0); - - if (enqflags & CEF_NONBLOCK) - result |= LDLM_FL_BLOCK_NOWAIT; - if (enqflags & CEF_ASYNC) - result |= LDLM_FL_HAS_INTENT; - if (enqflags & CEF_DISCARD_DATA) - result |= LDLM_AST_DISCARD_DATA; - return result; -} - -/** - * Global spin-lock protecting consistency of ldlm_lock::l_ast_data - * pointers. Initialized in osc_init(). - */ -spinlock_t osc_ast_guard; - -static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock) -{ - struct osc_lock *olck; - - lock_res_and_lock(dlm_lock); - spin_lock(&osc_ast_guard); - olck = dlm_lock->l_ast_data; - if (olck != NULL) { - struct cl_lock *lock = olck->ols_cl.cls_lock; - /* - * If osc_lock holds a reference on ldlm lock, return it even - * when cl_lock is in CLS_FREEING state. This way - * - * osc_ast_data_get(dlmlock) == NULL - * - * guarantees that all osc references on dlmlock were - * released. osc_dlm_blocking_ast0() relies on that. - */ - if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) { - cl_lock_get_trust(lock); - lu_ref_add_atomic(&lock->cll_reference, - "ast", cfs_current()); - } else - olck = NULL; - } - spin_unlock(&osc_ast_guard); - unlock_res_and_lock(dlm_lock); - return olck; -} - -static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck) +static __u64 osc_enq2ldlm_flags(__u32 enqflags) { - struct cl_lock *lock; - - lock = olck->ols_cl.cls_lock; - lu_ref_del(&lock->cll_reference, "ast", cfs_current()); - cl_lock_put(env, lock); + __u64 result = 0; + + LASSERT((enqflags & ~CEF_MASK) == 0); + + if (enqflags & CEF_NONBLOCK) + result |= LDLM_FL_BLOCK_NOWAIT; + if (enqflags & CEF_ASYNC) + result |= LDLM_FL_HAS_INTENT; + if (enqflags & CEF_DISCARD_DATA) + result |= LDLM_FL_AST_DISCARD_DATA; + if (enqflags & CEF_PEEK) + result |= LDLM_FL_TEST_LOCK; + return result; } /** @@ -315,40 +186,34 @@ static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck) * * Called under lock and resource spin-locks. 
*/ -static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck, - int rc) +static void osc_lock_lvb_update(const struct lu_env *env, + struct osc_object *osc, + struct ldlm_lock *dlmlock, + struct ost_lvb *lvb) { - struct ost_lvb *lvb; - struct cl_object *obj; - struct lov_oinfo *oinfo; - struct cl_attr *attr; - unsigned valid; + struct cl_object *obj = osc2cl(osc); + struct lov_oinfo *oinfo = osc->oo_oinfo; + struct cl_attr *attr = &osc_env_info(env)->oti_attr; + unsigned valid; - ENTRY; - - if (!(olck->ols_flags & LDLM_FL_LVB_READY)) { - EXIT; - return; - } + ENTRY; - lvb = &olck->ols_lvb; - obj = olck->ols_cl.cls_obj; - oinfo = cl2osc(obj)->oo_oinfo; - attr = &osc_env_info(env)->oti_attr; - valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE; - cl_lvb2attr(attr, lvb); + valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE; + if (lvb == NULL) { + LASSERT(dlmlock != NULL); + lvb = dlmlock->l_lvb_data; + } + cl_lvb2attr(attr, lvb); - cl_object_attr_lock(obj); - if (rc == 0) { - struct ldlm_lock *dlmlock; - __u64 size; + cl_object_attr_lock(obj); + if (dlmlock != NULL) { + __u64 size; - dlmlock = olck->ols_lock; - LASSERT(dlmlock != NULL); + check_res_locked(dlmlock->l_resource); - /* re-grab LVB from a dlm lock under DLM spin-locks. */ - *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data; + LASSERT(lvb == dlmlock->l_lvb_data); size = lvb->lvb_size; + /* Extend KMS up to the end of this lock and no further * A lock on [x,y] means a KMS of up to y + 1 bytes! */ if (size > dlmlock->l_policy_data.l_extent.end) @@ -364,229 +229,195 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck, lvb->lvb_size, oinfo->loi_kms, dlmlock->l_policy_data.l_extent.end); } - ldlm_lock_allow_match_locked(dlmlock); - } else if (rc == -ENAVAIL && olck->ols_glimpse) { - CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving" - " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms); - } else - valid = 0; + ldlm_lock_allow_match_locked(dlmlock); + } - if (valid != 0) - cl_object_attr_set(env, obj, attr, valid); + cl_object_attr_update(env, obj, attr, valid); + cl_object_attr_unlock(obj); - cl_object_attr_unlock(obj); + EXIT; +} - EXIT; +static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl, + struct lustre_handle *lockh, bool lvb_update) +{ + struct ldlm_lock *dlmlock; + + dlmlock = ldlm_handle2lock_long(lockh, 0); + LASSERT(dlmlock != NULL); + + /* lock reference taken by ldlm_handle2lock_long() is + * owned by osc_lock and released in osc_lock_detach() + */ + lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl); + oscl->ols_has_ref = 1; + + LASSERT(oscl->ols_dlmlock == NULL); + oscl->ols_dlmlock = dlmlock; + + /* This may be a matched lock for glimpse request, do not hold + * lock reference in that case. */ + if (!oscl->ols_glimpse) { + /* hold a refc for non glimpse lock which will + * be released in osc_lock_cancel() */ + lustre_handle_copy(&oscl->ols_handle, lockh); + ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode); + oscl->ols_hold = 1; + } + + /* Lock must have been granted. */ + lock_res_and_lock(dlmlock); + if (dlmlock->l_granted_mode == dlmlock->l_req_mode) { + struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent; + struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr; + + /* extend the lock extent, otherwise it will have problem when + * we decide whether to grant a lockless lock. 
*/ + descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode); + descr->cld_start = cl_index(descr->cld_obj, ext->start); + descr->cld_end = cl_index(descr->cld_obj, ext->end); + descr->cld_gid = ext->gid; + + /* no lvb update for matched lock */ + if (lvb_update) { + LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY); + osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj), + dlmlock, NULL); + } + LINVRNT(osc_lock_invariant(oscl)); + } + unlock_res_and_lock(dlmlock); + + LASSERT(oscl->ols_state != OLS_GRANTED); + oscl->ols_state = OLS_GRANTED; } /** - * Called when a lock is granted, from an upcall (when server returned a - * granted lock), or from completion AST, when server returned a blocked lock. - * - * Called under lock and resource spin-locks, that are released temporarily - * here. + * Lock upcall function that is executed either when a reply to ENQUEUE rpc is + * received from a server, or after osc_enqueue_base() matched a local DLM + * lock. */ -static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck, - struct ldlm_lock *dlmlock, int rc) +static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh, + int errcode) { - struct ldlm_extent *ext; - struct cl_lock *lock; - struct cl_lock_descr *descr; - - LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode); - - ENTRY; - if (olck->ols_state != OLS_GRANTED) { - lock = olck->ols_cl.cls_lock; - ext = &dlmlock->l_policy_data.l_extent; - descr = &osc_env_info(env)->oti_descr; - descr->cld_obj = lock->cll_descr.cld_obj; - - /* XXX check that ->l_granted_mode is valid. */ - descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode); - descr->cld_start = cl_index(descr->cld_obj, ext->start); - descr->cld_end = cl_index(descr->cld_obj, ext->end); - descr->cld_gid = ext->gid; - /* - * tell upper layers the extent of the lock that was actually - * granted - */ - olck->ols_state = OLS_GRANTED; - osc_lock_lvb_update(env, olck, rc); - - /* release DLM spin-locks to allow cl_lock_{modify,signal}() - * to take a semaphore on a parent lock. This is safe, because - * spin-locks are needed to protect consistency of - * dlmlock->l_*_mode and LVB, and we have finished processing - * them. */ - unlock_res_and_lock(dlmlock); - cl_lock_modify(env, lock, descr); - cl_lock_signal(env, lock); - LINVRNT(osc_lock_invariant(olck)); - lock_res_and_lock(dlmlock); - } - EXIT; + struct osc_lock *oscl = cookie; + struct cl_lock_slice *slice = &oscl->ols_cl; + struct lu_env *env; + struct cl_env_nest nest; + int rc; + + ENTRY; + + env = cl_env_nested_get(&nest); + /* should never happen, similar to osc_ldlm_blocking_ast(). */ + LASSERT(!IS_ERR(env)); + + rc = ldlm_error2errno(errcode); + if (oscl->ols_state == OLS_ENQUEUED) { + oscl->ols_state = OLS_UPCALL_RECEIVED; + } else if (oscl->ols_state == OLS_CANCELLED) { + rc = -EIO; + } else { + CERROR("Impossible state: %d\n", oscl->ols_state); + LBUG(); + } + + if (rc == 0) + osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK); + + /* Error handling, some errors are tolerable. */ + if (oscl->ols_locklessable && rc == -EUSERS) { + /* This is a tolerable error, turn this lock into + * lockless lock. + */ + osc_object_set_contended(cl2osc(slice->cls_obj)); + LASSERT(slice->cls_ops == &osc_lock_ops); + + /* Change this lock to ldlmlock-less lock. 
*/ + osc_lock_to_lockless(env, oscl, 1); + oscl->ols_state = OLS_GRANTED; + rc = 0; + } else if (oscl->ols_glimpse && rc == -ENAVAIL) { + LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY); + osc_lock_lvb_update(env, cl2osc(slice->cls_obj), + NULL, &oscl->ols_lvb); + /* Hide the error. */ + rc = 0; + } + + if (oscl->ols_owner != NULL) + cl_sync_io_note(env, oscl->ols_owner, rc); + cl_env_nested_put(&nest, env); + + RETURN(rc); } -static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck) - +static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh, + int errcode) { - struct ldlm_lock *dlmlock; + struct osc_object *osc = cookie; + struct ldlm_lock *dlmlock; + struct lu_env *env; + struct cl_env_nest nest; + ENTRY; - ENTRY; + env = cl_env_nested_get(&nest); + LASSERT(!IS_ERR(env)); - dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0); - LASSERT(dlmlock != NULL); - - lock_res_and_lock(dlmlock); - spin_lock(&osc_ast_guard); - LASSERT(dlmlock->l_ast_data == olck); - LASSERT(olck->ols_lock == NULL); - olck->ols_lock = dlmlock; - spin_unlock(&osc_ast_guard); - - /* - * Lock might be not yet granted. In this case, completion ast - * (osc_ldlm_completion_ast()) comes later and finishes lock - * granting. - */ - if (dlmlock->l_granted_mode == dlmlock->l_req_mode) - osc_lock_granted(env, olck, dlmlock, 0); - unlock_res_and_lock(dlmlock); - - /* - * osc_enqueue_interpret() decrefs asynchronous locks, counter - * this. - */ - ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode); - olck->ols_hold = olck->ols_has_ref = 1; - - /* lock reference taken by ldlm_handle2lock_long() is owned by - * osc_lock and released in osc_lock_detach() */ - lu_ref_add(&dlmlock->l_reference, "osc_lock", olck); -} + if (errcode == ELDLM_LOCK_MATCHED) + GOTO(out, errcode = ELDLM_OK); -/** - * Lock upcall function that is executed either when a reply to ENQUEUE rpc is - * received from a server, or after osc_enqueue_base() matched a local DLM - * lock. - */ -static int osc_lock_upcall(void *cookie, int errcode) -{ - struct osc_lock *olck = cookie; - struct cl_lock_slice *slice = &olck->ols_cl; - struct cl_lock *lock = slice->cls_lock; - struct lu_env *env; - struct cl_env_nest nest; + if (errcode != ELDLM_OK) + GOTO(out, errcode); - ENTRY; - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - int rc; - - cl_lock_mutex_get(env, lock); - - LASSERT(lock->cll_state >= CLS_QUEUING); - if (olck->ols_state == OLS_ENQUEUED) { - olck->ols_state = OLS_UPCALL_RECEIVED; - rc = ldlm_error2errno(errcode); - } else if (olck->ols_state == OLS_CANCELLED) { - rc = -EIO; - } else { - CERROR("Impossible state: %i\n", olck->ols_state); - LBUG(); - } - if (rc) { - struct ldlm_lock *dlmlock; - - dlmlock = ldlm_handle2lock(&olck->ols_handle); - if (dlmlock != NULL) { - lock_res_and_lock(dlmlock); - spin_lock(&osc_ast_guard); - LASSERT(olck->ols_lock == NULL); - dlmlock->l_ast_data = NULL; - olck->ols_handle.cookie = 0ULL; - spin_unlock(&osc_ast_guard); - unlock_res_and_lock(dlmlock); - LDLM_LOCK_PUT(dlmlock); - } - } else { - if (olck->ols_glimpse) - olck->ols_glimpse = 0; - osc_lock_upcall0(env, olck); - } + dlmlock = ldlm_handle2lock(lockh); + LASSERT(dlmlock != NULL); - /* Error handling, some errors are tolerable. */ - if (olck->ols_locklessable && rc == -EUSERS) { - /* This is a tolerable error, turn this lock into - * lockless lock. - */ - osc_object_set_contended(cl2osc(slice->cls_obj)); - LASSERT(slice->cls_ops == &osc_lock_ops); - - /* Change this lock to ldlmlock-less lock. 
*/ - osc_lock_to_lockless(env, olck, 1); - olck->ols_state = OLS_GRANTED; - rc = 0; - } else if (olck->ols_glimpse && rc == -ENAVAIL) { - osc_lock_lvb_update(env, olck, rc); - cl_lock_delete(env, lock); - /* Hide the error. */ - rc = 0; - } + lock_res_and_lock(dlmlock); + LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode); - if (rc == 0) - /* on error, lock was signaled by cl_lock_error() */ - cl_lock_signal(env, lock); - else - cl_lock_error(env, lock, rc); + /* there is no osc_lock associated with AGL lock */ + osc_lock_lvb_update(env, osc, dlmlock, NULL); - cl_lock_mutex_put(env, lock); + unlock_res_and_lock(dlmlock); + LDLM_LOCK_PUT(dlmlock); - /* release cookie reference, acquired by osc_lock_enqueue() */ - lu_ref_del(&lock->cll_reference, "upcall", lock); - cl_lock_put(env, lock); - cl_env_nested_put(&nest, env); - } else - /* should never happen, similar to osc_ldlm_blocking_ast(). */ - LBUG(); - RETURN(errcode); +out: + cl_object_put(env, osc2cl(osc)); + cl_env_nested_put(&nest, env); + RETURN(ldlm_error2errno(errcode)); } -/** - * Core of osc_dlm_blocking_ast() logic. - */ -static void osc_lock_blocking(const struct lu_env *env, - struct ldlm_lock *dlmlock, - struct osc_lock *olck, int blocking) +static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end, + enum cl_lock_mode mode, int discard) { - struct cl_lock *lock = olck->ols_cl.cls_lock; - - LASSERT(olck->ols_lock == dlmlock); - CLASSERT(OLS_BLOCKED < OLS_CANCELLED); - LASSERT(!osc_lock_is_lockless(olck)); - - if (olck->ols_hold) - /* - * Lock might be still addref-ed here, if e.g., blocking ast - * is sent for a failed lock. - */ - osc_lock_unuse(env, &olck->ols_cl); - - if (blocking && olck->ols_state < OLS_BLOCKED) - /* - * Move osc_lock into OLS_BLOCKED before canceling the lock, - * because it recursively re-enters osc_lock_blocking(), with - * the state set to OLS_CANCELLED. - */ - olck->ols_state = OLS_BLOCKED; - /* - * cancel and destroy lock at least once no matter how blocking ast is - * entered (see comment above osc_ldlm_blocking_ast() for use - * cases). cl_lock_cancel() and cl_lock_delete() are idempotent. - */ - cl_lock_cancel(env, lock); - cl_lock_delete(env, lock); + struct lu_env *env; + struct cl_env_nest nest; + int rc = 0; + int rc2 = 0; + + ENTRY; + + env = cl_env_nested_get(&nest); + if (IS_ERR(env)) + RETURN(PTR_ERR(env)); + + if (mode == CLM_WRITE) { + rc = osc_cache_writeback_range(env, obj, start, end, 1, + discard); + CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n", + obj, start, end, rc, + discard ? "discarded" : "written back"); + if (rc > 0) + rc = 0; + } + + rc2 = osc_lock_discard_pages(env, obj, start, end, mode); + if (rc == 0 && rc2 < 0) + rc = rc2; + + cl_env_nested_put(&nest, env); + RETURN(rc); } /** @@ -597,66 +428,63 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env, struct ldlm_lock *dlmlock, void *data, int flag) { - struct osc_lock *olck; - struct cl_lock *lock; - int result; - int cancel; - - LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING); - - cancel = 0; - olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { - lock = olck->ols_cl.cls_lock; - cl_lock_mutex_get(env, lock); - LINVRNT(osc_lock_invariant(olck)); - if (olck->ols_ast_wait) { - /* wake up osc_lock_use() */ - cl_lock_signal(env, lock); - olck->ols_ast_wait = 0; - } - /* - * Lock might have been canceled while this thread was - * sleeping for lock mutex, but olck is pinned in memory. 
- */ - if (olck == dlmlock->l_ast_data) { - /* - * NOTE: DLM sends blocking AST's for failed locks - * (that are still in pre-OLS_GRANTED state) - * too, and they have to be canceled otherwise - * DLM lock is never destroyed and stuck in - * the memory. - * - * Alternatively, ldlm_cli_cancel() can be - * called here directly for osc_locks with - * ols_state < OLS_GRANTED to maintain an - * invariant that ->clo_cancel() is only called - * for locks that were granted. - */ - LASSERT(data == olck); - osc_lock_blocking(env, dlmlock, - olck, flag == LDLM_CB_BLOCKING); - } else - cancel = 1; - cl_lock_mutex_put(env, lock); - osc_ast_data_put(env, olck); - } else - /* - * DLM lock exists, but there is no cl_lock attached to it. - * This is a `normal' race. cl_object and its cl_lock's can be - * removed by memory pressure, together with all pages. - */ - cancel = (flag == LDLM_CB_BLOCKING); - - if (cancel) { - struct lustre_handle *lockh; - - lockh = &osc_env_info(env)->oti_handle; - ldlm_lock2handle(dlmlock, lockh); - result = ldlm_cli_cancel(lockh); - } else - result = 0; - return result; + struct cl_object *obj = NULL; + int result = 0; + int discard; + enum cl_lock_mode mode = CLM_READ; + ENTRY; + + LASSERT(flag == LDLM_CB_CANCELING); + + lock_res_and_lock(dlmlock); + if (dlmlock->l_granted_mode != dlmlock->l_req_mode) { + dlmlock->l_ast_data = NULL; + unlock_res_and_lock(dlmlock); + RETURN(0); + } + + discard = ldlm_is_discard_data(dlmlock); + if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP)) + mode = CLM_WRITE; + + if (dlmlock->l_ast_data != NULL) { + obj = osc2cl(dlmlock->l_ast_data); + dlmlock->l_ast_data = NULL; + + cl_object_get(obj); + } + + unlock_res_and_lock(dlmlock); + + /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or + * the object has been destroyed. */ + if (obj != NULL) { + struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent; + struct cl_attr *attr = &osc_env_info(env)->oti_attr; + __u64 old_kms; + + /* Destroy pages covered by the extent of the DLM lock */ + result = osc_lock_flush(cl2osc(obj), + cl_index(obj, extent->start), + cl_index(obj, extent->end), + mode, discard); + + /* losing a lock, update kms */ + lock_res_and_lock(dlmlock); + cl_object_attr_lock(obj); + /* Must get the value under the lock to avoid race. */ + old_kms = cl2osc(obj)->oo_oinfo->loi_kms; + /* Update the kms. Need to loop all granted locks. + * Not a problem for the client */ + attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms); + + cl_object_attr_update(env, obj, attr, CAT_KMS); + cl_object_attr_unlock(obj); + unlock_res_and_lock(dlmlock); + + cl_object_put(env, obj); + } + RETURN(result); } /** @@ -684,7 +512,7 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env, * * - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify * us that dlmlock conflicts with another lock that some client is - * enqueing. Lock is canceled. + * enqueuing. Lock is canceled. * * - cl_lock_cancel() is called. osc_lock_cancel() calls * ldlm_cli_cancel() that calls @@ -705,122 +533,81 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock, struct ldlm_lock_desc *new, void *data, int flag) { - struct lu_env *env; - struct cl_env_nest nest; - int result; - - /* - * This can be called in the context of outer IO, e.g., - * - * cl_enqueue()->... - * ->osc_enqueue_base()->... - * ->ldlm_prep_elc_req()->... - * ->ldlm_cancel_callback()->... - * ->osc_ldlm_blocking_ast() - * - * new environment has to be created to not corrupt outer context. 
- */ - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - result = osc_dlm_blocking_ast0(env, dlmlock, data, flag); - cl_env_nested_put(&nest, env); - } else { - result = PTR_ERR(env); - /* - * XXX This should never happen, as cl_lock is - * stuck. Pre-allocated environment a la vvp_inode_fini_env - * should be used. - */ - LBUG(); - } - if (result != 0) { - if (result == -ENODATA) - result = 0; - else - CERROR("BAST failed: %d\n", result); - } - return result; -} - -static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock, - int flags, void *data) -{ - struct cl_env_nest nest; - struct lu_env *env; - struct osc_lock *olck; - struct cl_lock *lock; - int result; - int dlmrc; - - /* first, do dlm part of the work */ - dlmrc = ldlm_completion_ast_async(dlmlock, flags, data); - /* then, notify cl_lock */ - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { - lock = olck->ols_cl.cls_lock; - cl_lock_mutex_get(env, lock); - /* - * ldlm_handle_cp_callback() copied LVB from request - * to lock->l_lvb_data, store it in osc_lock. - */ - LASSERT(dlmlock->l_lvb_data != NULL); - lock_res_and_lock(dlmlock); - olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data; - if (olck->ols_lock == NULL) - /* - * upcall (osc_lock_upcall()) hasn't yet been - * called. Do nothing now, upcall will bind - * olck to dlmlock and signal the waiters. - * - * This maintains an invariant that osc_lock - * and ldlm_lock are always bound when - * osc_lock is in OLS_GRANTED state. - */ - ; - else if (dlmlock->l_granted_mode != LCK_MINMODE) - osc_lock_granted(env, olck, dlmlock, dlmrc); - unlock_res_and_lock(dlmlock); - if (dlmrc != 0) - cl_lock_error(env, lock, dlmrc); - cl_lock_mutex_put(env, lock); - osc_ast_data_put(env, olck); - result = 0; - } else - result = -ELDLM_NO_LOCK_DATA; - cl_env_nested_put(&nest, env); - } else - result = PTR_ERR(env); - return dlmrc ?: result; + int result = 0; + ENTRY; + + switch (flag) { + case LDLM_CB_BLOCKING: { + struct lustre_handle lockh; + + ldlm_lock2handle(dlmlock, &lockh); + result = ldlm_cli_cancel(&lockh, LCF_ASYNC); + if (result == -ENODATA) + result = 0; + break; + } + case LDLM_CB_CANCELING: { + struct lu_env *env; + struct cl_env_nest nest; + + /* + * This can be called in the context of outer IO, e.g., + * + * osc_enqueue_base()->... + * ->ldlm_prep_elc_req()->... + * ->ldlm_cancel_callback()->... + * ->osc_ldlm_blocking_ast() + * + * new environment has to be created to not corrupt outer + * context. + */ + env = cl_env_nested_get(&nest); + if (IS_ERR(env)) { + result = PTR_ERR(env); + break; + } + + result = osc_dlm_blocking_ast0(env, dlmlock, data, flag); + cl_env_nested_put(&nest, env); + break; + } + default: + LBUG(); + } + RETURN(result); } static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) { - struct ptlrpc_request *req = data; - struct osc_lock *olck; - struct cl_lock *lock; - struct cl_object *obj; - struct cl_env_nest nest; - struct lu_env *env; - struct ost_lvb *lvb; - struct req_capsule *cap; - int result; - - LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK); - - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - /* - * osc_ast_data_get() has to go after environment is - * allocated, because osc_ast_data() acquires a - * reference to a lock, and it can only be released in - * environment. 
- */ - olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { - lock = olck->ols_cl.cls_lock; - cl_lock_mutex_get(env, lock); + struct ptlrpc_request *req = data; + struct cl_env_nest nest; + struct lu_env *env; + struct ost_lvb *lvb; + struct req_capsule *cap; + int result; + + ENTRY; + + LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK); + + env = cl_env_nested_get(&nest); + if (!IS_ERR(env)) { + struct cl_object *obj = NULL; + + lock_res_and_lock(dlmlock); + if (dlmlock->l_ast_data != NULL) { + obj = osc2cl(dlmlock->l_ast_data); + cl_object_get(obj); + } + unlock_res_and_lock(dlmlock); + + if (obj != NULL) { + /* Do not grab the mutex of cl_lock for glimpse. + * See LU-1274 for details. + * BTW, it's okay for cl_lock to be cancelled during + * this period because server can handle this race. + * See ldlm_server_glimpse_ast() for details. + * cl_lock_mutex_get(env, lock); */ cap = &req->rq_pill; req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK); req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER, @@ -828,11 +615,14 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) result = req_capsule_server_pack(cap); if (result == 0) { lvb = req_capsule_server_get(cap, &RMF_DLM_LVB); - obj = lock->cll_descr.cld_obj; result = cl_object_glimpse(env, obj, lvb); } - cl_lock_mutex_put(env, lock); - osc_ast_data_put(env, olck); + if (!exp_connect_lvb_type(req->rq_export)) + req_capsule_shrink(&req->rq_pill, + &RMF_DLM_LVB, + sizeof(struct ost_lvb_v1), + RCL_SERVER); + cl_object_put(env, obj); } else { /* * These errors are normal races, so we don't want to @@ -843,174 +633,122 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) result = -ELDLM_NO_LOCK_DATA; } cl_env_nested_put(&nest, env); - } else - result = PTR_ERR(env); - req->rq_status = result; - return result; + } else + result = PTR_ERR(env); + req->rq_status = result; + RETURN(result); +} + +static int weigh_cb(const struct lu_env *env, struct cl_io *io, + struct osc_page *ops, void *cbdata) +{ + struct cl_page *page = ops->ops_cl.cpl_page; + + if (cl_page_is_vmlocked(env, page) + || PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage) + ) + return CLP_GANG_ABORT; + + *(pgoff_t *)cbdata = osc_index(ops) + 1; + return CLP_GANG_OKAY; } -static unsigned long osc_lock_weigh(const struct lu_env *env, - const struct cl_lock_slice *slice) +static unsigned long osc_lock_weight(const struct lu_env *env, + struct osc_object *oscobj, + struct ldlm_extent *extent) { - /* - * don't need to grab coh_page_guard since we don't care the exact # - * of pages.. - */ - return cl_object_header(slice->cls_obj)->coh_pages; + struct cl_io *io = &osc_env_info(env)->oti_io; + struct cl_object *obj = cl_object_top(&oscobj->oo_cl); + pgoff_t page_index; + int result; + ENTRY; + + io->ci_obj = obj; + io->ci_ignore_layout = 1; + result = cl_io_init(env, io, CIT_MISC, io->ci_obj); + if (result != 0) + RETURN(result); + + page_index = cl_index(obj, extent->start); + do { + result = osc_page_gang_lookup(env, io, oscobj, + page_index, + cl_index(obj, extent->end), + weigh_cb, (void *)&page_index); + if (result == CLP_GANG_ABORT) + break; + if (result == CLP_GANG_RESCHED) + cond_resched(); + } while (result != CLP_GANG_OKAY); + cl_io_fini(env, io); + + return result == CLP_GANG_ABORT ? 1 : 0; } /** * Get the weight of dlm lock for early cancellation. - * - * XXX: it should return the pages covered by this \a dlmlock. 
*/ -static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock) +unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock) { - struct cl_env_nest nest; - struct lu_env *env; - struct osc_lock *lock; - struct cl_lock *cll; - unsigned long weight; - ENTRY; - - might_sleep(); - /* - * osc_ldlm_weigh_ast has a complex context since it might be called - * because of lock canceling, or from user's input. We have to make - * a new environment for it. Probably it is implementation safe to use - * the upper context because cl_lock_put don't modify environment - * variables. But in case of .. - */ - env = cl_env_nested_get(&nest); - if (IS_ERR(env)) - /* Mostly because lack of memory, tend to eliminate this lock*/ - RETURN(0); - - LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT); - lock = osc_ast_data_get(dlmlock); - if (lock == NULL) { - /* cl_lock was destroyed because of memory pressure. - * It is much reasonable to assign this type of lock - * a lower cost. - */ - GOTO(out, weight = 0); - } - - cll = lock->ols_cl.cls_lock; - cl_lock_mutex_get(env, cll); - weight = cl_lock_weigh(env, cll); - cl_lock_mutex_put(env, cll); - osc_ast_data_put(env, lock); - EXIT; + struct cl_env_nest nest; + struct lu_env *env; + struct osc_object *obj; + struct osc_lock *oscl; + unsigned long weight; + bool found = false; + ENTRY; + + might_sleep(); + /* + * osc_ldlm_weigh_ast has a complex context since it might be called + * because of lock canceling, or from user's input. We have to make + * a new environment for it. Probably it is implementation safe to use + * the upper context because cl_lock_put don't modify environment + * variables. But just in case .. + */ + env = cl_env_nested_get(&nest); + if (IS_ERR(env)) + /* Mostly because lack of memory, do not eliminate this lock */ + RETURN(1); + + LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT); + obj = dlmlock->l_ast_data; + if (obj == NULL) + GOTO(out, weight = 1); + + spin_lock(&obj->oo_ol_spin); + list_for_each_entry(oscl, &obj->oo_ol_list, ols_nextlock_oscobj) { + if (oscl->ols_dlmlock != NULL && oscl->ols_dlmlock != dlmlock) + continue; + found = true; + } + spin_unlock(&obj->oo_ol_spin); + if (found) { + /* + * If the lock is being used by an IO, definitely not cancel it. + */ + GOTO(out, weight = 1); + } + + weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent); + EXIT; out: - cl_env_nested_put(&nest, env); - return weight; + cl_env_nested_put(&nest, env); + return weight; } static void osc_lock_build_einfo(const struct lu_env *env, - const struct cl_lock *clock, - struct osc_lock *lock, - struct ldlm_enqueue_info *einfo) + const struct cl_lock *lock, + struct osc_object *osc, + struct ldlm_enqueue_info *einfo) { - enum cl_lock_mode mode; - - mode = clock->cll_descr.cld_mode; - if (mode == CLM_PHANTOM) - /* - * For now, enqueue all glimpse locks in read mode. In the - * future, client might choose to enqueue LCK_PW lock for - * glimpse on a file opened for write. 
- */ - mode = CLM_READ; - - einfo->ei_type = LDLM_EXTENT; - einfo->ei_mode = osc_cl_lock2ldlm(mode); - einfo->ei_cb_bl = osc_ldlm_blocking_ast; - einfo->ei_cb_cp = osc_ldlm_completion_ast; - einfo->ei_cb_gl = osc_ldlm_glimpse_ast; - einfo->ei_cb_wg = osc_ldlm_weigh_ast; - einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */ -} - -static int osc_lock_delete0(struct cl_lock *conflict) -{ - struct cl_env_nest nest; - struct lu_env *env; - int rc = 0; - - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - cl_lock_delete(env, conflict); - cl_env_nested_put(&nest, env); - } else - rc = PTR_ERR(env); - return rc; -} -/** - * Cancels \a conflict lock and waits until it reached CLS_FREEING state. This - * is called as a part of enqueuing to cancel conflicting locks early. - * - * \retval 0: success, \a conflict was cancelled and destroyed. - * - * \retval CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was - * released in the process. Repeat enqueing. - * - * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and - * either \a lock is non-blocking, or current thread - * holds other locks, that prevent it from waiting - * for cancel to complete. - * - * \retval -ve: other error, including -EINTR. - * - */ -static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock, - struct cl_lock *conflict, int canwait) -{ - int rc; - - LASSERT(cl_lock_is_mutexed(lock)); - LASSERT(cl_lock_is_mutexed(conflict)); - - rc = 0; - if (conflict->cll_state != CLS_FREEING) { - cl_lock_cancel(env, conflict); - rc = osc_lock_delete0(conflict); - if (rc) - return rc; - if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) { - rc = -EWOULDBLOCK; - if (cl_lock_nr_mutexed(env) > 2) - /* - * If mutices of locks other than @lock and - * @scan are held by the current thread, it - * cannot wait on @scan state change in a - * dead-lock safe matter, so simply skip early - * cancellation in this case. - * - * This means that early cancellation doesn't - * work when there is even slight mutex - * contention, as top-lock's mutex is usually - * held at this time. - */ - ; - else if (canwait) { - /* Waiting for @scan to be destroyed */ - cl_lock_mutex_put(env, lock); - do { - rc = cl_lock_state_wait(env, conflict); - } while (!rc && - conflict->cll_state < CLS_FREEING); - /* mutex was released, repeat enqueue. */ - rc = rc ?: CLO_REPEAT; - cl_lock_mutex_get(env, lock); - } - } - LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING)); - CDEBUG(D_INFO, "lock %p was %s freed now, rc (%d)\n", - conflict, rc ? 
"not":"", rc); - } - return rc; + einfo->ei_type = LDLM_EXTENT; + einfo->ei_mode = osc_cl_lock2ldlm(lock->cll_descr.cld_mode); + einfo->ei_cb_bl = osc_ldlm_blocking_ast; + einfo->ei_cb_cp = ldlm_completion_ast; + einfo->ei_cb_gl = osc_ldlm_glimpse_ast; + einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */ } /** @@ -1029,14 +767,12 @@ static void osc_lock_to_lockless(const struct lu_env *env, struct osc_lock *ols, int force) { struct cl_lock_slice *slice = &ols->ols_cl; - struct cl_lock *lock = slice->cls_lock; LASSERT(ols->ols_state == OLS_NEW || ols->ols_state == OLS_UPCALL_RECEIVED); if (force) { ols->ols_locklessable = 1; - LASSERT(cl_lock_is_mutexed(lock)); slice->cls_ops = &osc_lock_lockless_ops; } else { struct osc_io *oio = osc_env_io(env); @@ -1051,14 +787,14 @@ static void osc_lock_to_lockless(const struct lu_env *env, io->ci_lockreq == CILR_NEVER); ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data; - ols->ols_locklessable = (io->ci_type != CIT_TRUNC) && + ols->ols_locklessable = (io->ci_type != CIT_SETATTR) && (io->ci_lockreq == CILR_MAYBE) && (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK); if (io->ci_lockreq == CILR_NEVER || /* lockless IO */ (ols->ols_locklessable && osc_object_is_contended(oob)) || /* lockless truncate */ - (io->ci_type == CIT_TRUNC && + (cl_io_is_trunc(io) && (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) && osd->od_lockless_truncate)) { ols->ols_locklessable = 1; @@ -1068,176 +804,106 @@ static void osc_lock_to_lockless(const struct lu_env *env, LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols))); } -/** - * Cancel all conflicting locks and wait for them to be destroyed. - * - * This function is used for two purposes: - * - * - early cancel all conflicting locks before starting IO, and - * - * - guarantee that pages added to the page cache by lockless IO are never - * covered by locks other than lockless IO lock, and, hence, are not - * visible to other threads. - */ -static int osc_lock_enqueue_wait(const struct lu_env *env, - const struct osc_lock *olck) +static bool osc_lock_compatible(const struct osc_lock *qing, + const struct osc_lock *qed) { - struct cl_lock *lock = olck->ols_cl.cls_lock; - struct cl_lock_descr *descr = &lock->cll_descr; - struct cl_object_header *hdr = cl_object_header(descr->cld_obj); - struct cl_lock_closure *closure = &osc_env_info(env)->oti_closure; - struct cl_lock *scan; - struct cl_lock *temp; - int lockless = osc_lock_is_lockless(olck); - int rc = 0; - int canwait; - int stop; - ENTRY; + struct cl_lock_descr *qed_descr = &qed->ols_cl.cls_lock->cll_descr; + struct cl_lock_descr *qing_descr = &qing->ols_cl.cls_lock->cll_descr; - LASSERT(cl_lock_is_mutexed(lock)); - LASSERT(lock->cll_state == CLS_QUEUING); - - /* - * XXX This function could be sped up if we had asynchronous - * cancellation. - */ - - canwait = - !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) && - cl_lock_nr_mutexed(env) == 1; - cl_lock_closure_init(env, closure, lock, canwait); - spin_lock(&hdr->coh_lock_guard); - list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) { - if (scan == lock) - continue; - - if (scan->cll_state < CLS_QUEUING || - scan->cll_state == CLS_FREEING || - scan->cll_descr.cld_start > descr->cld_end || - scan->cll_descr.cld_end < descr->cld_start) - continue; - - /* overlapped and living locks. */ - - /* We're not supposed to give up group lock. 
*/ - if (scan->cll_descr.cld_mode == CLM_GROUP) { - LASSERT(descr->cld_mode != CLM_GROUP || - descr->cld_gid != scan->cll_descr.cld_gid); - continue; - } + if (qed->ols_glimpse) + return true; - /* A tricky case for lockless pages: - * We need to cancel the compatible locks if we're enqueuing - * a lockless lock, for example: - * imagine that client has PR lock on [0, 1000], and thread T0 - * is doing lockless IO in [500, 1500] region. Concurrent - * thread T1 can see lockless data in [500, 1000], which is - * wrong, because these data are possibly stale. - */ - if (!lockless && cl_lock_compatible(scan, lock)) - continue; - - /* Now @scan is conflicting with @lock, this means current - * thread have to sleep for @scan being destroyed. */ - cl_lock_get_trust(scan); - if (&temp->cll_linkage != &hdr->coh_locks) - cl_lock_get_trust(temp); - spin_unlock(&hdr->coh_lock_guard); - lu_ref_add(&scan->cll_reference, "cancel-wait", lock); - - LASSERT(list_empty(&closure->clc_list)); - rc = cl_lock_closure_build(env, scan, closure); - if (rc == 0) { - rc = osc_lock_cancel_wait(env, lock, scan, canwait); - cl_lock_disclosure(env, closure); - if (rc == -EWOULDBLOCK) - rc = 0; - } - if (rc == CLO_REPEAT && !canwait) - /* cannot wait... no early cancellation. */ - rc = 0; - - lu_ref_del(&scan->cll_reference, "cancel-wait", lock); - cl_lock_put(env, scan); - spin_lock(&hdr->coh_lock_guard); - /* - * Lock list could have been modified, while spin-lock was - * released. Check that it is safe to continue. - */ - stop = list_empty(&temp->cll_linkage); - if (&temp->cll_linkage != &hdr->coh_locks) - cl_lock_put(env, temp); - if (stop || rc != 0) - break; - } - spin_unlock(&hdr->coh_lock_guard); - cl_lock_closure_fini(closure); - RETURN(rc); -} + if (qing_descr->cld_mode == CLM_READ && qed_descr->cld_mode == CLM_READ) + return true; -/** - * Deadlock avoidance for osc_lock_enqueue(). Consider following scenario: - * - * - Thread0: obtains PR:[0, 10]. Lock is busy. - * - * - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to - * PR:[0, 10], but cancellation of busy lock is postponed. - * - * - Thread0: enqueue PR:[30, 40]. Lock is locally matched to - * PW:[5, 50], and thread0 waits for the lock completion never - * releasing PR:[0, 10]---deadlock. - * - * The second PR lock can be glimpse (it is to deal with that situation that - * ll_glimpse_size() has second argument, preventing local match of - * not-yet-granted locks, see bug 10295). Similar situation is possible in the - * case of memory mapped user level buffer. - * - * To prevent this we can detect a situation when current "thread" or "io" - * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to - * the ols->ols_flags, or prevent local match with PW locks. 
- */ -static int osc_deadlock_is_possible(const struct lu_env *env, - struct cl_lock *lock) -{ - struct cl_object *obj; - struct cl_object_header *head; - struct cl_lock *scan; - struct osc_io *oio; + if (qed->ols_state < OLS_GRANTED) + return true; - int result; + if (qed_descr->cld_mode >= qing_descr->cld_mode && + qed_descr->cld_start <= qing_descr->cld_start && + qed_descr->cld_end >= qing_descr->cld_end) + return true; - ENTRY; + return false; +} - LASSERT(cl_lock_is_mutexed(lock)); +static void osc_lock_wake_waiters(const struct lu_env *env, + struct osc_object *osc, + struct osc_lock *oscl) +{ + spin_lock(&osc->oo_ol_spin); + list_del_init(&oscl->ols_nextlock_oscobj); + spin_unlock(&osc->oo_ol_spin); - oio = osc_env_io(env); - obj = lock->cll_descr.cld_obj; - head = cl_object_header(obj); + spin_lock(&oscl->ols_lock); + while (!list_empty(&oscl->ols_waiting_list)) { + struct osc_lock *scan; - result = 0; - spin_lock(&head->coh_lock_guard); - list_for_each_entry(scan, &head->coh_locks, cll_linkage) { - if (scan != lock) { - struct osc_lock *oscan; + scan = list_entry(oscl->ols_waiting_list.next, struct osc_lock, + ols_wait_entry); + list_del_init(&scan->ols_wait_entry); - oscan = osc_lock_at(scan); - LASSERT(oscan != NULL); - if (oscan->ols_owner == oio) { - result = 1; - break; - } - } - } - spin_unlock(&head->coh_lock_guard); - RETURN(result); + cl_sync_io_note(env, scan->ols_owner, 0); + } + spin_unlock(&oscl->ols_lock); +} + +static void osc_lock_enqueue_wait(const struct lu_env *env, + struct osc_object *obj, + struct osc_lock *oscl) +{ + struct osc_lock *tmp_oscl; + struct cl_lock_descr *need = &oscl->ols_cl.cls_lock->cll_descr; + struct cl_sync_io *waiter = &osc_env_info(env)->oti_anchor; + + spin_lock(&obj->oo_ol_spin); + list_add_tail(&oscl->ols_nextlock_oscobj, &obj->oo_ol_list); + +restart: + list_for_each_entry(tmp_oscl, &obj->oo_ol_list, + ols_nextlock_oscobj) { + struct cl_lock_descr *descr; + + if (tmp_oscl == oscl) + break; + + descr = &tmp_oscl->ols_cl.cls_lock->cll_descr; + if (descr->cld_start > need->cld_end || + descr->cld_end < need->cld_start) + continue; + + /* We're not supposed to give up group lock */ + if (descr->cld_mode == CLM_GROUP) + break; + + if (!osc_lock_is_lockless(oscl) && + osc_lock_compatible(oscl, tmp_oscl)) + continue; + + /* wait for conflicting lock to be canceled */ + cl_sync_io_init(waiter, 1, cl_sync_io_end); + oscl->ols_owner = waiter; + + spin_lock(&tmp_oscl->ols_lock); + /* add oscl into tmp's ols_waiting list */ + list_add_tail(&oscl->ols_wait_entry, + &tmp_oscl->ols_waiting_list); + spin_unlock(&tmp_oscl->ols_lock); + + spin_unlock(&obj->oo_ol_spin); + (void)cl_sync_io_wait(env, waiter, 0); + + spin_lock(&obj->oo_ol_spin); + oscl->ols_owner = NULL; + goto restart; + } + spin_unlock(&obj->oo_ol_spin); } /** * Implementation of cl_lock_operations::clo_enqueue() method for osc * layer. This initiates ldlm enqueue: * - * - checks for possible dead-lock conditions (osc_deadlock_is_possible()); - * * - cancels conflicting locks early (osc_lock_enqueue_wait()); * * - calls osc_enqueue_base() to do actual enqueue. @@ -1249,142 +915,125 @@ static int osc_deadlock_is_possible(const struct lu_env *env, * This function does not wait for the network communication to complete. 
*/ static int osc_lock_enqueue(const struct lu_env *env, - const struct cl_lock_slice *slice, - struct cl_io *_, __u32 enqflags) + const struct cl_lock_slice *slice, + struct cl_io *unused, struct cl_sync_io *anchor) { - struct osc_lock *ols = cl2osc_lock(slice); - struct cl_lock *lock = ols->ols_cl.cls_lock; - struct osc_object *obj = cl2osc(slice->cls_obj); - struct osc_thread_info *info = osc_env_info(env); - struct ldlm_res_id *resname = &info->oti_resname; - ldlm_policy_data_t *policy = &info->oti_policy; - struct ldlm_enqueue_info *einfo = &ols->ols_einfo; - int result; - ENTRY; + struct osc_thread_info *info = osc_env_info(env); + struct osc_io *oio = osc_env_io(env); + struct osc_object *osc = cl2osc(slice->cls_obj); + struct osc_lock *oscl = cl2osc_lock(slice); + struct cl_lock *lock = slice->cls_lock; + struct ldlm_res_id *resname = &info->oti_resname; + ldlm_policy_data_t *policy = &info->oti_policy; + osc_enqueue_upcall_f upcall = osc_lock_upcall; + void *cookie = oscl; + bool async = false; + int result; - LASSERT(cl_lock_is_mutexed(lock)); - LASSERT(lock->cll_state == CLS_QUEUING); - LASSERT(ols->ols_state == OLS_NEW); - - osc_lock_build_res(env, obj, resname); - osc_lock_build_policy(env, lock, policy); - ols->ols_flags = osc_enq2ldlm_flags(enqflags); - if (osc_deadlock_is_possible(env, lock)) - ols->ols_flags |= LDLM_FL_BLOCK_GRANTED; - if (ols->ols_flags & LDLM_FL_HAS_INTENT) - ols->ols_glimpse = 1; - - result = osc_lock_enqueue_wait(env, ols); - if (result == 0) { - if (!(enqflags & CEF_MUST)) - /* try to convert this lock to a lockless lock */ - osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER)); - if (!osc_lock_is_lockless(ols)) { - if (ols->ols_locklessable) - ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION; - - /* a reference for lock, passed as an upcall cookie */ - cl_lock_get(lock); - lu_ref_add(&lock->cll_reference, "upcall", lock); - ols->ols_state = OLS_ENQUEUED; + ENTRY; - /* - * XXX: this is possible blocking point as - * ldlm_lock_match(LDLM_FL_LVB_READY) waits for - * LDLM_CP_CALLBACK. - */ - result = osc_enqueue_base(osc_export(obj), resname, - &ols->ols_flags, policy, - &ols->ols_lvb, - obj->oo_oinfo->loi_kms_valid, - osc_lock_upcall, - ols, einfo, &ols->ols_handle, - PTLRPCD_SET, 1); - if (result != 0) { - lu_ref_del(&lock->cll_reference, - "upcall", lock); - cl_lock_put(env, lock); - } - } else { - ols->ols_state = OLS_GRANTED; - } - } - LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols))); - RETURN(result); + LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ), + "lock = %p, ols = %p\n", lock, oscl); + + if (oscl->ols_state == OLS_GRANTED) + RETURN(0); + + if (oscl->ols_flags & LDLM_FL_TEST_LOCK) + GOTO(enqueue_base, 0); + + if (oscl->ols_glimpse) { + LASSERT(equi(oscl->ols_agl, anchor == NULL)); + async = true; + GOTO(enqueue_base, 0); + } + + osc_lock_enqueue_wait(env, osc, oscl); + + /* we can grant lockless lock right after all conflicting locks + * are canceled. */ + if (osc_lock_is_lockless(oscl)) { + oscl->ols_state = OLS_GRANTED; + oio->oi_lockless = 1; + RETURN(0); + } + +enqueue_base: + oscl->ols_state = OLS_ENQUEUED; + if (anchor != NULL) { + atomic_inc(&anchor->csi_sync_nr); + oscl->ols_owner = anchor; + } + + /** + * DLM lock's ast data must be osc_object; + * if glimpse or AGL lock, async of osc_enqueue_base() must be true, + * DLM's enqueue callback set to osc_lock_upcall() with cookie as + * osc_lock. 
+ */ + ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname); + osc_lock_build_einfo(env, lock, osc, &oscl->ols_einfo); + osc_lock_build_policy(env, lock, policy); + if (oscl->ols_agl) { + oscl->ols_einfo.ei_cbdata = NULL; + /* hold a reference for callback */ + cl_object_get(osc2cl(osc)); + upcall = osc_lock_upcall_agl; + cookie = osc; + } + result = osc_enqueue_base(osc_export(osc), resname, &oscl->ols_flags, + policy, &oscl->ols_lvb, + osc->oo_oinfo->loi_kms_valid, + upcall, cookie, + &oscl->ols_einfo, PTLRPCD_SET, async, + oscl->ols_agl); + if (result != 0) { + oscl->ols_state = OLS_CANCELLED; + osc_lock_wake_waiters(env, osc, oscl); + + /* hide error for AGL lock. */ + if (oscl->ols_agl) { + cl_object_put(env, osc2cl(osc)); + result = 0; + } + + if (anchor != NULL) + cl_sync_io_note(env, anchor, result); + } else { + if (osc_lock_is_lockless(oscl)) { + oio->oi_lockless = 1; + } else if (!async) { + LASSERT(oscl->ols_state == OLS_GRANTED); + LASSERT(oscl->ols_hold); + LASSERT(oscl->ols_dlmlock != NULL); + } + } + RETURN(result); } -static int osc_lock_wait(const struct lu_env *env, - const struct cl_lock_slice *slice) +/** + * Breaks a link between osc_lock and dlm_lock. + */ +static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) { - struct osc_lock *olck = cl2osc_lock(slice); - struct cl_lock *lock = olck->ols_cl.cls_lock; + struct ldlm_lock *dlmlock; - LINVRNT(osc_lock_invariant(olck)); - if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) - return 0; + dlmlock = olck->ols_dlmlock; + if (dlmlock == NULL) + return; - LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED && - lock->cll_error == 0, olck->ols_lock != NULL)); + if (olck->ols_hold) { + olck->ols_hold = 0; + osc_cancel_base(&olck->ols_handle, olck->ols_einfo.ei_mode); + olck->ols_handle.cookie = 0ULL; + } - return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT; -} + olck->ols_dlmlock = NULL; -/** - * An implementation of cl_lock_operations::clo_use() method that pins cached - * lock. - */ -static int osc_lock_use(const struct lu_env *env, - const struct cl_lock_slice *slice) -{ - struct osc_lock *olck = cl2osc_lock(slice); - int rc; - - LASSERT(!olck->ols_hold); - /* - * Atomically check for LDLM_FL_CBPENDING and addref a lock if this - * flag is not set. This protects us from a concurrent blocking ast. - */ - rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode); - if (rc == 0) { - olck->ols_hold = olck->ols_has_ref = 1; - olck->ols_state = OLS_GRANTED; - } else { - struct cl_lock *lock; - - /* - * Lock is being cancelled somewhere within - * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already - * set, but osc_ldlm_blocking_ast() hasn't yet acquired - * cl_lock mutex. - */ - lock = slice->cls_lock; - LASSERT(lock->cll_state == CLS_CACHED); - LASSERT(lock->cll_users > 0); - LASSERT(olck->ols_lock->l_flags & LDLM_FL_CBPENDING); - /* set a flag for osc_dlm_blocking_ast0() to signal the - * lock.*/ - olck->ols_ast_wait = 1; - rc = CLO_WAIT; - } - return rc; -} - -static int osc_lock_flush(struct osc_lock *ols, int discard) -{ - struct cl_lock *lock = ols->ols_cl.cls_lock; - struct cl_env_nest nest; - struct lu_env *env; - int result = 0; - - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - result = cl_lock_page_out(env, lock, discard); - cl_env_nested_put(&nest, env); - } else - result = PTR_ERR(env); - if (result == 0) - ols->ols_flush = 1; - return result; + /* release a reference taken in osc_lock_upcall(). 
*/ + LASSERT(olck->ols_has_ref); + lu_ref_del(&dlmlock->l_reference, "osc_lock", olck); + LDLM_LOCK_RELEASE(dlmlock); + olck->ols_has_ref = 0; } /** @@ -1404,274 +1053,183 @@ static int osc_lock_flush(struct osc_lock *ols, int discard) static void osc_lock_cancel(const struct lu_env *env, const struct cl_lock_slice *slice) { - struct cl_lock *lock = slice->cls_lock; - struct osc_lock *olck = cl2osc_lock(slice); - struct ldlm_lock *dlmlock = olck->ols_lock; - int result = 0; - int discard; - - LASSERT(cl_lock_is_mutexed(lock)); - LINVRNT(osc_lock_invariant(olck)); - - if (dlmlock != NULL) { - int do_cancel; - - discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA; - result = osc_lock_flush(olck, discard); - if (olck->ols_hold) - osc_lock_unuse(env, slice); - - lock_res_and_lock(dlmlock); - /* Now that we're the only user of dlm read/write reference, - * mostly the ->l_readers + ->l_writers should be zero. - * However, there is a corner case. - * See bug 18829 for details.*/ - do_cancel = (dlmlock->l_readers == 0 && - dlmlock->l_writers == 0); - dlmlock->l_flags |= LDLM_FL_CBPENDING; - unlock_res_and_lock(dlmlock); - if (do_cancel) - result = ldlm_cli_cancel(&olck->ols_handle); - if (result < 0) - CL_LOCK_DEBUG(D_ERROR, env, lock, - "lock %p cancel failure with error(%d)\n", - lock, result); - } - olck->ols_state = OLS_CANCELLED; - osc_lock_detach(env, olck); -} + struct osc_object *obj = cl2osc(slice->cls_obj); + struct osc_lock *oscl = cl2osc_lock(slice); -void cl_lock_page_list_fixup(const struct lu_env *env, - struct cl_io *io, struct cl_lock *lock, - struct cl_page_list *queue); + ENTRY; -#ifdef INVARIANT_CHECK -/** - * Returns true iff there are pages under \a olck not protected by other - * locks. - */ -static int osc_lock_has_pages(struct osc_lock *olck) -{ - struct cl_lock *lock; - struct cl_lock_descr *descr; - struct cl_object *obj; - struct osc_object *oob; - struct cl_page_list *plist; - struct cl_page *page; - struct cl_env_nest nest; - struct cl_io *io; - struct lu_env *env; - int result; - - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - obj = olck->ols_cl.cls_obj; - oob = cl2osc(obj); - io = &oob->oo_debug_io; - lock = olck->ols_cl.cls_lock; - descr = &lock->cll_descr; - plist = &osc_env_info(env)->oti_plist; - cl_page_list_init(plist); - - mutex_lock(&oob->oo_debug_mutex); - - io->ci_obj = cl_object_top(obj); - cl_io_init(env, io, CIT_MISC, io->ci_obj); - cl_page_gang_lookup(env, obj, io, - descr->cld_start, descr->cld_end, plist); - cl_lock_page_list_fixup(env, io, lock, plist); - if (plist->pl_nr > 0) { - CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n"); - cl_page_list_for_each(page, plist) - CL_PAGE_DEBUG(D_ERROR, env, page, "\n"); - } - result = plist->pl_nr > 0; - cl_page_list_disown(env, io, plist); - cl_page_list_fini(env, plist); - cl_io_fini(env, io); - mutex_unlock(&oob->oo_debug_mutex); - cl_env_nested_put(&nest, env); - } else - result = 0; - return result; -} -#else -# define osc_lock_has_pages(olck) (0) -#endif /* INVARIANT_CHECK */ - -static void osc_lock_delete(const struct lu_env *env, - const struct cl_lock_slice *slice) -{ - struct osc_lock *olck; + LINVRNT(osc_lock_invariant(oscl)); - olck = cl2osc_lock(slice); - LINVRNT(osc_lock_invariant(olck)); - LINVRNT(!osc_lock_has_pages(olck)); + osc_lock_detach(env, oscl); + oscl->ols_state = OLS_CANCELLED; + oscl->ols_flags &= ~LDLM_FL_LVB_READY; - if (olck->ols_hold) - osc_lock_unuse(env, slice); - osc_lock_detach(env, olck); -} - -/** - * Implements cl_lock_operations::clo_state() method for osc 
layer. - * - * Maintains osc_lock::ols_owner field. - * - * This assumes that lock always enters CLS_HELD (from some other state) in - * the same IO context as one that requested the lock. This should not be a - * problem, because context is by definition shared by all activity pertaining - * to the same high-level IO. - */ -static void osc_lock_state(const struct lu_env *env, - const struct cl_lock_slice *slice, - enum cl_lock_state state) -{ - struct osc_lock *lock = cl2osc_lock(slice); - struct osc_io *oio = osc_env_io(env); - - /* - * XXX multiple io contexts can use the lock at the same time. - */ - LINVRNT(osc_lock_invariant(lock)); - if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) { - LASSERT(lock->ols_owner == NULL); - lock->ols_owner = oio; - } else if (state != CLS_HELD) - lock->ols_owner = NULL; + osc_lock_wake_waiters(env, obj, oscl); + EXIT; } static int osc_lock_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct cl_lock_slice *slice) + lu_printer_t p, const struct cl_lock_slice *slice) { - struct osc_lock *lock = cl2osc_lock(slice); - - /* - * XXX print ldlm lock and einfo properly. - */ - (*p)(env, cookie, "%p %08x "LPU64" %d %p ", - lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie, - lock->ols_state, lock->ols_owner); - osc_lvb_print(env, cookie, p, &lock->ols_lvb); - return 0; + struct osc_lock *lock = cl2osc_lock(slice); + + (*p)(env, cookie, "%p "LPX64" "LPX64" %d %p ", + lock->ols_dlmlock, lock->ols_flags, lock->ols_handle.cookie, + lock->ols_state, lock->ols_owner); + osc_lvb_print(env, cookie, p, &lock->ols_lvb); + return 0; } static const struct cl_lock_operations osc_lock_ops = { .clo_fini = osc_lock_fini, .clo_enqueue = osc_lock_enqueue, - .clo_wait = osc_lock_wait, - .clo_unuse = osc_lock_unuse, - .clo_use = osc_lock_use, - .clo_delete = osc_lock_delete, - .clo_state = osc_lock_state, .clo_cancel = osc_lock_cancel, - .clo_weigh = osc_lock_weigh, - .clo_print = osc_lock_print + .clo_print = osc_lock_print, }; -static int osc_lock_lockless_enqueue(const struct lu_env *env, - const struct cl_lock_slice *slice, - struct cl_io *_, __u32 enqflags) -{ - LBUG(); - return 0; -} - -static int osc_lock_lockless_unuse(const struct lu_env *env, - const struct cl_lock_slice *slice) -{ - struct osc_lock *ols = cl2osc_lock(slice); - struct cl_lock *lock = slice->cls_lock; - - LASSERT(ols->ols_state == OLS_GRANTED); - LINVRNT(osc_lock_invariant(ols)); - - cl_lock_cancel(env, lock); - cl_lock_delete(env, lock); - return 0; -} - static void osc_lock_lockless_cancel(const struct lu_env *env, - const struct cl_lock_slice *slice) + const struct cl_lock_slice *slice) { - struct osc_lock *ols = cl2osc_lock(slice); - int result; - - result = osc_lock_flush(ols, 0); + struct osc_lock *ols = cl2osc_lock(slice); + struct osc_object *osc = cl2osc(slice->cls_obj); + struct cl_lock_descr *descr = &slice->cls_lock->cll_descr; + int result; + + LASSERT(ols->ols_dlmlock == NULL); + result = osc_lock_flush(osc, descr->cld_start, descr->cld_end, + descr->cld_mode, 0); if (result) CERROR("Pages for lockless lock %p were not purged(%d)\n", ols, result); - ols->ols_state = OLS_CANCELLED; -} - -static int osc_lock_lockless_wait(const struct lu_env *env, - const struct cl_lock_slice *slice) -{ - struct osc_lock *olck = cl2osc_lock(slice); - struct cl_lock *lock = olck->ols_cl.cls_lock; - - LINVRNT(osc_lock_invariant(olck)); - LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED); - - return lock->cll_error; -} -static void osc_lock_lockless_state(const struct 
lu_env *env, - const struct cl_lock_slice *slice, - enum cl_lock_state state) -{ - struct osc_lock *lock = cl2osc_lock(slice); - struct osc_io *oio = osc_env_io(env); - - LINVRNT(osc_lock_invariant(lock)); - if (state == CLS_HELD) { - LASSERT(lock->ols_owner == NULL); - lock->ols_owner = oio; - - /* set the io to be lockless if this lock is for io's - * host object */ - if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj)) - oio->oi_lockless = 1; - } else - lock->ols_owner = NULL; -} - -static int osc_lock_lockless_fits_into(const struct lu_env *env, - const struct cl_lock_slice *slice, - const struct cl_lock_descr *need, - const struct cl_io *io) -{ - return 0; + osc_lock_wake_waiters(env, osc, ols); } static const struct cl_lock_operations osc_lock_lockless_ops = { .clo_fini = osc_lock_fini, - .clo_enqueue = osc_lock_lockless_enqueue, - .clo_wait = osc_lock_lockless_wait, - .clo_unuse = osc_lock_lockless_unuse, - .clo_state = osc_lock_lockless_state, - .clo_fits_into = osc_lock_lockless_fits_into, + .clo_enqueue = osc_lock_enqueue, .clo_cancel = osc_lock_lockless_cancel, .clo_print = osc_lock_print }; -int osc_lock_init(const struct lu_env *env, - struct cl_object *obj, struct cl_lock *lock, - const struct cl_io *_) +static void osc_lock_set_writer(const struct lu_env *env, + const struct cl_io *io, + struct cl_object *obj, struct osc_lock *oscl) { - struct osc_lock *clk; - int result; - - OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO); - if (clk != NULL) { - osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo); - clk->ols_state = OLS_NEW; - cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops); - result = 0; - } else - result = -ENOMEM; - return result; + struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr; + pgoff_t io_start; + pgoff_t io_end; + + if (!cl_object_same(io->ci_obj, obj)) + return; + + if (likely(io->ci_type == CIT_WRITE)) { + io_start = cl_index(obj, io->u.ci_rw.crw_pos); + io_end = cl_index(obj, io->u.ci_rw.crw_pos + + io->u.ci_rw.crw_count - 1); + if (cl_io_is_append(io)) { + io_start = 0; + io_end = CL_PAGE_EOF; + } + } else { + LASSERT(cl_io_is_mkwrite(io)); + io_start = io_end = io->u.ci_fault.ft_index; + } + + if (descr->cld_mode >= CLM_WRITE && + descr->cld_start <= io_start && descr->cld_end >= io_end) { + struct osc_io *oio = osc_env_io(env); + + /* There must be only one lock to match the write region */ + LASSERT(oio->oi_write_osclock == NULL); + oio->oi_write_osclock = oscl; + } } +int osc_lock_init(const struct lu_env *env, + struct cl_object *obj, struct cl_lock *lock, + const struct cl_io *io) +{ + struct osc_lock *oscl; + __u32 enqflags = lock->cll_descr.cld_enq_flags; + + OBD_SLAB_ALLOC_PTR_GFP(oscl, osc_lock_kmem, GFP_NOFS); + if (oscl == NULL) + return -ENOMEM; + + oscl->ols_state = OLS_NEW; + spin_lock_init(&oscl->ols_lock); + INIT_LIST_HEAD(&oscl->ols_waiting_list); + INIT_LIST_HEAD(&oscl->ols_wait_entry); + INIT_LIST_HEAD(&oscl->ols_nextlock_oscobj); + + oscl->ols_flags = osc_enq2ldlm_flags(enqflags); + oscl->ols_agl = !!(enqflags & CEF_AGL); + if (oscl->ols_agl) + oscl->ols_flags |= LDLM_FL_BLOCK_NOWAIT; + if (oscl->ols_flags & LDLM_FL_HAS_INTENT) { + oscl->ols_flags |= LDLM_FL_BLOCK_GRANTED; + oscl->ols_glimpse = 1; + } + + cl_lock_slice_add(lock, &oscl->ols_cl, obj, &osc_lock_ops); + + if (!(enqflags & CEF_MUST)) + /* try to convert this lock to a lockless lock */ + osc_lock_to_lockless(env, oscl, (enqflags & CEF_NEVER)); + if (oscl->ols_locklessable && !(enqflags & CEF_DISCARD_DATA)) + oscl->ols_flags |= 
LDLM_FL_DENY_ON_CONTENTION; + + if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io)) + osc_lock_set_writer(env, io, obj, oscl); + + LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags "LPX64"\n", + lock, oscl, oscl->ols_flags); + + return 0; +} +/** + * Finds an existing lock covering given index and optionally different from a + * given \a except lock. + */ +struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env, + struct osc_object *obj, pgoff_t index, + enum osc_dap_flags dap_flags) +{ + struct osc_thread_info *info = osc_env_info(env); + struct ldlm_res_id *resname = &info->oti_resname; + ldlm_policy_data_t *policy = &info->oti_policy; + struct lustre_handle lockh; + struct ldlm_lock *lock = NULL; + ldlm_mode_t mode; + __u64 flags; + + ENTRY; + + ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname); + osc_index2policy(policy, osc2cl(obj), index, index); + policy->l_extent.gid = LDLM_GID_ANY; + + flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING; + if (dap_flags & OSC_DAP_FL_TEST_LOCK) + flags |= LDLM_FL_TEST_LOCK; + /* + * It is fine to match any group lock since there could be only one + * with a uniq gid and it conflicts with all other lock modes too + */ +again: + mode = ldlm_lock_match(osc_export(obj)->exp_obd->obd_namespace, + flags, resname, LDLM_EXTENT, policy, + LCK_PR | LCK_PW | LCK_GROUP, &lockh, + dap_flags & OSC_DAP_FL_CANCELING); + if (mode != 0) { + lock = ldlm_handle2lock(&lockh); + /* RACE: the lock is cancelled so let's try again */ + if (unlikely(lock == NULL)) + goto again; + } + + RETURN(lock); +} /** @} osc */
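
The error path at the end of osc_lock_enqueue() in this hunk moves the lock to OLS_CANCELLED, wakes any waiters, and then hides the failure when the request was a speculative AGL (asynchronous glimpse) enqueue. Below is a minimal standalone sketch of that decision; all types and helpers are hypothetical stand-ins, not the Lustre API.

#include <stdbool.h>
#include <stdio.h>

enum sk_ols_state { SK_OLS_NEW, SK_OLS_GRANTED, SK_OLS_CANCELLED };

struct sk_osc_lock {
        enum sk_ols_state ols_state;
        bool              ols_agl;      /* speculative glimpse enqueue */
};

/* hypothetical stand-in for osc_lock_wake_waiters() */
static void sk_wake_waiters(struct sk_osc_lock *ols)
{
        printf("wake waiters of %p\n", (void *)ols);
}

/* fold an enqueue result into the lock state, as osc_lock_enqueue() does */
static int sk_enqueue_done(struct sk_osc_lock *ols, int result)
{
        if (result != 0) {
                ols->ols_state = SK_OLS_CANCELLED;
                sk_wake_waiters(ols);
                if (ols->ols_agl)               /* hide error for AGL lock */
                        result = 0;
        } else {
                /* simplified: the diff only asserts this for sync enqueues */
                ols->ols_state = SK_OLS_GRANTED;
        }
        return result;
}

int main(void)
{
        struct sk_osc_lock agl = { SK_OLS_NEW, true  };
        struct sk_osc_lock reg = { SK_OLS_NEW, false };

        printf("agl enqueue: %d\n", sk_enqueue_done(&agl, -5));  /* 0  */
        printf("reg enqueue: %d\n", sk_enqueue_done(&reg, -5));  /* -5 */
        return 0;
}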
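
The new osc_lock_detach() drops the enqueue hold (cancelling the DLM lock use and clearing the handle cookie) before unlinking ols_dlmlock and releasing the reference taken in the upcall. A compilable sketch of that ordering, using a hypothetical reference-counted lock rather than the real LDLM structures:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct sk_dlm_lock {
        int l_refs;
};

struct sk_osc_lock {
        struct sk_dlm_lock *ols_dlmlock;
        unsigned long long  ols_handle_cookie;
        bool                ols_hold;           /* enqueue hold still accounted */
        bool                ols_has_ref;        /* reference taken in the upcall */
};

/* hypothetical stand-in for osc_cancel_base() */
static void sk_cancel_base(unsigned long long cookie) { (void)cookie; }

/* hypothetical stand-in for LDLM_LOCK_RELEASE() */
static void sk_lock_release(struct sk_dlm_lock *dlm) { dlm->l_refs--; }

static void sk_lock_detach(struct sk_osc_lock *ols)
{
        struct sk_dlm_lock *dlm = ols->ols_dlmlock;

        if (dlm == NULL)
                return;

        if (ols->ols_hold) {                    /* drop the enqueue hold first */
                ols->ols_hold = false;
                sk_cancel_base(ols->ols_handle_cookie);
                ols->ols_handle_cookie = 0ULL;
        }

        ols->ols_dlmlock = NULL;

        assert(ols->ols_has_ref);               /* upcall reference must exist */
        sk_lock_release(dlm);
        ols->ols_has_ref = false;
}

int main(void)
{
        struct sk_dlm_lock dlm = { .l_refs = 1 };
        struct sk_osc_lock ols = {
                .ols_dlmlock       = &dlm,
                .ols_handle_cookie = 0xabcdULL,
                .ols_hold          = true,
                .ols_has_ref       = true,
        };

        sk_lock_detach(&ols);
        assert(dlm.l_refs == 0 && ols.ols_dlmlock == NULL);
        return 0;
}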
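
After this change two cl_lock_operations tables remain, osc_lock_ops and osc_lock_lockless_ops; they share fini/enqueue/print but plug in different cancel methods. A small self-contained illustration of that dispatch pattern (the names here are hypothetical, not the cl_lock API):

#include <stdio.h>

struct sk_lock;

struct sk_lock_ops {
        void (*clo_cancel)(struct sk_lock *lk);
        void (*clo_print)(const struct sk_lock *lk);
};

struct sk_lock {
        const struct sk_lock_ops *ops;
        const char               *name;
};

static void sk_print(const struct sk_lock *lk)
{
        printf("lock %s\n", lk->name);
}

/* DLM-backed cancel: detach from the server-side lock */
static void sk_cancel_dlm(struct sk_lock *lk)
{
        printf("%s: detach dlm lock, mark cancelled\n", lk->name);
}

/* lockless cancel: flush or discard the covered pages instead */
static void sk_cancel_lockless(struct sk_lock *lk)
{
        printf("%s: flush covered pages, wake waiters\n", lk->name);
}

static const struct sk_lock_ops sk_ops          = { sk_cancel_dlm,      sk_print };
static const struct sk_lock_ops sk_lockless_ops = { sk_cancel_lockless, sk_print };

int main(void)
{
        struct sk_lock a = { &sk_ops,          "dlm"      };
        struct sk_lock b = { &sk_lockless_ops, "lockless" };

        a.ops->clo_cancel(&a);
        b.ops->clo_cancel(&b);
        return 0;
}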
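
osc_lock_set_writer() converts the I/O byte range into page indices, widens it to [0, CL_PAGE_EOF] for appends, and records the lock as oi_write_osclock only when the lock mode and extent cover that range. A compilable sketch of the index arithmetic with hypothetical constants (the real code goes through cl_index() and the cl_io unions):

#include <stdbool.h>
#include <stdint.h>

#define SK_PAGE_SHIFT 12                        /* assume 4 KiB pages */
#define SK_PAGE_EOF   UINT64_MAX

struct sk_range {
        uint64_t start;                         /* first page index */
        uint64_t end;                           /* last page index  */
};

/* byte extent of a write -> page index range; append covers everything */
static struct sk_range sk_write_pages(uint64_t pos, uint64_t count, bool append)
{
        struct sk_range r;

        if (append) {
                r.start = 0;
                r.end   = SK_PAGE_EOF;
        } else {
                r.start = pos >> SK_PAGE_SHIFT;
                r.end   = (pos + count - 1) >> SK_PAGE_SHIFT;
        }
        return r;
}

/* a lock extent [lstart, lend] qualifies as the write lock iff it covers r */
static bool sk_covers(uint64_t lstart, uint64_t lend, struct sk_range r)
{
        return lstart <= r.start && lend >= r.end;
}

int main(void)
{
        struct sk_range r = sk_write_pages(4096, 8192, false); /* pages 1..2 */

        return sk_covers(0, 2, r) ? 0 : 1;
}

For example, an 8192-byte write at offset 4096 maps to pages 1..2, so only a write-mode lock whose extent spans at least that range would be remembered for the I/O.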
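
osc_dlmlock_at_pgoff() matches an extent lock for a single page and retries when the matched handle can no longer be resolved, i.e. the lock was cancelled between ldlm_lock_match() and ldlm_handle2lock(). A standalone sketch of that retry loop with stubbed match/resolve helpers (hypothetical, not the LDLM API):

#include <stddef.h>
#include <stdio.h>

struct sk_lock {
        unsigned long cookie;
};

/* stub: pretend a covering lock exists and hand back its handle cookie */
static int sk_lock_match(unsigned long index, unsigned long *cookie)
{
        *cookie = 0x1000 + index;
        return 1;                               /* "mode != 0" in the diff */
}

/* stub: resolve a handle; fail once to model a concurrent cancellation */
static struct sk_lock *sk_handle2lock(unsigned long cookie)
{
        static struct sk_lock lock;
        static int raced = 1;

        if (raced--)
                return NULL;                    /* cancelled under us */
        lock.cookie = cookie;
        return &lock;
}

static struct sk_lock *sk_lock_at_index(unsigned long index)
{
        struct sk_lock *lock = NULL;
        unsigned long cookie;

again:
        if (sk_lock_match(index, &cookie)) {
                lock = sk_handle2lock(cookie);
                if (lock == NULL)               /* RACE: cancelled, try again */
                        goto again;
        }
        return lock;
}

int main(void)
{
        struct sk_lock *lock = sk_lock_at_index(7);

        printf("matched lock cookie %#lx\n", lock ? lock->cookie : 0UL);
        return 0;
}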