X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fosc%2Fosc_lock.c;h=0b03ea7c32b33e6b4b69de37197be41d1949acae;hp=6ca2014e431e0e78b58407846f1e76e7e01f7a50;hb=d2dbff42e78d7ebca4db534df7e1c19f6b674a22;hpb=40ac868676c42015e2aaec49242de54c80bc5c92

diff --git a/lustre/osc/osc_lock.c b/lustre/osc/osc_lock.c
index 6ca2014..0b03ea7 100644
--- a/lustre/osc/osc_lock.c
+++ b/lustre/osc/osc_lock.c
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -54,6 +54,8 @@
  * @{
  */
+#define _PAGEREF_MAGIC (-10000000)
+
 /*****************************************************************************
  *
  * Type conversions.
  *
@@ -135,10 +137,10 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
 {
         struct ldlm_lock *dlmlock;
 
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         dlmlock = olck->ols_lock;
         if (dlmlock == NULL) {
-                spin_unlock(&osc_ast_guard);
+                cfs_spin_unlock(&osc_ast_guard);
                 return;
         }
 
@@ -147,35 +149,49 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
          * call to osc_lock_detach() */
         dlmlock->l_ast_data = NULL;
         olck->ols_handle.cookie = 0ULL;
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
 
         lock_res_and_lock(dlmlock);
         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
                 struct cl_object *obj = olck->ols_cl.cls_obj;
                 struct cl_attr *attr  = &osc_env_info(env)->oti_attr;
-                __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
+                __u64 old_kms;
 
+                cl_object_attr_lock(obj);
+                /* Must get the value under the lock to avoid possible races. */
+                old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
                 /* Update the kms. Need to loop all granted locks.
                  * Not a problem for the client */
                 attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
-                unlock_res_and_lock(dlmlock);
 
-                cl_object_attr_lock(obj);
                 cl_object_attr_set(env, obj, attr, CAT_KMS);
                 cl_object_attr_unlock(obj);
-        } else
-                unlock_res_and_lock(dlmlock);
+        }
+        unlock_res_and_lock(dlmlock);
 
         /* release a reference taken in osc_lock_upcall0(). */
+        LASSERT(olck->ols_has_ref);
         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
         LDLM_LOCK_RELEASE(dlmlock);
+        olck->ols_has_ref = 0;
+}
+
+static int osc_lock_unhold(struct osc_lock *ols)
+{
+        int result = 0;
+
+        if (ols->ols_hold) {
+                ols->ols_hold = 0;
+                result = osc_cancel_base(&ols->ols_handle,
+                                         ols->ols_einfo.ei_mode);
+        }
+        return result;
 }
 
 static int osc_lock_unuse(const struct lu_env *env,
                           const struct cl_lock_slice *slice)
 {
         struct osc_lock *ols = cl2osc_lock(slice);
-        int result;
 
         LASSERT(ols->ols_state == OLS_GRANTED ||
                 ols->ols_state == OLS_UPCALL_RECEIVED);
@@ -193,10 +209,7 @@ static int osc_lock_unuse(const struct lu_env *env,
          * e.g., for liblustre) sees that lock is released.
          */
         ols->ols_state = OLS_RELEASED;
-        ols->ols_hold = 0;
-        result = osc_cancel_base(&ols->ols_handle, ols->ols_einfo.ei_mode);
-        ols->ols_has_ref = 0;
-        return result;
+        return osc_lock_unhold(ols);
 }
 
 static void osc_lock_fini(const struct lu_env *env,
@@ -211,9 +224,10 @@ static void osc_lock_fini(const struct lu_env *env,
          * to the lock), before reply from a server was received. In this case
          * lock is destroyed immediately after upcall.
          */
-        if (ols->ols_hold)
-                osc_lock_unuse(env, slice);
+        osc_lock_unhold(ols);
         LASSERT(ols->ols_lock == NULL);
+        LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
+                cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
 
         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
 }
@@ -231,9 +245,9 @@ void osc_lock_build_res(const struct lu_env *env, const struct osc_object *obj,
         } else {
                 /*
                  * In reality, where ost server expects ->lsm_object_id and
-                 * ->lsm_object_gr in rename.
+                 * ->lsm_object_seq in rename.
                  */
-                osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr,
+                osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq,
                                    resname);
         }
 }
@@ -267,14 +281,14 @@ static int osc_enq2ldlm_flags(__u32 enqflags)
  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
  * pointers. Initialized in osc_init().
  */
-spinlock_t osc_ast_guard;
+cfs_spinlock_t osc_ast_guard;
 
 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
 {
         struct osc_lock *olck;
 
         lock_res_and_lock(dlm_lock);
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         olck = dlm_lock->l_ast_data;
         if (olck != NULL) {
                 struct cl_lock *lock = olck->ols_cl.cls_lock;
@@ -294,7 +308,7 @@ static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
                 } else
                         olck = NULL;
         }
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
         unlock_res_and_lock(dlm_lock);
         return olck;
 }
@@ -399,7 +413,7 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
         LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
         ENTRY;
 
-        if (olck->ols_state != OLS_GRANTED) {
+        if (olck->ols_state < OLS_GRANTED) {
                 lock = olck->ols_cl.cls_lock;
                 ext = &dlmlock->l_policy_data.l_extent;
                 descr = &osc_env_info(env)->oti_descr;
@@ -442,11 +456,11 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
         LASSERT(dlmlock != NULL);
 
         lock_res_and_lock(dlmlock);
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         LASSERT(dlmlock->l_ast_data == olck);
         LASSERT(olck->ols_lock == NULL);
         olck->ols_lock = dlmlock;
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
 
         /*
          * Lock might be not yet granted. In this case, completion ast
@@ -462,11 +476,12 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
          * this.
          */
         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
-        olck->ols_hold = olck->ols_has_ref = 1;
+        olck->ols_hold = 1;
 
         /* lock reference taken by ldlm_handle2lock_long() is owned by
          * osc_lock and released in osc_lock_detach() */
         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
+        olck->ols_has_ref = 1;
 }
 
 /**
@@ -496,7 +511,7 @@ static int osc_lock_upcall(void *cookie, int errcode)
         } else if (olck->ols_state == OLS_CANCELLED) {
                 rc = -EIO;
         } else {
-                CERROR("Impossible state: %i\n", olck->ols_state);
+                CERROR("Impossible state: %d\n", olck->ols_state);
                 LBUG();
         }
         if (rc) {
@@ -505,11 +520,11 @@ static int osc_lock_upcall(void *cookie, int errcode)
                 dlmlock = ldlm_handle2lock(&olck->ols_handle);
                 if (dlmlock != NULL) {
                         lock_res_and_lock(dlmlock);
-                        spin_lock(&osc_ast_guard);
+                        cfs_spin_lock(&osc_ast_guard);
                         LASSERT(olck->ols_lock == NULL);
                         dlmlock->l_ast_data = NULL;
                         olck->ols_handle.cookie = 0ULL;
-                        spin_unlock(&osc_ast_guard);
+                        cfs_spin_unlock(&osc_ast_guard);
                         unlock_res_and_lock(dlmlock);
                         LDLM_LOCK_PUT(dlmlock);
                 }
@@ -569,12 +584,11 @@ static void osc_lock_blocking(const struct lu_env *env,
         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
         LASSERT(!osc_lock_is_lockless(olck));
 
-        if (olck->ols_hold)
-                /*
-                 * Lock might be still addref-ed here, if e.g., blocking ast
-                 * is sent for a failed lock.
-                 */
-                osc_lock_unuse(env, &olck->ols_cl);
+        /*
+         * Lock might be still addref-ed here, if e.g., blocking ast
+         * is sent for a failed lock.
+         */
+        osc_lock_unhold(olck);
 
         if (blocking && olck->ols_state < OLS_BLOCKED)
                 /*
@@ -771,7 +785,7 @@ static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
                 LASSERT(dlmlock->l_lvb_data != NULL);
                 lock_res_and_lock(dlmlock);
                 olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
-                if (olck->ols_lock == NULL)
+                if (olck->ols_lock == NULL) {
                         /*
                          * upcall (osc_lock_upcall()) hasn't yet been
                          * called. Do nothing now, upcall will bind
@@ -781,12 +795,17 @@ static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
                          * and ldlm_lock are always bound when
                          * osc_lock is in OLS_GRANTED state.
                          */
-                        ;
-                else if (dlmlock->l_granted_mode != LCK_MINMODE)
+                } else if (dlmlock->l_granted_mode ==
+                           dlmlock->l_req_mode) {
                         osc_lock_granted(env, olck, dlmlock, dlmrc);
+                }
                 unlock_res_and_lock(dlmlock);
-                if (dlmrc != 0)
+
+                if (dlmrc != 0) {
+                        CL_LOCK_DEBUG(D_ERROR, env, lock,
+                                      "dlmlock returned %d\n", dlmrc);
                         cl_lock_error(env, lock, dlmrc);
+                }
                 cl_lock_mutex_put(env, lock);
                 osc_ast_data_put(env, olck);
                 result = 0;
@@ -876,7 +895,7 @@ static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
         unsigned long weight;
         ENTRY;
 
-        might_sleep();
+        cfs_might_sleep();
         /*
          * osc_ldlm_weigh_ast has a complex context since it might be called
          * because of lock canceling, or from user's input. We have to make
@@ -936,86 +955,6 @@ static void osc_lock_build_einfo(const struct lu_env *env,
         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
 }
 
-static int osc_lock_delete0(struct cl_lock *conflict)
-{
-        struct cl_env_nest nest;
-        struct lu_env *env;
-        int rc = 0;
-
-        env = cl_env_nested_get(&nest);
-        if (!IS_ERR(env)) {
-                cl_lock_delete(env, conflict);
-                cl_env_nested_put(&nest, env);
-        } else
-                rc = PTR_ERR(env);
-        return rc;
-}
-/**
- * Cancels \a conflict lock and waits until it reached CLS_FREEING state. This
- * is called as a part of enqueuing to cancel conflicting locks early.
- *
- * \retval 0: success, \a conflict was cancelled and destroyed.
- *
- * \retval CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was
- *                     released in the process. Repeat enqueing.
- *
- * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and
- *                       either \a lock is non-blocking, or current thread
- *                       holds other locks, that prevent it from waiting
- *                       for cancel to complete.
- *
- * \retval -ve: other error, including -EINTR.
- *
- */
-static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock,
-                                struct cl_lock *conflict, int canwait)
-{
-        int rc;
-
-        LASSERT(cl_lock_is_mutexed(lock));
-        LASSERT(cl_lock_is_mutexed(conflict));
-
-        rc = 0;
-        if (conflict->cll_state != CLS_FREEING) {
-                cl_lock_cancel(env, conflict);
-                rc = osc_lock_delete0(conflict);
-                if (rc)
-                        return rc;
-
-                if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) {
-                        rc = -EWOULDBLOCK;
-                        if (cl_lock_nr_mutexed(env) > 2)
-                                /*
-                                 * If mutices of locks other than @lock and
-                                 * @scan are held by the current thread, it
-                                 * cannot wait on @scan state change in a
-                                 * dead-lock safe matter, so simply skip early
-                                 * cancellation in this case.
-                                 *
-                                 * This means that early cancellation doesn't
-                                 * work when there is even slight mutex
-                                 * contention, as top-lock's mutex is usually
-                                 * held at this time.
-                                 */
-                                ;
-                        else if (canwait) {
-                                /* Waiting for @scan to be destroyed */
-                                cl_lock_mutex_put(env, lock);
-                                do {
-                                        rc = cl_lock_state_wait(env, conflict);
-                                } while (!rc &&
-                                         conflict->cll_state < CLS_FREEING);
-                                /* mutex was released, repeat enqueue. */
-                                rc = rc ?: CLO_REPEAT;
-                                cl_lock_mutex_get(env, lock);
-                        }
-                }
-                LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING));
-                CDEBUG(D_INFO, "lock %p was %s freed now, rc (%d)\n",
-                       conflict, rc ? "not":"", rc);
-        }
-        return rc;
-}
-
 /**
  * Determine if the lock should be converted into a lockless lock.
  *
@@ -1054,14 +993,14 @@ static void osc_lock_to_lockless(const struct lu_env *env,
                         io->ci_lockreq == CILR_NEVER);
 
                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
-                ols->ols_locklessable = (io->ci_type != CIT_TRUNC) &&
+                ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
                                 (io->ci_lockreq == CILR_MAYBE) &&
                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
                 if (io->ci_lockreq == CILR_NEVER ||
                         /* lockless IO */
                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
                         /* lockless truncate */
-                    (io->ci_type == CIT_TRUNC &&
+                    (cl_io_is_trunc(io) &&
                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
                      osd->od_lockless_truncate)) {
                         ols->ols_locklessable = 1;
@@ -1071,6 +1010,21 @@ static void osc_lock_to_lockless(const struct lu_env *env,
         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
 }
 
+static int osc_lock_compatible(const struct osc_lock *qing,
+                               const struct osc_lock *qed)
+{
+        enum cl_lock_mode qing_mode;
+        enum cl_lock_mode qed_mode;
+
+        qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
+        if (qed->ols_glimpse &&
+            (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
+                return 1;
+
+        qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
+        return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
+}
+
 /**
  * Cancel all conflicting locks and wait for them to be destroyed.
  *
@@ -1088,36 +1042,32 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
         struct cl_lock_descr    *descr   = &lock->cll_descr;
         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
-        struct cl_lock_closure  *closure = &osc_env_info(env)->oti_closure;
         struct cl_lock          *scan;
-        struct cl_lock          *temp;
+        struct cl_lock          *conflict= NULL;
         int lockless             = osc_lock_is_lockless(olck);
         int rc                   = 0;
-        int canwait;
-        int stop;
         ENTRY;
 
         LASSERT(cl_lock_is_mutexed(lock));
         LASSERT(lock->cll_state == CLS_QUEUING);
 
-        /*
-         * XXX This function could be sped up if we had asynchronous
-         * cancellation.
-         */
+        /* make it enqueue anyway for glimpse lock, because we actually
+         * don't need to cancel any conflicting locks. */
+        if (olck->ols_glimpse)
+                return 0;
+
+        cfs_spin_lock(&hdr->coh_lock_guard);
+        cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
+                struct cl_lock_descr *cld = &scan->cll_descr;
+                const struct osc_lock *scan_ols;
 
-        canwait =
-                !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) &&
-                cl_lock_nr_mutexed(env) == 1;
-        cl_lock_closure_init(env, closure, lock, canwait);
-        spin_lock(&hdr->coh_lock_guard);
-        list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) {
                 if (scan == lock)
-                        continue;
+                        break;
 
                 if (scan->cll_state < CLS_QUEUING ||
                     scan->cll_state == CLS_FREEING ||
-                    scan->cll_descr.cld_start > descr->cld_end ||
-                    scan->cll_descr.cld_end < descr->cld_start)
+                    cld->cld_start > descr->cld_end ||
+                    cld->cld_end < descr->cld_start)
                         continue;
 
                 /* overlapped and living locks. */
@@ -1129,118 +1079,60 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
                         continue;
                 }
 
-                /* A tricky case for lockless pages:
-                 * We need to cancel the compatible locks if we're enqueuing
+                scan_ols = osc_lock_at(scan);
+
+                /* We need to cancel the compatible locks if we're enqueuing
                  * a lockless lock, for example:
                  * imagine that client has PR lock on [0, 1000], and thread T0
                  * is doing lockless IO in [500, 1500] region. Concurrent
                  * thread T1 can see lockless data in [500, 1000], which is
-                 * wrong, because these data are possibly stale.
-                 */
-                if (!lockless && cl_lock_compatible(scan, lock))
+                 * wrong, because these data are possibly stale. */
+                if (!lockless && osc_lock_compatible(olck, scan_ols))
                         continue;
 
                 /* Now @scan is conflicting with @lock, this means current
                  * thread have to sleep for @scan being destroyed. */
-                cl_lock_get_trust(scan);
-                if (&temp->cll_linkage != &hdr->coh_locks)
-                        cl_lock_get_trust(temp);
-                spin_unlock(&hdr->coh_lock_guard);
-                lu_ref_add(&scan->cll_reference, "cancel-wait", lock);
-
-                LASSERT(list_empty(&closure->clc_list));
-                rc = cl_lock_closure_build(env, scan, closure);
-                if (rc == 0) {
-                        rc = osc_lock_cancel_wait(env, lock, scan, canwait);
-                        cl_lock_disclosure(env, closure);
-                        if (rc == -EWOULDBLOCK)
-                                rc = 0;
+                if (scan_ols->ols_owner == osc_env_io(env)) {
+                        CERROR("DEADLOCK POSSIBLE!\n");
+                        CL_LOCK_DEBUG(D_ERROR, env, scan, "queued.\n");
+                        CL_LOCK_DEBUG(D_ERROR, env, lock, "queuing.\n");
+                        libcfs_debug_dumpstack(NULL);
                 }
-                if (rc == CLO_REPEAT && !canwait)
-                        /* cannot wait... no early cancellation. */
-                        rc = 0;
-
-                lu_ref_del(&scan->cll_reference, "cancel-wait", lock);
-                cl_lock_put(env, scan);
-                spin_lock(&hdr->coh_lock_guard);
-                /*
-                 * Lock list could have been modified, while spin-lock was
-                 * released. Check that it is safe to continue.
-                 */
-                stop = list_empty(&temp->cll_linkage);
-                if (&temp->cll_linkage != &hdr->coh_locks)
-                        cl_lock_put(env, temp);
-                if (stop || rc != 0)
-                        break;
+                cl_lock_get_trust(scan);
+                conflict = scan;
+                break;
         }
-        spin_unlock(&hdr->coh_lock_guard);
-        cl_lock_closure_fini(closure);
-        RETURN(rc);
-}
+        cfs_spin_unlock(&hdr->coh_lock_guard);
 
-/**
- * Deadlock avoidance for osc_lock_enqueue(). Consider following scenario:
- *
- *     - Thread0: obtains PR:[0, 10]. Lock is busy.
- *
- *     - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
- *       PR:[0, 10], but cancellation of busy lock is postponed.
- *
- *     - Thread0: enqueue PR:[30, 40]. Lock is locally matched to
- *       PW:[5, 50], and thread0 waits for the lock completion never
- *       releasing PR:[0, 10]---deadlock.
- *
- * The second PR lock can be glimpse (it is to deal with that situation that
- * ll_glimpse_size() has second argument, preventing local match of
- * not-yet-granted locks, see bug 10295). Similar situation is possible in the
- * case of memory mapped user level buffer.
- *
- * To prevent this we can detect a situation when current "thread" or "io"
- * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
- * the ols->ols_flags, or prevent local match with PW locks.
- */
-static int osc_deadlock_is_possible(const struct lu_env *env,
-                                    struct cl_lock *lock)
-{
-        struct cl_object        *obj;
-        struct cl_object_header *head;
-        struct cl_lock          *scan;
-        struct osc_io           *oio;
-
-        int result;
-
-        ENTRY;
-
-        LASSERT(cl_lock_is_mutexed(lock));
-
-        oio  = osc_env_io(env);
-        obj  = lock->cll_descr.cld_obj;
-        head = cl_object_header(obj);
-
-        result = 0;
-        spin_lock(&head->coh_lock_guard);
-        list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
-                if (scan != lock) {
-                        struct osc_lock *oscan;
-
-                        oscan = osc_lock_at(scan);
-                        LASSERT(oscan != NULL);
-                        if (oscan->ols_owner == oio) {
-                                result = 1;
-                                break;
-                        }
+        if (conflict) {
+                if (lock->cll_descr.cld_mode == CLM_GROUP) {
+                        /* we want a group lock but a previous lock request
+                         * conflicts, we do not wait but return 0 so the
+                         * request is send to the server
+                         */
+                        CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
+                                           "with %p, no wait, send to server\n",
+                               lock, conflict);
+                        cl_lock_put(env, conflict);
+                        rc = 0;
+                } else {
+                        CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
+                                           "will wait\n",
+                               lock, conflict);
+                        LASSERT(lock->cll_conflict == NULL);
+                        lu_ref_add(&conflict->cll_reference, "cancel-wait",
+                                   lock);
+                        lock->cll_conflict = conflict;
+                        rc = CLO_WAIT;
                 }
         }
-        spin_unlock(&head->coh_lock_guard);
-        RETURN(result);
+        RETURN(rc);
 }
 
 /**
  * Implementation of cl_lock_operations::clo_enqueue() method for osc
  * layer. This initiates ldlm enqueue:
  *
- *     - checks for possible dead-lock conditions (osc_deadlock_is_possible());
- *
  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
  *
  *     - calls osc_enqueue_base() to do actual enqueue.
@@ -1257,11 +1149,6 @@ static int osc_lock_enqueue(const struct lu_env *env,
 {
         struct osc_lock          *ols  = cl2osc_lock(slice);
         struct cl_lock           *lock = ols->ols_cl.cls_lock;
-        struct osc_object        *obj  = cl2osc(slice->cls_obj);
-        struct osc_thread_info   *info = osc_env_info(env);
-        struct ldlm_res_id       *resname = &info->oti_resname;
-        ldlm_policy_data_t       *policy  = &info->oti_policy;
-        struct ldlm_enqueue_info *einfo   = &ols->ols_einfo;
         int result;
         ENTRY;
 
@@ -1269,20 +1156,22 @@ static int osc_lock_enqueue(const struct lu_env *env,
         LASSERT(lock->cll_state == CLS_QUEUING);
         LASSERT(ols->ols_state == OLS_NEW);
 
-        osc_lock_build_res(env, obj, resname);
-        osc_lock_build_policy(env, lock, policy);
         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
-        if (osc_deadlock_is_possible(env, lock))
-                ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
                 ols->ols_glimpse = 1;
+        if (!osc_lock_is_lockless(ols) && !(enqflags & CEF_MUST))
+                /* try to convert this lock to a lockless lock */
+                osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
 
         result = osc_lock_enqueue_wait(env, ols);
         if (result == 0) {
-                if (!(enqflags & CEF_MUST))
-                        /* try to convert this lock to a lockless lock */
-                        osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
                 if (!osc_lock_is_lockless(ols)) {
+                        struct osc_object        *obj = cl2osc(slice->cls_obj);
+                        struct osc_thread_info   *info = osc_env_info(env);
+                        struct ldlm_res_id       *resname = &info->oti_resname;
+                        ldlm_policy_data_t       *policy = &info->oti_policy;
+                        struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
+
                         if (ols->ols_locklessable)
                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
 
@@ -1296,6 +1185,8 @@ static int osc_lock_enqueue(const struct lu_env *env,
                          * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
                          * LDLM_CP_CALLBACK.
                          */
+                        osc_lock_build_res(env, obj, resname);
+                        osc_lock_build_policy(env, lock, policy);
                         result = osc_enqueue_base(osc_export(obj), resname,
                                           &ols->ols_flags, policy,
                                           &ols->ols_lvb,
@@ -1310,6 +1201,7 @@ static int osc_lock_enqueue(const struct lu_env *env,
                         }
                 } else {
                         ols->ols_state = OLS_GRANTED;
+                        ols->ols_owner = osc_env_io(env);
                 }
         }
         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
@@ -1343,13 +1235,14 @@ static int osc_lock_use(const struct lu_env *env,
         int rc;
 
         LASSERT(!olck->ols_hold);
+
         /*
          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
          * flag is not set. This protects us from a concurrent blocking ast.
          */
         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
         if (rc == 0) {
-                olck->ols_hold = olck->ols_has_ref = 1;
+                olck->ols_hold = 1;
                 olck->ols_state = OLS_GRANTED;
         } else {
                 struct cl_lock *lock;
@@ -1422,8 +1315,7 @@ static void osc_lock_cancel(const struct lu_env *env,
                 discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
                 result = osc_lock_flush(olck, discard);
-                if (olck->ols_hold)
-                        osc_lock_unuse(env, slice);
+                osc_lock_unhold(olck);
 
                 lock_res_and_lock(dlmlock);
                 /* Now that we're the only user of dlm read/write reference,
@@ -1477,12 +1369,13 @@ static int osc_lock_has_pages(struct osc_lock *olck)
                 plist = &osc_env_info(env)->oti_plist;
                 cl_page_list_init(plist);
 
-                mutex_lock(&oob->oo_debug_mutex);
+                cfs_mutex_lock(&oob->oo_debug_mutex);
 
                 io->ci_obj = cl_object_top(obj);
                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
                 cl_page_gang_lookup(env, obj, io,
-                                    descr->cld_start, descr->cld_end, plist, 0);
+                                    descr->cld_start, descr->cld_end, plist, 0,
+                                    NULL);
                 cl_lock_page_list_fixup(env, io, lock, plist);
                 if (plist->pl_nr > 0) {
                         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
@@ -1493,7 +1386,7 @@ static int osc_lock_has_pages(struct osc_lock *olck)
                 cl_page_list_disown(env, io, plist);
                 cl_page_list_fini(env, plist);
                 cl_io_fini(env, io);
-                mutex_unlock(&oob->oo_debug_mutex);
+                cfs_mutex_unlock(&oob->oo_debug_mutex);
                 cl_env_nested_put(&nest, env);
         } else
                 result = 0;
@@ -1521,8 +1414,7 @@ static void osc_lock_delete(const struct lu_env *env,
         LINVRNT(osc_lock_invariant(olck));
         LINVRNT(!osc_lock_has_pages(olck));
 
-        if (olck->ols_hold)
-                osc_lock_unuse(env, slice);
+        osc_lock_unhold(olck);
         osc_lock_detach(env, olck);
 }
 
@@ -1541,13 +1433,14 @@ static void osc_lock_state(const struct lu_env *env,
                            enum cl_lock_state state)
 {
         struct osc_lock *lock = cl2osc_lock(slice);
-        struct osc_io   *oio  = osc_env_io(env);
 
         /*
          * XXX multiple io contexts can use the lock at the same time.
         */
         LINVRNT(osc_lock_invariant(lock));
         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
+                struct osc_io *oio = osc_env_io(env);
+
                 LASSERT(lock->ols_owner == NULL);
                 lock->ols_owner = oio;
         } else if (state != CLS_HELD)
@@ -1562,7 +1455,7 @@ static int osc_lock_print(const struct lu_env *env, void *cookie,
         /*
          * XXX print ldlm lock and einfo properly.
          */
-        (*p)(env, cookie, "%p %08x "LPU64" %d %p ",
+        (*p)(env, cookie, "%p %08x "LPX64" %d %p ",
             lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
             lock->ols_state, lock->ols_owner);
         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
@@ -1576,24 +1469,37 @@ static int osc_lock_fits_into(const struct lu_env *env,
 {
         struct osc_lock *ols = cl2osc_lock(slice);
 
-        /* If the lock hasn't ever enqueued, it can't be matched because
-         * enqueue process brings in many information which can be used to
-         * determine things such as lockless, CEF_MUST, etc.
-         */
-        if (ols->ols_state < OLS_ENQUEUED)
-                return 0;
-
-        /* Don't match this lock if the lock is able to become lockless lock.
-         * This is because the new lock might be covering a mmap region and
-         * so that it must have a cached at the local side. */
-        if (ols->ols_state < OLS_UPCALL_RECEIVED && ols->ols_locklessable)
+        if (need->cld_enq_flags & CEF_NEVER)
                 return 0;
 
-        /* If the lock is going to be canceled, no reason to match it as well */
-        if (ols->ols_state > OLS_RELEASED)
-                return 0;
-
-        /* go for it. */
+        if (need->cld_mode == CLM_PHANTOM) {
+                /*
+                 * Note: the QUEUED lock can't be matched here, otherwise
+                 * it might cause the deadlocks.
+                 * In read_process,
+                 * P1: enqueued read lock, create sublock1
+                 * P2: enqueued write lock, create sublock2(conflicted
+                 *     with sublock1).
+                 * P1: Grant read lock.
+                 * P1: enqueued glimpse lock(with holding sublock1_read),
+                 *     matched with sublock2, waiting sublock2 to be granted.
+                 *     But sublock2 can not be granted, because P1
+                 *     will not release sublock1. Bang!
+                 */
+                if (ols->ols_state < OLS_GRANTED ||
+                    ols->ols_state > OLS_RELEASED)
+                        return 0;
+        } else if (need->cld_enq_flags & CEF_MUST) {
+                /*
+                 * If the lock hasn't ever enqueued, it can't be matched
+                 * because enqueue process brings in many information
+                 * which can be used to determine things such as lockless,
+                 * CEF_MUST, etc.
+                 */
+                if (ols->ols_state < OLS_UPCALL_RECEIVED &&
+                    ols->ols_locklessable)
+                        return 0;
+        }
         return 1;
 }
 
@@ -1611,14 +1517,6 @@ static const struct cl_lock_operations osc_lock_ops = {
         .clo_fits_into = osc_lock_fits_into,
 };
 
-static int osc_lock_lockless_enqueue(const struct lu_env *env,
-                                     const struct cl_lock_slice *slice,
-                                     struct cl_io *unused, __u32 enqflags)
-{
-        LBUG();
-        return 0;
-}
-
 static int osc_lock_lockless_unuse(const struct lu_env *env,
                                    const struct cl_lock_slice *slice)
 {
@@ -1663,19 +1561,19 @@ static void osc_lock_lockless_state(const struct lu_env *env,
                                     enum cl_lock_state state)
 {
         struct osc_lock *lock = cl2osc_lock(slice);
-        struct osc_io   *oio  = osc_env_io(env);
 
         LINVRNT(osc_lock_invariant(lock));
         if (state == CLS_HELD) {
-                LASSERT(lock->ols_owner == NULL);
+                struct osc_io *oio = osc_env_io(env);
+
+                LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
                 lock->ols_owner = oio;
 
                 /* set the io to be lockless if this lock is for io's
                  * host object */
                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
                         oio->oi_lockless = 1;
-        } else
-                lock->ols_owner = NULL;
+        }
 }
 
 static int osc_lock_lockless_fits_into(const struct lu_env *env,
@@ -1683,12 +1581,18 @@ static int osc_lock_lockless_fits_into(const struct lu_env *env,
                                        const struct cl_lock_descr *need,
                                        const struct cl_io *io)
 {
-        return 0;
+        struct osc_lock *lock = cl2osc_lock(slice);
+
+        if (!(need->cld_enq_flags & CEF_NEVER))
+                return 0;
+
+        /* lockless lock should only be used by its owning io. b22147 */
+        return (lock->ols_owner == osc_env_io(env));
 }
 
 static const struct cl_lock_operations osc_lock_lockless_ops = {
         .clo_fini      = osc_lock_fini,
-        .clo_enqueue   = osc_lock_lockless_enqueue,
+        .clo_enqueue   = osc_lock_enqueue,
         .clo_wait      = osc_lock_lockless_wait,
         .clo_unuse     = osc_lock_lockless_unuse,
         .clo_state     = osc_lock_lockless_state,
@@ -1707,6 +1611,7 @@ int osc_lock_init(const struct lu_env *env,
         OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
         if (clk != NULL) {
                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
+                cfs_atomic_set(&clk->ols_pageref, 0);
                 clk->ols_state = OLS_NEW;
                 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
                 result = 0;
@@ -1715,5 +1620,26 @@ int osc_lock_init(const struct lu_env *env,
         return result;
 }
 
+int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
+{
+        struct osc_lock *olock;
+        int              rc = 0;
+
+        cfs_spin_lock(&osc_ast_guard);
+        olock = dlm->l_ast_data;
+        /*
+         * there's a very rare race with osc_page_addref_lock(), but that
+         * doesn't matter because in the worst case we don't cancel a lock
+         * which we actually can, that's no harm.
+         */
+        if (olock != NULL &&
+            cfs_atomic_add_return(_PAGEREF_MAGIC,
+                                  &olock->ols_pageref) != _PAGEREF_MAGIC) {
+                cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
+                rc = 1;
+        }
+        cfs_spin_unlock(&osc_ast_guard);
+        return rc;
+}
 
 /** @} osc */