X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fobdclass%2Fcl_lock.c;h=6aaf676452d6f0310d5b3e2cf4ccf2b90384de27;hp=58ca3f3b59caaa092460e5fdbf3855f88c597c2c;hb=8701e7e4b5ec1b34700c95b9b6588f4745730b72;hpb=eb475423196375e302ea6130e7a53b4d1d016e09;ds=sidebyside

diff --git a/lustre/obdclass/cl_lock.c b/lustre/obdclass/cl_lock.c
index 58ca3f3..6aaf676 100644
--- a/lustre/obdclass/cl_lock.c
+++ b/lustre/obdclass/cl_lock.c
@@ -41,6 +41,7 @@
 
 #define DEBUG_SUBSYSTEM S_CLASS
 
+#include
 #include
 #include
 #include
@@ -108,9 +109,9 @@ static int cl_lock_invariant(const struct lu_env *env,
 
 	result = atomic_read(&lock->cll_ref) > 0 &&
 		 cl_lock_invariant_trusted(env, lock);
-        if (!result && env != NULL)
-                CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
-        return result;
+	if (!result && env != NULL)
+		CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken\n");
+	return result;
 }
 
 /**
@@ -203,12 +204,12 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
 		       struct cl_object *obj,
 		       const struct cl_lock_operations *ops)
 {
-        ENTRY;
-        slice->cls_lock = lock;
-        cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
-        slice->cls_obj = obj;
-        slice->cls_ops = ops;
-        EXIT;
+	ENTRY;
+	slice->cls_lock = lock;
+	list_add_tail(&slice->cls_linkage, &lock->cll_layers);
+	slice->cls_obj = obj;
+	slice->cls_ops = ops;
+	EXIT;
 }
 EXPORT_SYMBOL(cl_lock_slice_add);
 
@@ -268,12 +269,12 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
 
 	ENTRY;
 	cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
-	while (!cfs_list_empty(&lock->cll_layers)) {
+	while (!list_empty(&lock->cll_layers)) {
 		struct cl_lock_slice *slice;
 
-		slice = cfs_list_entry(lock->cll_layers.next,
-				       struct cl_lock_slice, cls_linkage);
-		cfs_list_del_init(lock->cll_layers.next);
+		slice = list_entry(lock->cll_layers.next,
+				   struct cl_lock_slice, cls_linkage);
+		list_del_init(lock->cll_layers.next);
 		slice->cls_ops->clo_fini(env, slice);
 	}
 	CS_LOCK_DEC(obj, total);
@@ -310,7 +311,7 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
 
 	if (atomic_dec_and_test(&lock->cll_ref)) {
 		if (lock->cll_state == CLS_FREEING) {
-			LASSERT(cfs_list_empty(&lock->cll_linkage));
+			LASSERT(list_empty(&lock->cll_linkage));
 			cl_lock_free(env, lock);
 		}
 		CS_LOCK_DEC(obj, busy);
@@ -370,25 +371,25 @@ static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
 }
 
 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
-                                     struct cl_object *obj,
-                                     const struct cl_io *io,
-                                     const struct cl_lock_descr *descr)
+				     struct cl_object *obj,
+				     const struct cl_io *io,
+				     const struct cl_lock_descr *descr)
 {
-        struct cl_lock *lock;
-        struct lu_object_header *head;
+	struct cl_lock *lock;
+	struct lu_object_header *head;
 
-        ENTRY;
-        OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, __GFP_IO);
-        if (lock != NULL) {
+	ENTRY;
+	OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, GFP_NOFS);
+	if (lock != NULL) {
 		atomic_set(&lock->cll_ref, 1);
-                lock->cll_descr = *descr;
-                lock->cll_state = CLS_NEW;
-                cl_object_get(obj);
+		lock->cll_descr = *descr;
+		lock->cll_state = CLS_NEW;
+		cl_object_get(obj);
 		lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref,
 				     "cl_lock", lock);
-                CFS_INIT_LIST_HEAD(&lock->cll_layers);
-                CFS_INIT_LIST_HEAD(&lock->cll_linkage);
-                CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
+		INIT_LIST_HEAD(&lock->cll_layers);
+		INIT_LIST_HEAD(&lock->cll_linkage);
+		INIT_LIST_HEAD(&lock->cll_inclosure);
 		lu_ref_init(&lock->cll_reference);
 		lu_ref_init(&lock->cll_holders);
 		mutex_init(&lock->cll_guard);
@@ -399,20 +400,19 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
 		CS_LOCK_INC(obj, total);
 		CS_LOCK_INC(obj, create);
 		cl_lock_lockdep_init(lock);
-                cfs_list_for_each_entry(obj, &head->loh_layers,
-                                        co_lu.lo_linkage) {
-                        int err;
-
-                        err = obj->co_ops->coo_lock_init(env, obj, lock, io);
-                        if (err != 0) {
-                                cl_lock_finish(env, lock);
-                                lock = ERR_PTR(err);
-                                break;
-                        }
-                }
-        } else
-                lock = ERR_PTR(-ENOMEM);
-        RETURN(lock);
+		list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
+			int err;
+
+			err = obj->co_ops->coo_lock_init(env, obj, lock, io);
+			if (err != 0) {
+				cl_lock_finish(env, lock);
+				lock = ERR_PTR(err);
+				break;
+			}
+		}
+	} else
+		lock = ERR_PTR(-ENOMEM);
+	RETURN(lock);
 }
 
 /**
@@ -480,7 +480,7 @@ static int cl_lock_fits_into(const struct lu_env *env,
 
 	LINVRNT(cl_lock_invariant_trusted(env, lock));
 	ENTRY;
-	cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
 		if (slice->cls_ops->clo_fits_into != NULL &&
 		    !slice->cls_ops->clo_fits_into(env, slice, need, io))
 			RETURN(0);
@@ -499,9 +499,9 @@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
 
 	ENTRY;
 	head = cl_object_header(obj);
-	LINVRNT(spin_is_locked(&head->coh_lock_guard));
+	assert_spin_locked(&head->coh_lock_guard);
 	CS_LOCK_INC(obj, lookup);
-	cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
+	list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
 		int matched;
 
 		matched = cl_lock_ext_match(&lock->cll_descr, need) &&
@@ -556,8 +556,9 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env,
 		spin_lock(&head->coh_lock_guard);
 		ghost = cl_lock_lookup(env, obj, io, need);
 		if (ghost == NULL) {
-			cfs_list_add_tail(&lock->cll_linkage,
-					  &head->coh_locks);
+			cl_lock_get_trust(lock);
+			list_add_tail(&lock->cll_linkage,
+				      &head->coh_locks);
 			spin_unlock(&head->coh_lock_guard);
 			CS_LOCK_INC(obj, busy);
 		} else {
@@ -643,7 +644,7 @@ const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
 
 	LINVRNT(cl_lock_invariant_trusted(NULL, lock));
 	ENTRY;
-	cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
 		if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
 			RETURN(slice);
 	}
@@ -800,8 +801,8 @@ static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
 		const struct cl_lock_slice *slice;
 
 		lock->cll_flags |= CLF_CANCELLED;
-		cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
-						cls_linkage) {
+		list_for_each_entry_reverse(slice, &lock->cll_layers,
+					    cls_linkage) {
 			if (slice->cls_ops->clo_cancel != NULL)
 				slice->cls_ops->clo_cancel(env, slice);
 		}
@@ -819,35 +820,42 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
 
 	ENTRY;
 	if (lock->cll_state < CLS_FREEING) {
+		bool in_cache;
+
 		LASSERT(lock->cll_state != CLS_INTRANSIT);
 		cl_lock_state_set(env, lock, CLS_FREEING);
 
 		head = cl_object_header(lock->cll_descr.cld_obj);
 
 		spin_lock(&head->coh_lock_guard);
-		cfs_list_del_init(&lock->cll_linkage);
+		in_cache = !list_empty(&lock->cll_linkage);
+		if (in_cache)
+			list_del_init(&lock->cll_linkage);
 		spin_unlock(&head->coh_lock_guard);
-                /*
-                 * From now on, no new references to this lock can be acquired
-                 * by cl_lock_lookup().
-                 */
-                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
-                                                cls_linkage) {
-                        if (slice->cls_ops->clo_delete != NULL)
-                                slice->cls_ops->clo_delete(env, slice);
-                }
-                /*
-                 * From now on, no new references to this lock can be acquired
-                 * by layer-specific means (like a pointer from struct
-                 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
-                 * lov).
-                 *
-                 * Lock will be finally freed in cl_lock_put() when last of
-                 * existing references goes away.
-                 */
-        }
-        EXIT;
+		if (in_cache) /* coh_locks cache holds a refcount. */
+			cl_lock_put(env, lock);
+
+		/*
+		 * From now on, no new references to this lock can be acquired
+		 * by cl_lock_lookup().
+		 */
+		list_for_each_entry_reverse(slice, &lock->cll_layers,
+					    cls_linkage) {
+			if (slice->cls_ops->clo_delete != NULL)
+				slice->cls_ops->clo_delete(env, slice);
+		}
+		/*
+		 * From now on, no new references to this lock can be acquired
+		 * by layer-specific means (like a pointer from struct
+		 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
+		 * lov).
+		 *
+		 * Lock will be finally freed in cl_lock_put() when last of
+		 * existing references goes away.
+		 */
+	}
+	EXIT;
 }
 
 /**
@@ -1000,7 +1008,7 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
 	LINVRNT(cl_lock_is_mutexed(lock));
 	LINVRNT(cl_lock_invariant(env, lock));
 
-	cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
+	list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
 		if (slice->cls_ops->clo_state != NULL)
 			slice->cls_ops->clo_state(env, slice, state);
 	wake_up_all(&lock->cll_wq);
@@ -1058,29 +1066,29 @@ EXPORT_SYMBOL(cl_lock_state_set);
  */
 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
 {
-        const struct cl_lock_slice *slice;
-        int result;
-
-        do {
-                result = 0;
+	const struct cl_lock_slice *slice;
+	int result;
 
-                LINVRNT(cl_lock_is_mutexed(lock));
-                LINVRNT(cl_lock_invariant(env, lock));
-                LASSERT(lock->cll_state == CLS_INTRANSIT);
+	do {
+		result = 0;
 
-                result = -ENOSYS;
-                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
-                                                cls_linkage) {
-                        if (slice->cls_ops->clo_unuse != NULL) {
-                                result = slice->cls_ops->clo_unuse(env, slice);
-                                if (result != 0)
-                                        break;
-                        }
-                }
-                LASSERT(result != -ENOSYS);
-        } while (result == CLO_REPEAT);
+		LINVRNT(cl_lock_is_mutexed(lock));
+		LINVRNT(cl_lock_invariant(env, lock));
+		LASSERT(lock->cll_state == CLS_INTRANSIT);
+
+		result = -ENOSYS;
+		list_for_each_entry_reverse(slice, &lock->cll_layers,
+					    cls_linkage) {
+			if (slice->cls_ops->clo_unuse != NULL) {
+				result = slice->cls_ops->clo_unuse(env, slice);
+				if (result != 0)
+					break;
+			}
+		}
+		LASSERT(result != -ENOSYS);
+	} while (result == CLO_REPEAT);
 
-        return result;
+	return result;
 }
 
 /**
@@ -1104,7 +1112,7 @@ int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
 
 	result = -ENOSYS;
 	state = cl_lock_intransit(env, lock);
-	cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
 		if (slice->cls_ops->clo_use != NULL) {
 			result = slice->cls_ops->clo_use(env, slice);
 			if (result != 0)
@@ -1157,7 +1165,7 @@ static int cl_enqueue_kick(const struct lu_env *env,
 
 	ENTRY;
 	result = -ENOSYS;
-	cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
 		if (slice->cls_ops->clo_enqueue != NULL) {
 			result = slice->cls_ops->clo_enqueue(env, slice,
 							      io, flags);
@@ -1487,7 +1495,7 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
 			break;
 
 		result = -ENOSYS;
-		cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+		list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
 			if (slice->cls_ops->clo_wait != NULL) {
 				result = slice->cls_ops->clo_wait(env, slice);
 				if (result != 0)
@@ -1560,7 +1568,7 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
 	LINVRNT(cl_lock_invariant(env, lock));
 
 	pound = 0;
-	cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+	list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
 		if (slice->cls_ops->clo_weigh != NULL) {
 			ounce = slice->cls_ops->clo_weigh(env, slice);
 			pound += ounce;
@@ -1597,7 +1605,7 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
 	LINVRNT(cl_lock_is_mutexed(lock));
 	LINVRNT(cl_lock_invariant(env, lock));
 
-	cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+	list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
 		if (slice->cls_ops->clo_modify != NULL) {
 			result = slice->cls_ops->clo_modify(env, slice, desc);
 			if (result != 0)
@@ -1630,7 +1638,7 @@ void cl_lock_closure_init(const struct lu_env *env,
 	LINVRNT(cl_lock_is_mutexed(origin));
 	LINVRNT(cl_lock_invariant(env, origin));
 
-	CFS_INIT_LIST_HEAD(&closure->clc_list);
+	INIT_LIST_HEAD(&closure->clc_list);
 	closure->clc_origin = origin;
 	closure->clc_wait = wait;
 	closure->clc_nr = 0;
@@ -1659,7 +1667,7 @@ int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
 
 	result = cl_lock_enclosure(env, lock, closure);
 	if (result == 0) {
-		cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+		list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
 			if (slice->cls_ops->clo_closure != NULL) {
 				result = slice->cls_ops->clo_closure(env, slice,
 								     closure);
@@ -1692,10 +1700,10 @@ int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
 		 * If lock->cll_inclosure is not empty, lock is already in
 		 * this closure.
		 */
-		if (cfs_list_empty(&lock->cll_inclosure)) {
+		if (list_empty(&lock->cll_inclosure)) {
 			cl_lock_get_trust(lock);
 			lu_ref_add(&lock->cll_reference, "closure", closure);
-			cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
+			list_add(&lock->cll_inclosure, &closure->clc_list);
 			closure->clc_nr++;
 		} else
 			cl_lock_mutex_put(env, lock);
@@ -1725,19 +1733,19 @@ EXPORT_SYMBOL(cl_lock_enclosure);
 void cl_lock_disclosure(const struct lu_env *env,
 			struct cl_lock_closure *closure)
 {
-        struct cl_lock *scan;
-        struct cl_lock *temp;
-
-        cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
-        cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
-                                     cll_inclosure){
-                cfs_list_del_init(&scan->cll_inclosure);
-                cl_lock_mutex_put(env, scan);
-                lu_ref_del(&scan->cll_reference, "closure", closure);
-                cl_lock_put(env, scan);
-                closure->clc_nr--;
-        }
-        LASSERT(closure->clc_nr == 0);
+	struct cl_lock *scan;
+	struct cl_lock *temp;
+
+	cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
+	list_for_each_entry_safe(scan, temp, &closure->clc_list,
+				 cll_inclosure){
+		list_del_init(&scan->cll_inclosure);
+		cl_lock_mutex_put(env, scan);
+		lu_ref_del(&scan->cll_reference, "closure", closure);
+		cl_lock_put(env, scan);
+		closure->clc_nr--;
+	}
+	LASSERT(closure->clc_nr == 0);
 }
 EXPORT_SYMBOL(cl_lock_disclosure);
 
@@ -1745,7 +1753,7 @@ EXPORT_SYMBOL(cl_lock_disclosure);
 void cl_lock_closure_fini(struct cl_lock_closure *closure)
 {
 	LASSERT(closure->clc_nr == 0);
-	LASSERT(cfs_list_empty(&closure->clc_list));
+	LASSERT(list_empty(&closure->clc_list));
 }
 EXPORT_SYMBOL(cl_lock_closure_fini);
 
@@ -1868,7 +1876,7 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
 	spin_lock(&head->coh_lock_guard);
 	/* It is fine to match any group lock since there could be only one
 	 * with a uniq gid and it conflicts with all other lock modes too */
-	cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
+	list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
 		if (scan != except &&
 		    (scan->cll_descr.cld_mode == CLM_GROUP ||
 		    cl_lock_ext_match(&scan->cll_descr, need)) &&
@@ -1910,7 +1918,7 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
 
 	head = cl_object_header(obj);
 	spin_lock(&head->coh_lock_guard);
-	while (!cfs_list_empty(&head->coh_locks)) {
+	while (!list_empty(&head->coh_locks)) {
 		lock = container_of(head->coh_locks.next,
 				    struct cl_lock, cll_linkage);
 		cl_lock_get_trust(lock);
@@ -2128,10 +2136,8 @@ const char *cl_lock_mode_name(const enum cl_lock_mode mode)
 		[CLM_WRITE] = "W",
 		[CLM_GROUP] = "G"
 	};
-	if (0 <= mode && mode < ARRAY_SIZE(names))
-		return names[mode];
-	else
-		return "U";
+	CLASSERT(CLM_MAX == ARRAY_SIZE(names));
+	return names[mode];
 }
 EXPORT_SYMBOL(cl_lock_mode_name);
 
@@ -2163,7 +2169,7 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
 	cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
 	(*printer)(env, cookie, " {\n");
 
-	cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
 		(*printer)(env, cookie, " %s@%p: ",
 			   slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
 			   slice);
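
Summary of the functional change: apart from the mechanical cfs_list_*()/CFS_INIT_LIST_HEAD() to list_*()/INIT_LIST_HEAD() conversion, the GFP_NOFS allocation flag in cl_lock_alloc(), the assert_spin_locked() cleanup, the CLASSERT() in cl_lock_mode_name() and the whitespace changes, the behavioural change is in lock cache reference counting: cl_lock_find() now takes an extra reference (cl_lock_get_trust()) when it links a new lock into head->coh_locks, and cl_lock_delete0() drops that reference with cl_lock_put() only when the lock was actually still linked into the cache at deletion time (the new in_cache flag), so the coh_locks list owns one refcount on every cached cl_lock.

Below is a minimal, self-contained userspace sketch of that ownership pattern. It is NOT Lustre code; all names (struct obj, cache_insert(), cache_remove(), obj_get(), obj_put()) are illustrative stand-ins for cl_lock, the insertion path in cl_lock_find(), cl_lock_delete0(), cl_lock_get_trust() and cl_lock_put().

/*
 * Sketch of the "cache owns a reference" rule this patch applies to
 * coh_locks: take a reference when an object is linked into the cache,
 * and drop that reference on removal only if the object was in fact
 * still linked (the in_cache test below mirrors cl_lock_delete0()).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	atomic_int	 refcount;
	struct obj	*next;		/* singly-linked cache list */
	bool		 linked;	/* analogue of !list_empty(&cll_linkage) */
};

struct cache {
	struct obj	*head;
};

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o == NULL)
		abort();
	atomic_init(&o->refcount, 1);	/* the creator's reference */
	return o;
}

static void obj_get(struct obj *o)	/* analogue of cl_lock_get_trust() */
{
	atomic_fetch_add(&o->refcount, 1);
}

static void obj_put(struct obj *o)	/* analogue of cl_lock_put() */
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);		/* last reference is gone */
}

/* Link into the cache; the cache takes its own reference. */
static void cache_insert(struct cache *c, struct obj *o)
{
	obj_get(o);
	o->next = c->head;
	o->linked = true;
	c->head = o;
}

/* Unlink from the cache and drop the cache's reference, if it was linked. */
static void cache_remove(struct cache *c, struct obj *o)
{
	bool in_cache = o->linked;

	if (in_cache) {
		struct obj **p;

		for (p = &c->head; *p != o; p = &(*p)->next)
			;
		*p = o->next;
		o->linked = false;
	}
	if (in_cache)			/* the cache held a refcount */
		obj_put(o);
}

int main(void)
{
	struct cache c = { .head = NULL };
	struct obj *o = obj_new();	/* refcount == 1 */

	cache_insert(&c, o);		/* refcount == 2 */
	cache_remove(&c, o);		/* refcount == 1 */
	cache_remove(&c, o);		/* no-op: already unlinked, no double put */
	obj_put(o);			/* refcount == 0, object freed */
	return 0;
}

The in_cache test is what prevents a double put: an object that was never cached, or that a concurrent path already unlinked, must not have the cache's reference dropped a second time, while an object deleted while still cached must have it dropped exactly once or it would leak.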