X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fldlm%2Fl_lock.c;h=b652097355ca025fe4a40ee4f2902b99e253b4d4;hb=55e8287b7024ee4ac280806caea9636dd454a0ed;hp=2a4f83233485c496726c785d024426dd769a4352;hpb=250866233e4ae873f047277f990ce647cb5de246;p=fs%2Flustre-release.git

diff --git a/lustre/ldlm/l_lock.c b/lustre/ldlm/l_lock.c
index 2a4f832..b652097 100644
--- a/lustre/ldlm/l_lock.c
+++ b/lustre/ldlm/l_lock.c
@@ -48,91 +48,39 @@
 #include
 #include
 
-/* invariants:
- - only the owner of the lock changes l_owner/l_depth
- - if a non-owner changes or checks the variables a spin lock is taken
-*/
-
-void l_lock_init(struct lustre_lock *lock)
+/*
+ * ldlm locking uses resource to serialize access to locks
+ * but there is a case when we change resource of lock upon
+ * enqueue reply. we rely on that lock->l_resource = new_res
+ * is atomic
+ */
+struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock)
 {
-        sema_init(&lock->l_sem, 1);
-        spin_lock_init(&lock->l_spin);
+        struct ldlm_resource *res = lock->l_resource;
+
+        if (!res->lr_namespace->ns_client) {
+                /* on server-side resource of lock doesn't change */
+                lock_res(res);
+                return res;
+        }
+
+        lock_bitlock(lock);
+        res = lock->l_resource;
+        lock_res(res);
+        return res;
 }
 
-void l_lock(struct lustre_lock *lock)
+void unlock_res_and_lock(struct ldlm_lock *lock)
 {
-        int owner = 0;
-
-        spin_lock(&lock->l_spin);
-        if (lock->l_owner == current)
-                owner = 1;
-        spin_unlock(&lock->l_spin);
-
-        /* This is safe to increment outside the spinlock because we
-         * can only have 1 CPU running on the current task
-         * (i.e. l_owner == current), regardless of the number of CPUs.
-         */
-        if (owner) {
-                ++lock->l_depth;
-        } else {
-                down(&lock->l_sem);
-                spin_lock(&lock->l_spin);
-                lock->l_owner = current;
-                lock->l_depth = 0;
-                spin_unlock(&lock->l_spin);
-        }
-}
+        struct ldlm_resource *res = lock->l_resource;
 
-void l_unlock(struct lustre_lock *lock)
-{
-        LASSERT(lock->l_owner == current);
-        LASSERT(lock->l_depth >= 0);
-
-        spin_lock(&lock->l_spin);
-        if (--lock->l_depth < 0) {
-                lock->l_owner = NULL;
-                spin_unlock(&lock->l_spin);
-                up(&lock->l_sem);
+        if (!res->lr_namespace->ns_client) {
+                /* on server-side resource of lock doesn't change */
+                unlock_res(res);
                 return;
         }
-        spin_unlock(&lock->l_spin);
-}
-
-int l_has_lock(struct lustre_lock *lock)
-{
-        int depth = -1, owner = 0;
-
-        spin_lock(&lock->l_spin);
-        if (lock->l_owner == current) {
-                depth = lock->l_depth;
-                owner = 1;
-        }
-        spin_unlock(&lock->l_spin);
-        if (depth >= 0)
-                CDEBUG(D_INFO, "lock_depth: %d\n", depth);
-        return owner;
+        unlock_res(res);
+        unlock_bitlock(lock);
 }
 
-#ifdef __KERNEL__
-#include
-void l_check_no_ns_lock(struct ldlm_namespace *ns)
-{
-        static unsigned long next_msg;
-
-        if (l_has_lock(&ns->ns_lock) && time_after(jiffies, next_msg)) {
-                CERROR("namespace %s lock held during RPCs; tell phil\n",
-                       ns->ns_name);
-#if (LUSTRE_KERNEL_VERSION >= 30)
-                CERROR(portals_debug_dumpstack());
-#endif
-                next_msg = jiffies + 60 * HZ;
-        }
-}
-
-#else
-void l_check_no_ns_lock(struct ldlm_namespace *ns)
-{
-#warning "FIXME: check lock in user space??"
-}
-#endif /* __KERNEL__ */
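
For reference, the sketch below shows how a caller might use the lock_res_and_lock()/unlock_res_and_lock() pair introduced by this patch: the helper returns the resource that is actually locked, and on a client namespace it takes the per-lock bitlock first so that lock->l_resource cannot be swapped (on enqueue reply) between reading it and locking it; on the server side the resource never changes, so the bitlock is skipped. This is a hypothetical illustration and not part of the patch: the function name example_update_lock() and its placeholder body are assumptions, and the required ldlm headers (elided as bare "#include" lines in the hunk above) are assumed to be the ones l_lock.c already uses.

/* Hypothetical caller, for illustration only -- not part of this patch.
 * Assumes the same ldlm headers that l_lock.c itself includes. */
static void example_update_lock(struct ldlm_lock *lock)
{
        struct ldlm_resource *res;

        /* Returns the resource that is now locked.  On a client namespace
         * the helper takes the per-lock bitlock first, re-reads
         * lock->l_resource, and only then takes the resource lock, so a
         * concurrent resource change on enqueue reply cannot race with us. */
        res = lock_res_and_lock(lock);

        CDEBUG(D_INFO, "resource %p locked\n", res);

        /* ... inspect or modify the lock's state here; it is serialized by
         * the resource lock (and, on the client, by the bitlock) ... */

        /* Drops the resource lock, and the bitlock on the client side. */
        unlock_res_and_lock(lock);
}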