diff --git a/libcfs/libcfs/libcfs_lock.c b/libcfs/libcfs/libcfs_lock.c
index b9e2f25..c6ba9e7 100644
--- a/libcfs/libcfs/libcfs_lock.c
+++ b/libcfs/libcfs/libcfs_lock.c
@@ -21,7 +21,7 @@
  * GPL HEADER END
  */
 /* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2012, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  * Author: liang@whamcloud.com
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_LNET
 
 #include <libcfs/libcfs.h>
 
-#ifdef __KERNEL__
-
 /** destroy cpu-partition lock, see libcfs_private.h for more detail */
 void
 cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
@@ -49,7 +44,7 @@ cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
        cfs_percpt_free(pcl->pcl_locks);
        LIBCFS_FREE(pcl, sizeof(*pcl));
 }
-CFS_EXPORT_SYMBOL(cfs_percpt_lock_free);
+EXPORT_SYMBOL(cfs_percpt_lock_free);
 
 /**
  * create cpu-partition lock, see libcfs_private.h for more detail.
@@ -59,10 +54,11 @@ CFS_EXPORT_SYMBOL(cfs_percpt_lock_free);
  * reason we always allocate cacheline-aligned memory block.
  */
 struct cfs_percpt_lock *
-cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
+cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
+                      struct lock_class_key *keys)
 {
        struct cfs_percpt_lock  *pcl;
-       cfs_spinlock_t          *lock;
+       spinlock_t              *lock;
        int                     i;
 
        /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
@@ -77,12 +73,20 @@ cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
                return NULL;
        }
 
-       cfs_percpt_for_each(lock, i, pcl->pcl_locks)
-               cfs_spin_lock_init(lock);
+       if (keys == NULL) {
+               CWARN("Cannot setup class key for percpt lock, you may see "
+                     "recursive locking warnings which are actually fake.\n");
+       }
+
+       cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
+               spin_lock_init(lock);
+               if (keys != NULL)
+                       lockdep_set_class(lock, &keys[i]);
+       }
 
        return pcl;
 }
-CFS_EXPORT_SYMBOL(cfs_percpt_lock_alloc);
+EXPORT_SYMBOL(cfs_percpt_lock_create);
 
 /**
  * lock a CPU partition
@@ -95,6 +99,7 @@ CFS_EXPORT_SYMBOL(cfs_percpt_lock_alloc);
  */
 void
 cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
+__acquires(pcl->pcl_locks)
 {
        int     ncpt = cfs_cpt_number(pcl->pcl_cptab);
        int     i;
@@ -109,13 +114,13 @@ cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
        }
 
        if (likely(index != CFS_PERCPT_LOCK_EX)) {
-               cfs_spin_lock(pcl->pcl_locks[index]);
+               spin_lock(pcl->pcl_locks[index]);
                return;
        }
 
        /* exclusive lock request */
        for (i = 0; i < ncpt; i++) {
-               cfs_spin_lock(pcl->pcl_locks[i]);
+               spin_lock(pcl->pcl_locks[i]);
                if (i == 0) {
                        LASSERT(!pcl->pcl_locked);
                        /* nobody should take private lock after this
@@ -124,11 +129,12 @@ cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
                }
        }
 }
-CFS_EXPORT_SYMBOL(cfs_percpt_lock);
+EXPORT_SYMBOL(cfs_percpt_lock);
 
 /** unlock a CPU partition */
 void
 cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
+__releases(pcl->pcl_locks)
 {
        int     ncpt = cfs_cpt_number(pcl->pcl_cptab);
        int     i;
@@ -136,7 +142,7 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
        index = ncpt == 1 ? 0 : index;
 
        if (likely(index != CFS_PERCPT_LOCK_EX)) {
-               cfs_spin_unlock(pcl->pcl_locks[index]);
+               spin_unlock(pcl->pcl_locks[index]);
                return;
        }
 
@@ -145,111 +151,7 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
                        LASSERT(pcl->pcl_locked);
                        pcl->pcl_locked = 0;
                }
-               cfs_spin_unlock(pcl->pcl_locks[i]);
+               spin_unlock(pcl->pcl_locks[i]);
        }
 }
-CFS_EXPORT_SYMBOL(cfs_percpt_unlock);
-
-#else /* !__KERNEL__ */
-# ifdef HAVE_LIBPTHREAD
-
-struct cfs_percpt_lock *
-cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
-{
-       struct cfs_percpt_lock *pcl;
-
-       CFS_ALLOC_PTR(pcl);
-       if (pcl != NULL)
-               pthread_mutex_init(&pcl->pcl_mutex, NULL);
-
-       return pcl;
-}
-
-void
-cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
-{
-       pthread_mutex_destroy(&pcl->pcl_mutex);
-       CFS_FREE_PTR(pcl);
-}
-
-void
-cfs_percpt_lock(struct cfs_percpt_lock *pcl, int lock)
-{
-       pthread_mutex_lock(&(pcl)->pcl_mutex);
-}
-
-void
-cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int lock)
-{
-       pthread_mutex_unlock(&(pcl)->pcl_mutex);
-}
-
-# else /* !HAVE_LIBPTHREAD */
-
-struct cfs_percpt_lock *
-cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
-{
-       return (struct cfs_percpt_lock *)CFS_PERCPT_LOCK_MAGIC;
-}
-
-void
-cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
-{
-       LASSERT(pcl == (struct cfs_percpt_lock *)CFS_PERCPT_LOCK_MAGIC);
-}
-
-void
-cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
-{
-       LASSERT(pcl == (struct cfs_percpt_lock *)CFS_PERCPT_LOCK_MAGIC);
-}
-
-void
-cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
-{
-       LASSERT(pcl == (struct cfs_percpt_lock *)CFS_PERCPT_LOCK_MAGIC);
-}
-
-# endif /* HAVE_LIBPTHREAD */
-#endif /* __KERNEL__ */
-
-/** free cpu-partition refcount */
-void
-cfs_percpt_atomic_free(cfs_atomic_t **refs)
-{
-       cfs_percpt_free(refs);
-}
-CFS_EXPORT_SYMBOL(cfs_percpt_atomic_free);
-
-/** allocate cpu-partition refcount with initial value @init_val */
-cfs_atomic_t **
-cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
-{
-       cfs_atomic_t    **refs;
-       cfs_atomic_t    *ref;
-       int             i;
-
-       refs = cfs_percpt_alloc(cptab, sizeof(*ref));
-       if (refs == NULL)
-               return NULL;
-
-       cfs_percpt_for_each(ref, i, refs)
-               cfs_atomic_set(ref, init_val);
-       return refs;
-}
-CFS_EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
-
-/** return sum of cpu-partition refs */
-int
-cfs_percpt_atomic_summary(cfs_atomic_t **refs)
-{
-       cfs_atomic_t    *ref;
-       int             i;
-       int             val = 0;
-
-       cfs_percpt_for_each(ref, i, refs)
-               val += cfs_atomic_read(ref);
-
-       return val;
-}
-CFS_EXPORT_SYMBOL(cfs_percpt_atomic_summary);
+EXPORT_SYMBOL(cfs_percpt_unlock);
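
For readers unfamiliar with the API this patch reshapes, the sketch below shows how a caller might use the renamed cfs_percpt_lock_create() with a per-partition array of lock_class_key, and how the per-partition and exclusive (CFS_PERCPT_LOCK_EX) paths are taken and released. It is not part of the patch: the MY_PERCPT_LOCK_KEYS constant and the my_* names are illustrative only, and the libcfs calls are assumed to behave exactly as shown in the diff above (cfs_cpt_number(), cfs_percpt_lock_create(), cfs_percpt_lock(), cfs_percpt_unlock(), cfs_percpt_lock_free()).

#include <libcfs/libcfs.h>

/* illustrative upper bound on partitions we provide lockdep keys for */
#define MY_PERCPT_LOCK_KEYS	256

static struct cfs_percpt_lock *my_pcl;

static int my_lock_init(struct cfs_cpt_table *cptab)
{
	/* one lock_class_key per CPU partition, so lockdep does not
	 * mistake the nested spin_lock() calls of an exclusive lock
	 * (CFS_PERCPT_LOCK_EX) for recursive locking; keys must have
	 * static storage duration for lockdep */
	static struct lock_class_key keys[MY_PERCPT_LOCK_KEYS];

	if (cfs_cpt_number(cptab) > MY_PERCPT_LOCK_KEYS)
		my_pcl = cfs_percpt_lock_create(cptab, NULL); /* CWARNs, still works */
	else
		my_pcl = cfs_percpt_lock_create(cptab, keys);

	return my_pcl == NULL ? -ENOMEM : 0;
}

static void my_update_one(int cpt)
{
	cfs_percpt_lock(my_pcl, cpt);	/* lock only partition @cpt */
	/* ... touch state private to partition @cpt ... */
	cfs_percpt_unlock(my_pcl, cpt);
}

static void my_update_all(void)
{
	cfs_percpt_lock(my_pcl, CFS_PERCPT_LOCK_EX);	/* take every partition */
	/* ... touch state shared across partitions ... */
	cfs_percpt_unlock(my_pcl, CFS_PERCPT_LOCK_EX);
}

static void my_lock_fini(void)
{
	if (my_pcl != NULL)
		cfs_percpt_lock_free(my_pcl);
}

Supplying a distinct lock_class_key per partition matters for the exclusive path, where all of the per-partition spinlocks are held at once; if they shared one lock class, lockdep would report that nesting as possible recursive locking, which is the "fake" warning the new CWARN in this patch refers to.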