X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Flustre_handles.c;h=7a7c19f829c373c54cadc333fa3cb183af024137;hb=9fb46705ae86aa2c0ac29427f0ff24f923560eb7;hp=6cf943b6db85f985264122c019f8d2392801c963;hpb=637b7bca5e06c27a402fc4b4c51bb833e3b8a785;p=fs%2Flustre-release.git

diff --git a/lustre/obdclass/lustre_handles.c b/lustre/obdclass/lustre_handles.c
index 6cf943b..7a7c19f 100644
--- a/lustre/obdclass/lustre_handles.c
+++ b/lustre/obdclass/lustre_handles.c
@@ -53,17 +53,17 @@
 # define list_for_each_rcu         cfs_list_for_each
 # define list_for_each_safe_rcu    cfs_list_for_each_safe
 # define list_for_each_entry_rcu   cfs_list_for_each_entry
-# define rcu_read_lock()           cfs_spin_lock(&bucket->lock)
-# define rcu_read_unlock()         cfs_spin_unlock(&bucket->lock)
+# define rcu_read_lock()           spin_lock(&bucket->lock)
+# define rcu_read_unlock()         spin_unlock(&bucket->lock)
 #endif /* !__KERNEL__ */
 
 static __u64 handle_base;
 #define HANDLE_INCR 7
-static cfs_spinlock_t handle_base_lock;
+static spinlock_t handle_base_lock;
 
 static struct handle_bucket {
-        cfs_spinlock_t lock;
-        cfs_list_t     head;
+	spinlock_t	lock;
+	cfs_list_t	head;
 } *handle_hash;
 
 #ifdef __arch_um__
@@ -95,33 +95,33 @@ void class_handle_hash(struct portals_handle *h,
          * This is fast, but simplistic cookie generation algorithm, it will
          * need a re-do at some point in the future for security.
          */
-        cfs_spin_lock(&handle_base_lock);
-        handle_base += HANDLE_INCR;
-
-        h->h_cookie = handle_base;
-        if (unlikely(handle_base == 0)) {
-                /*
-                 * Cookie of zero is "dangerous", because in many places it's
-                 * assumed that 0 means "unassigned" handle, not bound to any
-                 * object.
-                 */
-                CWARN("The universe has been exhausted: cookie wrap-around.\n");
-                handle_base += HANDLE_INCR;
-        }
-        cfs_spin_unlock(&handle_base_lock);
+	spin_lock(&handle_base_lock);
+	handle_base += HANDLE_INCR;
+
+	h->h_cookie = handle_base;
+	if (unlikely(handle_base == 0)) {
+		/*
+		 * Cookie of zero is "dangerous", because in many places it's
+		 * assumed that 0 means "unassigned" handle, not bound to any
+		 * object.
+		 */
+		CWARN("The universe has been exhausted: cookie wrap-around.\n");
+		handle_base += HANDLE_INCR;
+	}
+	spin_unlock(&handle_base_lock);
 
         h->h_ops = ops;
-        cfs_spin_lock_init(&h->h_lock);
+	spin_lock_init(&h->h_lock);
 
-        bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
-        cfs_spin_lock(&bucket->lock);
-        list_add_rcu(&h->h_link, &bucket->head);
-        h->h_in = 1;
-        cfs_spin_unlock(&bucket->lock);
+	bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
+	spin_lock(&bucket->lock);
+	list_add_rcu(&h->h_link, &bucket->head);
+	h->h_in = 1;
+	spin_unlock(&bucket->lock);
 
-        CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
-               h, h->h_cookie);
-        EXIT;
+	CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
+	       h, h->h_cookie);
+	EXIT;
 }
 EXPORT_SYMBOL(class_handle_hash);
 
@@ -136,40 +136,40 @@ static void class_handle_unhash_nolock(struct portals_handle *h)
         CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
                h, h->h_cookie);
 
-        cfs_spin_lock(&h->h_lock);
-        if (h->h_in == 0) {
-                cfs_spin_unlock(&h->h_lock);
-                return;
-        }
-        h->h_in = 0;
-        cfs_spin_unlock(&h->h_lock);
-        list_del_rcu(&h->h_link);
+	spin_lock(&h->h_lock);
+	if (h->h_in == 0) {
+		spin_unlock(&h->h_lock);
+		return;
+	}
+	h->h_in = 0;
+	spin_unlock(&h->h_lock);
+	list_del_rcu(&h->h_link);
 }
 
 void class_handle_unhash(struct portals_handle *h)
 {
-        struct handle_bucket *bucket;
-        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
+	struct handle_bucket *bucket;
+	bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
 
-        cfs_spin_lock(&bucket->lock);
-        class_handle_unhash_nolock(h);
-        cfs_spin_unlock(&bucket->lock);
+	spin_lock(&bucket->lock);
+	class_handle_unhash_nolock(h);
+	spin_unlock(&bucket->lock);
 }
 EXPORT_SYMBOL(class_handle_unhash);
 
 void class_handle_hash_back(struct portals_handle *h)
 {
-        struct handle_bucket *bucket;
-        ENTRY;
+	struct handle_bucket *bucket;
+	ENTRY;
 
-        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
+	bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
 
-        cfs_spin_lock(&bucket->lock);
-        list_add_rcu(&h->h_link, &bucket->head);
-        h->h_in = 1;
-        cfs_spin_unlock(&bucket->lock);
+	spin_lock(&bucket->lock);
+	list_add_rcu(&h->h_link, &bucket->head);
+	h->h_in = 1;
+	spin_unlock(&bucket->lock);
 
-        EXIT;
+	EXIT;
 }
 EXPORT_SYMBOL(class_handle_hash_back);
 
@@ -191,17 +191,17 @@ void *class_handle2object(__u64 cookie)
                 if (h->h_cookie != cookie)
                         continue;
 
-                cfs_spin_lock(&h->h_lock);
-                if (likely(h->h_in != 0)) {
+		spin_lock(&h->h_lock);
+		if (likely(h->h_in != 0)) {
                         h->h_ops->hop_addref(h);
-                        retval = h;
-                }
-                cfs_spin_unlock(&h->h_lock);
-                break;
-        }
-        rcu_read_unlock();
-
-        RETURN(retval);
+			retval = h;
+		}
+		spin_unlock(&h->h_lock);
+		break;
+	}
+	rcu_read_unlock();
+
+	RETURN(retval);
 }
 EXPORT_SYMBOL(class_handle2object);
 
@@ -229,12 +229,12 @@
         if (handle_hash == NULL)
                 return -ENOMEM;
 
-        cfs_spin_lock_init(&handle_base_lock);
-        for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
-             bucket--) {
-                CFS_INIT_LIST_HEAD(&bucket->head);
-                cfs_spin_lock_init(&bucket->lock);
-        }
+	spin_lock_init(&handle_base_lock);
+	for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
+	     bucket--) {
+		CFS_INIT_LIST_HEAD(&bucket->head);
+		spin_lock_init(&bucket->lock);
+	}
 
         /** bug 21430: add randomness to the initial base */
         cfs_get_random_bytes(seed, sizeof(seed));
@@ -249,24 +249,24 @@
 static int cleanup_all_handles(void)
 {
-        int rc;
-        int i;
+	int rc;
+	int i;
 
-        for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
-                struct portals_handle *h;
+	for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
+		struct portals_handle *h;
 
-                cfs_spin_lock(&handle_hash[i].lock);
-                list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
+		spin_lock(&handle_hash[i].lock);
+		list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
                         CERROR("force clean handle "LPX64" addr %p ops %p\n",
                                h->h_cookie, h, h->h_ops);
-                        class_handle_unhash_nolock(h);
-                        rc++;
-                }
-                cfs_spin_unlock(&handle_hash[i].lock);
-        }
+			class_handle_unhash_nolock(h);
+			rc++;
+		}
+		spin_unlock(&handle_hash[i].lock);
+	}
 
-        return rc;
+	return rc;
 }
 
 void class_handle_cleanup(void)