X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fobdclass%2Flustre_handles.c;h=6122f34702e60ac90fcfe303cc7a21a6741e0c34;hp=b19b469cb6f44d1a46d25a4d24288822460c4260;hb=d05fee1919911e56f6ef7eb018c90ae19fa415aa;hpb=cd09f5d6b99545414f393aa36f1cdf627a257811

diff --git a/lustre/obdclass/lustre_handles.c b/lustre/obdclass/lustre_handles.c
index b19b469..6122f34 100644
--- a/lustre/obdclass/lustre_handles.c
+++ b/lustre/obdclass/lustre_handles.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -17,17 +15,15 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -39,243 +35,227 @@
  */
 
 #define DEBUG_SUBSYSTEM S_CLASS
-#ifndef __KERNEL__
-# include <liblustre.h>
-#endif
+
+#include <linux/random.h>
 
 #include <obd_support.h>
 #include <lustre_handles.h>
 #include <lustre_lib.h>
 
-#if !defined(HAVE_RCU) || !defined(__KERNEL__)
-# define list_add_rcu list_add
-# define list_del_rcu list_del
-# define list_for_each_rcu list_for_each
-# define list_for_each_safe_rcu list_for_each_safe
-# define rcu_read_lock() spin_lock(&bucket->lock)
-# define rcu_read_unlock() spin_unlock(&bucket->lock)
-#endif /* ifndef HAVE_RCU */
-
 static __u64 handle_base;
 #define HANDLE_INCR 7
-static spinlock_t handle_base_lock;
+static DEFINE_SPINLOCK(handle_base_lock);
 
 static struct handle_bucket {
-        spinlock_t lock;
-        struct list_head head;
+	spinlock_t		lock;
+	struct list_head	head;
 } *handle_hash;
 
-static atomic_t handle_count = ATOMIC_INIT(0);
-
-#ifdef __arch_um__
-/* For unknown reason, UML uses kmalloc rather than vmalloc to allocate
- * memory(OBD_VMALLOC). Therefore, we have to redefine the
- * HANDLE_HASH_SIZE to make the hash heads don't exceed 128K.
- */
-#define HANDLE_HASH_SIZE 4096
-#else
-#define HANDLE_HASH_SIZE (1 << 14)
-#endif /* ifdef __arch_um__ */
-
+#define HANDLE_HASH_SIZE (1 << 16)
 #define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
 
 /*
  * Generate a unique 64bit cookie (hash) for a handle and insert it into
  * global (per-node) hash-table.
  */
-void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
+void class_handle_hash(struct portals_handle *h,
+		       struct portals_handle_ops *ops)
 {
-        struct handle_bucket *bucket;
-        ENTRY;
-
-        LASSERT(h != NULL);
-        LASSERT(list_empty(&h->h_link));
-
-        /*
-         * This is fast, but simplistic cookie generation algorithm, it will
-         * need a re-do at some point in the future for security.
-         */
-        spin_lock(&handle_base_lock);
-        handle_base += HANDLE_INCR;
-
-        h->h_cookie = handle_base;
-        if (unlikely(handle_base == 0)) {
-                /*
-                 * Cookie of zero is "dangerous", because in many places it's
-                 * assumed that 0 means "unassigned" handle, not bound to any
-                 * object.
-                 */
-                CWARN("The universe has been exhausted: cookie wrap-around.\n");
-                handle_base += HANDLE_INCR;
-        }
-        spin_unlock(&handle_base_lock);
-
-        atomic_inc(&handle_count);
-        h->h_addref = cb;
-        spin_lock_init(&h->h_lock);
-
-        bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
-        spin_lock(&bucket->lock);
-        list_add_rcu(&h->h_link, &bucket->head);
-        h->h_in = 1;
-        spin_unlock(&bucket->lock);
-
-        CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
-               h, h->h_cookie);
-        EXIT;
+	struct handle_bucket *bucket;
+
+	ENTRY;
+
+	LASSERT(h != NULL);
+	LASSERT(list_empty(&h->h_link));
+
+	/*
+	 * This is fast, but simplistic cookie generation algorithm, it will
+	 * need a re-do at some point in the future for security.
+	 */
+	spin_lock(&handle_base_lock);
+	handle_base += HANDLE_INCR;
+
+	if (unlikely(handle_base == 0)) {
+		/*
+		 * Cookie of zero is "dangerous", because in many places it's
+		 * assumed that 0 means "unassigned" handle, not bound to any
+		 * object.
+		 */
+		CWARN("The universe has been exhausted: cookie wrap-around.\n");
+		handle_base += HANDLE_INCR;
+	}
+	h->h_cookie = handle_base;
+	spin_unlock(&handle_base_lock);
+
+	h->h_ops = ops;
+	spin_lock_init(&h->h_lock);
+
+	bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
+	spin_lock(&bucket->lock);
+	list_add_rcu(&h->h_link, &bucket->head);
+	h->h_in = 1;
+	spin_unlock(&bucket->lock);
+
+	CDEBUG(D_INFO, "added object %p with handle %#llx to hash\n",
+	       h, h->h_cookie);
+	EXIT;
 }
+EXPORT_SYMBOL(class_handle_hash);
 
 static void class_handle_unhash_nolock(struct portals_handle *h)
 {
-        if (list_empty(&h->h_link)) {
-                CERROR("removing an already-removed handle ("LPX64")\n",
-                       h->h_cookie);
-                return;
-        }
-
-        CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
-               h, h->h_cookie);
-
-        spin_lock(&h->h_lock);
-        if (h->h_in == 0) {
-                spin_unlock(&h->h_lock);
-                return;
-        }
-        h->h_in = 0;
-        spin_unlock(&h->h_lock);
-        list_del_rcu(&h->h_link);
+	if (list_empty(&h->h_link)) {
+		CERROR("removing an already-removed handle (%#llx)\n",
+		       h->h_cookie);
+		return;
+	}
+
+	CDEBUG(D_INFO, "removing object %p with handle %#llx from hash\n",
+	       h, h->h_cookie);
+
+	spin_lock(&h->h_lock);
+	if (h->h_in == 0) {
+		spin_unlock(&h->h_lock);
+		return;
+	}
+	h->h_in = 0;
+	spin_unlock(&h->h_lock);
+	list_del_rcu(&h->h_link);
 }
 
 void class_handle_unhash(struct portals_handle *h)
 {
-        struct handle_bucket *bucket;
-        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
-
-        spin_lock(&bucket->lock);
-        class_handle_unhash_nolock(h);
-        spin_unlock(&bucket->lock);
+	struct handle_bucket *bucket;
+	bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
 
-        atomic_dec(&handle_count);
+	spin_lock(&bucket->lock);
+	class_handle_unhash_nolock(h);
+	spin_unlock(&bucket->lock);
 }
+EXPORT_SYMBOL(class_handle_unhash);
 
 void class_handle_hash_back(struct portals_handle *h)
 {
-        struct handle_bucket *bucket;
-        ENTRY;
+	struct handle_bucket *bucket;
+	ENTRY;
 
-        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
+	bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
 
-        atomic_inc(&handle_count);
-        spin_lock(&bucket->lock);
-        list_add_rcu(&h->h_link, &bucket->head);
-        h->h_in = 1;
-        spin_unlock(&bucket->lock);
+	spin_lock(&bucket->lock);
+	list_add_rcu(&h->h_link, &bucket->head);
+	h->h_in = 1;
+	spin_unlock(&bucket->lock);
 
-        EXIT;
+	EXIT;
 }
+EXPORT_SYMBOL(class_handle_hash_back);
 
-void *class_handle2object(__u64 cookie)
+void *class_handle2object(__u64 cookie, const void *owner)
 {
-        struct handle_bucket *bucket;
-        struct list_head *tmp;
-        void *retval = NULL;
-        ENTRY;
-
-        LASSERT(handle_hash != NULL);
-
-        /* Be careful when you want to change this code. See the
-         * rcu_read_lock() definition on top this file. - jxiong */
-        bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
-
-        rcu_read_lock();
-        list_for_each_rcu(tmp, &bucket->head) {
-                struct portals_handle *h;
-                h = list_entry(tmp, struct portals_handle, h_link);
-                if (h->h_cookie != cookie)
-                        continue;
-
-                spin_lock(&h->h_lock);
-                if (likely(h->h_in != 0)) {
-                        h->h_addref(h);
-                        retval = h;
-                }
-                spin_unlock(&h->h_lock);
-                break;
-        }
-        rcu_read_unlock();
-
-        RETURN(retval);
+	struct handle_bucket *bucket;
+	struct portals_handle *h;
+	void *retval = NULL;
+
+	ENTRY;
+
+	LASSERT(handle_hash != NULL);
+
+	/*
+	 * Be careful when you want to change this code. See the
+	 * rcu_read_lock() definition on top this file. - jxiong
+	 */
+	bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(h, &bucket->head, h_link) {
+		if (h->h_cookie != cookie || h->h_owner != owner)
+			continue;
+
+		spin_lock(&h->h_lock);
+		if (likely(h->h_in != 0)) {
+			h->h_ops->hop_addref(h);
+			retval = h;
+		}
+		spin_unlock(&h->h_lock);
+		break;
+	}
+	rcu_read_unlock();
+
+	RETURN(retval);
 }
+EXPORT_SYMBOL(class_handle2object);
 
 void class_handle_free_cb(struct rcu_head *rcu)
 {
-        struct portals_handle *h = RCU2HANDLE(rcu);
-        if (h->h_free_cb) {
-                h->h_free_cb(h->h_ptr, h->h_size);
-        } else {
-                void *ptr = h->h_ptr;
-                unsigned int size = h->h_size;
-                OBD_FREE(ptr, size);
-        }
+	struct portals_handle *h;
+	void *ptr;
+
+	h = container_of(rcu, struct portals_handle, h_rcu);
+	ptr = (void *)(unsigned long)h->h_cookie;
+
+	if (h->h_ops->hop_free != NULL)
+		h->h_ops->hop_free(ptr, h->h_size);
+	else
+		OBD_FREE(ptr, h->h_size);
 }
+EXPORT_SYMBOL(class_handle_free_cb);
 
 int class_handle_init(void)
 {
-        struct handle_bucket *bucket;
+	struct handle_bucket *bucket;
 
-        LASSERT(handle_hash == NULL);
+	LASSERT(handle_hash == NULL);
 
-        OBD_VMALLOC(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
-        if (handle_hash == NULL)
-                return -ENOMEM;
+	OBD_ALLOC_LARGE(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
+	if (handle_hash == NULL)
+		return -ENOMEM;
 
-        spin_lock_init(&handle_base_lock);
-        for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
-             bucket--) {
-                CFS_INIT_LIST_HEAD(&bucket->head);
-                spin_lock_init(&bucket->lock);
-        }
-        ll_get_random_bytes(&handle_base, sizeof(handle_base));
-        LASSERT(handle_base != 0ULL);
+	for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
+	     bucket--) {
+		INIT_LIST_HEAD(&bucket->head);
+		spin_lock_init(&bucket->lock);
+	}
 
-        return 0;
+	get_random_bytes(&handle_base, sizeof(handle_base));
+	LASSERT(handle_base != 0ULL);
+
+	return 0;
 }
 
-static void cleanup_all_handles(void)
+static int cleanup_all_handles(void)
 {
-        int i;
-
-        for (i = 0; i < HANDLE_HASH_SIZE; i++) {
-                struct list_head *pos, *n;
-                n = NULL;
-                spin_lock(&handle_hash[i].lock);
-                list_for_each_safe_rcu(pos, n, &(handle_hash[i].head)) {
-                        struct portals_handle *h;
-                        h = list_entry(pos, struct portals_handle, h_link);
-
-                        CERROR("force clean handle "LPX64" addr %p addref %p\n",
-                               h->h_cookie, h, h->h_addref);
-
-                        class_handle_unhash_nolock(h);
-                }
-                spin_unlock(&handle_hash[i].lock);
-        }
+	int rc;
+	int i;
+
+	for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
+		struct portals_handle *h;
+
+		spin_lock(&handle_hash[i].lock);
+		list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
+			CERROR("force clean handle %#llx addr %p ops %p\n",
+			       h->h_cookie, h, h->h_ops);
+
+			class_handle_unhash_nolock(h);
+			rc++;
+		}
+		spin_unlock(&handle_hash[i].lock);
+	}
+
+	return rc;
 }
 
 void class_handle_cleanup(void)
 {
-        int count;
-        LASSERT(handle_hash != NULL);
+	int count;
+
+	LASSERT(handle_hash != NULL);
 
-        count = atomic_read(&handle_count);
-        if (count != 0) {
-                CERROR("handle_count at cleanup: %d\n", count);
-                cleanup_all_handles();
-        }
+	count = cleanup_all_handles();
 
-        OBD_VFREE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
-        handle_hash = NULL;
+	OBD_FREE_LARGE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
+	handle_hash = NULL;
 
-        if (atomic_read(&handle_count))
-                CERROR("leaked %d handles\n", atomic_read(&handle_count));
+	if (count != 0)
+		CERROR("handle_count at cleanup: %d\n", count);
 }
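
For orientation, a minimal sketch of how a caller might use the reworked API in this patch. Only the class_handle_* functions and the portals_handle/portals_handle_ops members visible in the diff are taken from the source; everything prefixed my_ is hypothetical, and treating the pointer returned by class_handle2object() as the embedding object assumes the handle is that object's first member.

/* Hypothetical caller of the handle API from this patch.  struct my_obj,
 * my_obj_addref() and my_obj_ops are illustrative only; the real type
 * definitions live in the lustre_handles.h header included above. */
struct my_obj {
	struct portals_handle	mo_handle;	/* assumed first member */
	atomic_t		mo_refcount;
};

static void my_obj_addref(struct portals_handle *h)
{
	struct my_obj *obj = container_of(h, struct my_obj, mo_handle);

	/* pin the object on behalf of a class_handle2object() caller */
	atomic_inc(&obj->mo_refcount);
}

static struct portals_handle_ops my_obj_ops = {
	.hop_addref	= my_obj_addref,
	.hop_free	= NULL,	/* class_handle_free_cb() then falls back to OBD_FREE() */
};

static int my_obj_publish_and_lookup(struct my_obj *obj)
{
	struct my_obj *found;

	/* generate a cookie and insert the handle into the global hash;
	 * buckets are protected by per-bucket spinlocks plus RCU on lookup */
	class_handle_hash(&obj->mo_handle, &my_obj_ops);

	/* translate the wire cookie back into the object; the owner argument
	 * must match whatever the caller stored in h_owner beforehand */
	found = class_handle2object(obj->mo_handle.h_cookie,
				    obj->mo_handle.h_owner);
	if (found == NULL)
		return -ENOENT;

	/* ... drop the reference taken by hop_addref() when done ... */

	/* remove the handle from the hash before the object is freed */
	class_handle_unhash(&obj->mo_handle);
	return 0;
}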