1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdclass/lustre_handles.c
38 * Author: Phil Schwan <phil@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
43 # include <liblustre.h>
46 #include <obd_support.h>
47 #include <lustre_handles.h>
48 #include <lustre_lib.h>
/*
 * RCU compatibility shims: when RCU is unavailable, or when building for
 * userspace (liblustre), map the RCU list primitives onto their plain
 * counterparts and substitute the per-bucket spinlock for the RCU
 * read-side critical section.
 *
 * NOTE(review): rcu_read_lock()/rcu_read_unlock() here expand to code
 * that references a local variable named 'bucket' -- callers of these
 * macros must have such a variable in scope.
 */
#if !defined(HAVE_RCU) || !defined(__KERNEL__)
# define list_add_rcu list_add
# define list_del_rcu list_del
# define list_for_each_rcu list_for_each
# define list_for_each_safe_rcu list_for_each_safe
# define rcu_read_lock() spin_lock(&bucket->lock)
# define rcu_read_unlock() spin_unlock(&bucket->lock)
# define list_for_each_entry_rcu list_for_each_entry
#endif /* ifndef HAVE_RCU */
/* Next cookie value to hand out; advanced under handle_base_lock. */
static __u64 handle_base;
/* Serializes updates to handle_base (cookie generation). */
static spinlock_t handle_base_lock;

/* One hash bucket: the list of live handles whose cookies map here.
 * NOTE(review): this struct declaration appears truncated in this copy --
 * a per-bucket 'lock' member is clearly referenced elsewhere
 * (bucket->lock); confirm against the full source. */
static struct handle_bucket {
        struct list_head head;

/* Count of handles currently registered in the hash. */
static atomic_t handle_count = ATOMIC_INIT(0);
/* For an unknown reason, UML uses kmalloc rather than vmalloc to allocate
 * memory (OBD_VMALLOC). Therefore, we have to redefine HANDLE_HASH_SIZE
 * so that the hash heads do not exceed 128K.
#define HANDLE_HASH_SIZE 4096
/* NOTE(review): a second, conflicting definition follows; the
 * '#ifdef __arch_um__' / '#else' directives selecting between the small
 * (UML) and normal table sizes appear to be missing from this copy --
 * confirm against the full source. */
#define HANDLE_HASH_SIZE (1 << 14)
#endif /* ifdef __arch_um__ */

/* Bucket-index mask; valid because HANDLE_HASH_SIZE is a power of two. */
#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
84 * Generate a unique 64bit cookie (hash) for a handle and insert it into
85 * global (per-node) hash-table.
/**
 * class_handle_hash() - assign a fresh 64-bit cookie to handle @h and
 * insert it into the global (per-node) handle hash table.
 *
 * The cookie is generated under handle_base_lock by advancing the
 * monotonically increasing handle_base counter; the handle is then linked
 * RCU-safely into the bucket selected by the cookie's low bits.
 *
 * NOTE(review): the @cb addref callback parameter is not used in the
 * lines visible here -- presumably stored into @h by elided code; confirm
 * against the full source.
 */
void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
        struct handle_bucket *bucket;

        /* The handle must not already be linked into a bucket. */
        LASSERT(list_empty(&h->h_link));

        /*
         * This is fast, but simplistic cookie generation algorithm, it will
         * need a re-do at some point in the future for security.
         */
        spin_lock(&handle_base_lock);
        handle_base += HANDLE_INCR;

        h->h_cookie = handle_base;
        if (unlikely(handle_base == 0)) {
                /*
                 * Cookie of zero is "dangerous", because in many places it's
                 * assumed that 0 means "unassigned" handle, not bound to any
                 * object -- so skip past zero by bumping again.
                 */
                CWARN("The universe has been exhausted: cookie wrap-around.\n");
                handle_base += HANDLE_INCR;
        spin_unlock(&handle_base_lock);

        /* Account for the new live handle and make its lock usable. */
        atomic_inc(&handle_count);
        spin_lock_init(&h->h_lock);
        /* Bucket is chosen by the low bits of the cookie. */
        bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
        spin_lock(&bucket->lock);
        list_add_rcu(&h->h_link, &bucket->head);
        spin_unlock(&bucket->lock);

        CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
/*
 * Unlink handle @h from its hash bucket.  The caller must already hold
 * the bucket's lock (hence the _nolock suffix); double removal is
 * reported rather than crashing.
 */
static void class_handle_unhash_nolock(struct portals_handle *h)
        if (list_empty(&h->h_link)) {
                CERROR("removing an already-removed handle ("LPX64")\n",

        CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",

        spin_lock(&h->h_lock);
        /* NOTE(review): two unlock paths are visible below -- presumably an
         * elided condition on the handle's state (e.g. h_in) selects between
         * an early return and the actual removal; confirm against the full
         * source. */
        spin_unlock(&h->h_lock);

        spin_unlock(&h->h_lock);
        list_del_rcu(&h->h_link);
/*
 * Remove handle @h from the global hash: locate its bucket from the
 * cookie, unhash it under the bucket lock, and drop the live-handle
 * count.
 */
void class_handle_unhash(struct portals_handle *h)
        struct handle_bucket *bucket;
        /* Same bucket selection as class_handle_hash(). */
        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);

        spin_lock(&bucket->lock);
        class_handle_unhash_nolock(h);
        spin_unlock(&bucket->lock);

        atomic_dec(&handle_count);
/*
 * Look up the object registered under 64-bit @cookie in the global handle
 * hash.  Walks the cookie's bucket under RCU (or the bucket spinlock in
 * non-RCU builds; see the macro definitions at the top of this file).
 *
 * NOTE(review): the return statements are elided from this copy --
 * presumably returns the handle's object pointer on a live match and NULL
 * otherwise; confirm against the full source.
 */
void *class_handle2object(__u64 cookie)
        struct handle_bucket *bucket;
        struct portals_handle *h;

        LASSERT(handle_hash != NULL);

        /* Be careful when you want to change this code. See the
         * rcu_read_lock() definition on top this file. - jxiong */
        bucket = handle_hash + (cookie & HANDLE_HASH_MASK);

        list_for_each_entry_rcu(h, &bucket->head, h_link) {
                /* Low bits collide within a bucket; match the full cookie. */
                if (h->h_cookie != cookie)

                spin_lock(&h->h_lock);
                /* h_in != 0: handle is still live (not being unhashed). */
                if (likely(h->h_in != 0)) {
                spin_unlock(&h->h_lock);
/*
 * RCU callback that frees the object behind a handle once all RCU readers
 * have drained.  @rcu is converted back to the containing handle via
 * RCU2HANDLE().
 *
 * NOTE(review): the branch structure is elided here -- presumably the
 * registered h_free_cb is invoked when set, with ptr/size captured for a
 * fallback free path otherwise; confirm against the full source.
 */
void class_handle_free_cb(struct rcu_head *rcu)
        struct portals_handle *h = RCU2HANDLE(rcu);

        h->h_free_cb(h->h_ptr, h->h_size);
                void *ptr = h->h_ptr;
                unsigned int size = h->h_size;
/*
 * Allocate and initialize the global handle hash table and randomize the
 * initial cookie base.  Must be called exactly once before any handle is
 * hashed (asserted by handle_hash == NULL).
 *
 * NOTE(review): declarations of 'seed' and 'tv' and the return statements
 * are elided from this copy; confirm the error/success paths against the
 * full source.
 */
int class_handle_init(void)
        struct handle_bucket *bucket;

        LASSERT(handle_hash == NULL);

        /* One contiguous vmalloc'd array of HANDLE_HASH_SIZE buckets. */
        OBD_VMALLOC(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
        if (handle_hash == NULL)

        spin_lock_init(&handle_base_lock);
        /* Initialize every bucket's list head and lock, back to front. */
        for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
                CFS_INIT_LIST_HEAD(&bucket->head);
                spin_lock_init(&bucket->lock);

        /** bug 21430: add randomness to the initial base */
        ll_get_random_bytes(seed, sizeof(seed));
        do_gettimeofday(&tv);
        ll_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);

        /* A zero base would hand out the "dangerous" zero cookie first. */
        ll_get_random_bytes(&handle_base, sizeof(handle_base));
        LASSERT(handle_base != 0ULL);
/*
 * Force-unhash every handle still registered at cleanup time, logging an
 * error for each one -- any handle found here was leaked by its owner.
 */
static void cleanup_all_handles(void)
        for (i = 0; i < HANDLE_HASH_SIZE; i++) {
                struct portals_handle *h;

                spin_lock(&handle_hash[i].lock);
                list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
                        CERROR("force clean handle "LPX64" addr %p addref %p\n",
                               h->h_cookie, h, h->h_addref);

                        /* Bucket lock is held, so the _nolock variant is
                         * safe here. */
                        class_handle_unhash_nolock(h);
                spin_unlock(&handle_hash[i].lock);
/*
 * Tear down the global handle hash: forcibly clean any handles still
 * registered (reporting them as leaks), then free the table itself.
 */
void class_handle_cleanup(void)
        LASSERT(handle_hash != NULL);

        count = atomic_read(&handle_count);
        /* A non-zero count here means some subsystem leaked handles. */
        CERROR("handle_count at cleanup: %d\n", count);
        cleanup_all_handles();

        OBD_VFREE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);

        /* Re-check after the forced cleanup; report anything still held. */
        if (atomic_read(&handle_count))
                CERROR("leaked %d handles\n", atomic_read(&handle_count));