/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software.  If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you.  See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * license text for more details.
 */

#define DEBUG_SUBSYSTEM S_CLASS

#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <obd_support.h>
#include <lustre_handles.h>
#include <lustre_lib.h>

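/*
 * When RCU is not available (or when building the userspace liblustre
 * client), the lockless lookup degrades to plain list walking under the
 * per-bucket spinlock: the _rcu list helpers below become their ordinary
 * counterparts and rcu_read_lock()/rcu_read_unlock() expand to operations
 * on "bucket->lock".  This only works because every caller of
 * rcu_read_lock() in this file has a local variable named "bucket" in
 * scope; see the warning in class_handle2object() before changing that.
 */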
#if !defined(HAVE_RCU) || !defined(__KERNEL__)
# define list_add_rcu            list_add
# define list_del_rcu            list_del
# define list_for_each_rcu       list_for_each
# define list_for_each_safe_rcu  list_for_each_safe
# define rcu_read_lock()         spin_lock(&bucket->lock)
# define rcu_read_unlock()       spin_unlock(&bucket->lock)
#endif /* ifndef HAVE_RCU */

static __u64 handle_base;
#define HANDLE_INCR 7   /* step between successive cookies (any non-zero value works; 7 assumed here) */
static spinlock_t handle_base_lock;

static struct handle_bucket {
        spinlock_t lock;
        struct list_head head;
} *handle_hash;

static atomic_t handle_count = ATOMIC_INIT(0);

#ifdef __arch_um__
/* For an unknown reason, UML uses kmalloc rather than vmalloc to allocate
 * memory (OBD_VMALLOC).  Therefore, we have to shrink HANDLE_HASH_SIZE so
 * that the array of hash heads does not exceed 128K.
 */
#define HANDLE_HASH_SIZE 4096
#else
#define HANDLE_HASH_SIZE (1 << 14)
#endif /* ifdef __arch_um__ */

#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)

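/*
 * Usage sketch (illustrative only; "struct foo_object", "foo_addref" and
 * the surrounding code are hypothetical, not part of this file).  An object
 * that wants a wire-visible handle embeds a struct portals_handle, hashes
 * it with an addref callback, hands out h_cookie, and unhashes the handle
 * before the object goes away:
 *
 *      struct foo_object {
 *              struct portals_handle  foo_handle;  // first, so the returned
 *              ...                                 // pointer is the object
 *      };
 *
 *      static void foo_addref(void *object);   // assumed to match
 *                                              // portals_handle_addref_cb
 *
 *      class_handle_hash(&obj->foo_handle, foo_addref);
 *      cookie = obj->foo_handle.h_cookie;       // send to the peer
 *      ...
 *      obj = class_handle2object(cookie);       // NULL if stale; addref ran
 *      ...
 *      class_handle_unhash(&obj->foo_handle);   // before freeing obj
 */
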
/*
 * Generate a unique 64-bit cookie (hash) for a handle and insert it into
 * the global (per-node) hash table.
 */
void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
{
        struct handle_bucket *bucket;

        LASSERT(list_empty(&h->h_link));

        /*
         * This is a fast but simplistic cookie generation algorithm; it will
         * need a re-do at some point in the future for security.
         */
        spin_lock(&handle_base_lock);
        handle_base += HANDLE_INCR;

        h->h_cookie = handle_base;
        if (unlikely(handle_base == 0)) {
                /*
                 * A cookie of zero is "dangerous", because in many places it
                 * is assumed that 0 means an "unassigned" handle, not bound
                 * to any object.
                 */
                CWARN("The universe has been exhausted: cookie wrap-around.\n");
                handle_base += HANDLE_INCR;
        }
        spin_unlock(&handle_base_lock);

        atomic_inc(&handle_count);
        h->h_addref = cb;
        spin_lock_init(&h->h_lock);

        bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
        spin_lock(&bucket->lock);
        list_add_rcu(&h->h_link, &bucket->head);
        spin_unlock(&bucket->lock);

        CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
               h, h->h_cookie);
}

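/*
 * Remove a handle from its hash bucket.  The caller must already hold the
 * bucket lock; class_handle_unhash() below is the locking wrapper.
 */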
static void class_handle_unhash_nolock(struct portals_handle *h)
{
        if (list_empty(&h->h_link)) {
                CERROR("removing an already-removed handle ("LPX64")\n",
                       h->h_cookie);
                return;
        }

        CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
               h, h->h_cookie);

        /* invalidate the cookie under h_lock so that a concurrent lookup in
         * class_handle2object() will not hand out this handle any more */
        spin_lock(&h->h_lock);
        if (h->h_cookie == 0) {
                spin_unlock(&h->h_lock);
                return;
        }
        h->h_cookie = 0;
        spin_unlock(&h->h_lock);
        list_del_rcu(&h->h_link);
}

void class_handle_unhash(struct portals_handle *h)
{
        struct handle_bucket *bucket;
        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);

        spin_lock(&bucket->lock);
        class_handle_unhash_nolock(h);
        spin_unlock(&bucket->lock);

        atomic_dec(&handle_count);
}

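/*
 * Put a handle back into the hash table, using the cookie currently stored
 * in the handle to pick the bucket.
 */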
void class_handle_hash_back(struct portals_handle *h)
{
        struct handle_bucket *bucket;

        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);

        atomic_inc(&handle_count);
        spin_lock(&bucket->lock);
        list_add_rcu(&h->h_link, &bucket->head);
        spin_unlock(&bucket->lock);
}

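/*
 * Look an object up by its cookie.  The bucket list is walked under
 * rcu_read_lock(); a matching handle is validated and pinned (through its
 * addref callback) under h_lock.  Returns NULL if no live handle matches.
 */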
void *class_handle2object(__u64 cookie)
{
        struct handle_bucket *bucket;
        struct list_head *tmp;
        void *retval = NULL;

        LASSERT(handle_hash != NULL);

        /* Be careful when you want to change this code.  See the
         * rcu_read_lock() definition at the top of this file. - jxiong */
        bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
        rcu_read_lock();
        list_for_each_rcu(tmp, &bucket->head) {
                struct portals_handle *h;
                h = list_entry(tmp, struct portals_handle, h_link);
                if (h->h_cookie != cookie)
                        continue;
                spin_lock(&h->h_lock);
                if (likely(h->h_cookie != 0)) {
                        h->h_addref(h);
                        retval = h;
                }
                spin_unlock(&h->h_lock);
                break;
        }
        rcu_read_unlock();

        return retval;
}

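/*
 * RCU callback that releases the memory behind a handle once readers are
 * done with it: the registered h_free_cb is preferred, otherwise the
 * object is freed directly.
 */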
void class_handle_free_cb(struct rcu_head *rcu)
{
        struct portals_handle *h = RCU2HANDLE(rcu);
        if (h->h_free_cb) {
                h->h_free_cb(h->h_ptr, h->h_size);
        } else {
                void *ptr = h->h_ptr;
                unsigned int size = h->h_size;
                OBD_FREE(ptr, size);
        }
}

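/*
 * Allocate the global bucket array and seed the cookie sequence; called
 * once at startup.  class_handle_cleanup() is the counterpart.
 */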
int class_handle_init(void)
{
        struct handle_bucket *bucket;

        LASSERT(handle_hash == NULL);

        OBD_VMALLOC(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
        if (handle_hash == NULL)
                return -ENOMEM;

        spin_lock_init(&handle_base_lock);
        for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
             bucket--) {
                CFS_INIT_LIST_HEAD(&bucket->head);
                spin_lock_init(&bucket->lock);
        }

        /* seed the cookie sequence with a random starting value */
        ll_get_random_bytes(&handle_base, sizeof(handle_base));
        LASSERT(handle_base != 0ULL);

        return 0;
}

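/*
 * Force-remove any handles still hashed at shutdown, complaining about each
 * one, so that class_handle_cleanup() can free the bucket array safely.
 */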
static void cleanup_all_handles(void)
{
        int i;

        for (i = 0; i < HANDLE_HASH_SIZE; i++) {
                struct list_head *tmp, *pos;
                spin_lock(&handle_hash[i].lock);
                list_for_each_safe_rcu(tmp, pos, &(handle_hash[i].head)) {
                        struct portals_handle *h;
                        h = list_entry(tmp, struct portals_handle, h_link);

                        CERROR("force clean handle "LPX64" addr %p addref %p\n",
                               h->h_cookie, h, h->h_addref);

                        class_handle_unhash_nolock(h);
                }
                spin_unlock(&handle_hash[i].lock);
        }
}

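/*
 * Free the hash table at module cleanup; leaked handles are reported and
 * force-unhashed first.
 */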
void class_handle_cleanup(void)
{
        int count;

        LASSERT(handle_hash != NULL);
        count = atomic_read(&handle_count);
        if (count != 0) {
                CERROR("handle_count at cleanup: %d\n", count);
                cleanup_all_handles();
        }
        OBD_VFREE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
        handle_hash = NULL;

        if (atomic_read(&handle_count))
                CERROR("leaked %d handles\n", atomic_read(&handle_count));
}