4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdclass/lustre_handles.c
38 * Author: Phil Schwan <phil@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
43 # include <liblustre.h>
46 #include <obd_support.h>
47 #include <lustre_handles.h>
48 #include <lustre_lib.h>
/*
 * Userspace build (!__KERNEL__): there is no RCU, so the RCU list API is
 * mapped onto plain cfs_list_* operations, and rcu_read_lock()/unlock()
 * are mapped onto the per-bucket spinlock.  NOTE(review): these two
 * macros expand a local variable named `bucket`, so they may only be
 * used in scopes where such a variable exists (see class_handle2object).
 */
51 # define list_add_rcu cfs_list_add
52 # define list_del_rcu cfs_list_del
53 # define list_for_each_rcu cfs_list_for_each
54 # define list_for_each_safe_rcu cfs_list_for_each_safe
55 # define list_for_each_entry_rcu cfs_list_for_each_entry
56 # define rcu_read_lock() spin_lock(&bucket->lock)
57 # define rcu_read_unlock() spin_unlock(&bucket->lock)
58 #endif /* !__KERNEL__ */
/* Monotonically increasing source of handle cookies; protected by
 * handle_base_lock.  Seeded randomly in class_handle_init(). */
60 static __u64 handle_base;
62 static spinlock_t handle_base_lock;
/* One hash bucket: a spinlock plus a list head of portals_handles.
 * NOTE(review): the struct's field lines are missing from this chunk;
 * usage below shows at least `lock` and `head` members. */
64 static struct handle_bucket {
70 /* For unknown reason, UML uses kmalloc rather than vmalloc to allocate
71 * memory(OBD_VMALLOC). Therefore, we have to redefine the
72 * HANDLE_HASH_SIZE to make the hash heads don't exceed 128K.
/* Smaller table under UML (__arch_um__); 64K buckets otherwise. */
74 #define HANDLE_HASH_SIZE 4096
76 #define HANDLE_HASH_SIZE (1 << 16)
77 #endif /* ifdef __arch_um__ */
/* Valid because HANDLE_HASH_SIZE is a power of two. */
79 #define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
/*
 * class_handle_hash() - assign a fresh 64-bit cookie to @h and insert
 * it into the global (per-node) handle hash table with operations @ops.
 * NOTE(review): several interior lines of this function (braces, the
 * h_in / h_ops assignments) are missing from this chunk; comments below
 * describe only the visible statements.
 */
82 * Generate a unique 64bit cookie (hash) for a handle and insert it into
83 * global (per-node) hash-table.
85 void class_handle_hash(struct portals_handle *h,
86 struct portals_handle_ops *ops)
88 struct handle_bucket *bucket;
/* The handle must not already be linked into a bucket. */
92 LASSERT(cfs_list_empty(&h->h_link));
95 * This is fast, but simplistic cookie generation algorithm, it will
96 * need a re-do at some point in the future for security.
/* Serialize cookie generation across callers. */
98 spin_lock(&handle_base_lock);
99 handle_base += HANDLE_INCR;
101 h->h_cookie = handle_base;
102 if (unlikely(handle_base == 0)) {
104 * Cookie of zero is "dangerous", because in many places it's
105 * assumed that 0 means "unassigned" handle, not bound to any
108 CWARN("The universe has been exhausted: cookie wrap-around.\n");
/* Skip the reserved zero cookie after a wrap-around. */
109 handle_base += HANDLE_INCR;
111 spin_unlock(&handle_base_lock);
/* h_lock guards the handle's liveness check in class_handle2object(). */
114 spin_lock_init(&h->h_lock);
/* Bucket selection: low bits of the cookie index the hash table. */
116 bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
117 spin_lock(&bucket->lock);
/* RCU insert: concurrent lookups may traverse this list locklessly. */
118 list_add_rcu(&h->h_link, &bucket->head);
120 spin_unlock(&bucket->lock);
122 CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
126 EXPORT_SYMBOL(class_handle_hash);
/*
 * Remove @h from its bucket's list.  Caller must hold the bucket lock.
 * Complains (and presumably bails out — the early-return line is not
 * visible in this chunk) if the handle was already removed.
 */
128 static void class_handle_unhash_nolock(struct portals_handle *h)
130 if (cfs_list_empty(&h->h_link)) {
131 CERROR("removing an already-removed handle ("LPX64")\n",
136 CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
/* NOTE(review): the statement(s) between these lock/unlock pairs
 * (likely clearing h_in under h_lock) are missing from this chunk. */
139 spin_lock(&h->h_lock);
141 spin_unlock(&h->h_lock);
145 spin_unlock(&h->h_lock);
/* RCU delete: concurrent readers may still see the entry briefly. */
146 list_del_rcu(&h->h_link);
/*
 * Public wrapper: locate @h's bucket by cookie, take the bucket lock,
 * and remove the handle via class_handle_unhash_nolock().
 */
149 void class_handle_unhash(struct portals_handle *h)
151 struct handle_bucket *bucket;
152 bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
154 spin_lock(&bucket->lock);
155 class_handle_unhash_nolock(h);
156 spin_unlock(&bucket->lock);
158 EXPORT_SYMBOL(class_handle_unhash);
/*
 * Re-insert a previously unhashed handle into the bucket selected by
 * its existing cookie (the cookie is NOT regenerated here, unlike
 * class_handle_hash()).  NOTE(review): any h_in/state restoration lines
 * are missing from this chunk.
 */
160 void class_handle_hash_back(struct portals_handle *h)
162 struct handle_bucket *bucket;
165 bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
167 spin_lock(&bucket->lock);
168 list_add_rcu(&h->h_link, &bucket->head);
170 spin_unlock(&bucket->lock);
174 EXPORT_SYMBOL(class_handle_hash_back);
/*
 * Look up the object bound to @cookie.  Walks the matching bucket's
 * list under RCU (bucket spinlock in userspace builds — see the macro
 * shims at the top of this file), and on a cookie match takes a
 * reference through h_ops->hop_addref() while holding h_lock, but only
 * if the handle is still live (h_in != 0).  NOTE(review): the retval
 * assignment, loop break, rcu_read_lock/unlock calls and the final
 * return are not visible in this chunk.
 */
176 void *class_handle2object(__u64 cookie)
178 struct handle_bucket *bucket;
179 struct portals_handle *h;
/* The subsystem must have been initialized via class_handle_init(). */
183 LASSERT(handle_hash != NULL);
185 /* Be careful when you want to change this code. See the
186 * rcu_read_lock() definition on top this file. - jxiong */
187 bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
190 list_for_each_entry_rcu(h, &bucket->head, h_link) {
191 if (h->h_cookie != cookie)
/* h_lock serializes the liveness check against unhashing. */
194 spin_lock(&h->h_lock);
195 if (likely(h->h_in != 0)) {
196 h->h_ops->hop_addref(h);
199 spin_unlock(&h->h_lock);
206 EXPORT_SYMBOL(class_handle2object);
/*
 * RCU callback that frees a handle's backing object once all readers
 * have drained.  The object pointer is recovered from h_cookie (the
 * cookie doubles as the object's address here).  If the owner supplied
 * a hop_free hook it is used; otherwise (the `else` line is not visible
 * in this chunk) the memory is released with OBD_FREE().
 */
208 void class_handle_free_cb(cfs_rcu_head_t *rcu)
210 struct portals_handle *h = RCU2HANDLE(rcu);
211 void *ptr = (void *)(unsigned long)h->h_cookie;
213 if (h->h_ops->hop_free != NULL)
214 h->h_ops->hop_free(ptr, h->h_size);
216 OBD_FREE(ptr, h->h_size);
218 EXPORT_SYMBOL(class_handle_free_cb);
/*
 * Module init: allocate the bucket table, initialize every bucket's
 * list head and lock (iterating from the last bucket down to the
 * first), then randomize handle_base so cookies are unpredictable
 * across boots (bug 21430).  NOTE(review): the declarations of `seed`
 * and `tv`, the -ENOMEM return on allocation failure, and the final
 * return value are missing from this chunk.
 */
220 int class_handle_init(void)
222 struct handle_bucket *bucket;
/* Double-init is a programming error. */
226 LASSERT(handle_hash == NULL);
228 OBD_ALLOC_LARGE(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
229 if (handle_hash == NULL)
232 spin_lock_init(&handle_base_lock);
233 for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
235 CFS_INIT_LIST_HEAD(&bucket->head);
236 spin_lock_init(&bucket->lock);
239 /** bug 21430: add randomness to the initial base */
240 cfs_get_random_bytes(seed, sizeof(seed));
/* Mix time-of-day into the PRNG seed as an extra entropy source. */
241 cfs_gettimeofday(&tv);
242 cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
244 cfs_get_random_bytes(&handle_base, sizeof(handle_base));
/* Zero is the reserved "unassigned" cookie; must not start there. */
245 LASSERT(handle_base != 0ULL);
/*
 * Forcibly unhash every handle still present in the table at shutdown,
 * logging each leaked handle.  Returns a count (`rc`) of handles
 * force-cleaned — the increment and return lines are not visible in
 * this chunk, so this is inferred from the caller's use; confirm
 * against the full source.
 */
250 static int cleanup_all_handles(void)
255 for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
256 struct portals_handle *h;
258 spin_lock(&handle_hash[i].lock);
259 list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
260 CERROR("force clean handle "LPX64" addr %p ops %p\n",
261 h->h_cookie, h, h->h_ops);
263 class_handle_unhash_nolock(h);
266 spin_unlock(&handle_hash[i].lock);
/*
 * Module teardown: force-clean any remaining handles, free the bucket
 * table, and complain if anything was still hashed (a leak).
 * NOTE(review): the declaration of `count`, the handle_hash = NULL
 * reset, and the `if (count != 0)` guard are missing from this chunk.
 */
272 void class_handle_cleanup(void)
275 LASSERT(handle_hash != NULL);
277 count = cleanup_all_handles();
279 OBD_FREE_LARGE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
283 CERROR("handle_count at cleanup: %d\n", count);