/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lustre_handles.c
 *
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS
#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <obd_support.h>
#include <lustre_handles.h>
#include <lustre_lib.h>

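/*
 * Without kernel RCU support, fall back to the plain list primitives and
 * turn the "RCU read lock" into the per-bucket spinlock.
 */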
#if !defined(HAVE_RCU) || !defined(__KERNEL__)
# define list_add_rcu            cfs_list_add
# define list_del_rcu            cfs_list_del
# define list_for_each_rcu       cfs_list_for_each
# define list_for_each_safe_rcu  cfs_list_for_each_safe
# define list_for_each_entry_rcu cfs_list_for_each_entry
# define rcu_read_lock()         cfs_spin_lock(&bucket->lock)
# define rcu_read_unlock()       cfs_spin_unlock(&bucket->lock)
#endif /* !HAVE_RCU || !__KERNEL__ */

static __u64 handle_base;
#define HANDLE_INCR 7
static cfs_spinlock_t handle_base_lock;

static struct handle_bucket {
        cfs_spinlock_t lock;
        cfs_list_t head;
} *handle_hash;

static cfs_atomic_t handle_count = CFS_ATOMIC_INIT(0);

#ifdef __arch_um__
/* For an unknown reason, UML uses kmalloc rather than vmalloc to allocate
 * memory (OBD_VMALLOC). Therefore, we have to redefine HANDLE_HASH_SIZE so
 * that the hash heads do not exceed 128K.
 */
#define HANDLE_HASH_SIZE 4096
#else
#define HANDLE_HASH_SIZE (1 << 14)
#endif /* ifdef __arch_um__ */

#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)

/*
 * Generate a unique 64bit cookie (hash) for a handle and insert it into
 * the global (per-node) hash table.
 */
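/*
 * A minimal usage sketch (hypothetical caller, not from this file): embed
 * a portals_handle as the first member of an object, so that the handle
 * pointer class_handle2object() returns is also the object pointer; hash
 * it to obtain a cookie, then resolve the cookie back to the object later:
 *
 *      struct my_obj {
 *              struct portals_handle mo_handle;
 *      };
 *
 *      CFS_INIT_LIST_HEAD(&obj->mo_handle.h_link);
 *      class_handle_hash(&obj->mo_handle, my_obj_addref);
 *      ...
 *      obj = class_handle2object(obj->mo_handle.h_cookie);
 */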
void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
{
        struct handle_bucket *bucket;
        ENTRY;

        LASSERT(h != NULL);
        LASSERT(cfs_list_empty(&h->h_link));

        /*
         * This is a fast but simplistic cookie generation algorithm; it
         * will need to be redone for security at some point in the future.
         */
        cfs_spin_lock(&handle_base_lock);
        handle_base += HANDLE_INCR;

        h->h_cookie = handle_base;
        if (unlikely(handle_base == 0)) {
                /*
                 * A cookie of zero is "dangerous", because in many places
                 * it is assumed that 0 means an "unassigned" handle, not
                 * bound to any object.
                 */
                CWARN("The universe has been exhausted: cookie wrap-around.\n");
                handle_base += HANDLE_INCR;
        }
        cfs_spin_unlock(&handle_base_lock);

        cfs_atomic_inc(&handle_count);
        h->h_addref = cb;
        cfs_spin_lock_init(&h->h_lock);

        bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
        cfs_spin_lock(&bucket->lock);
        list_add_rcu(&h->h_link, &bucket->head);
        h->h_in = 1;
        cfs_spin_unlock(&bucket->lock);

        CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
               h, h->h_cookie);
        EXIT;
}

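/*
 * Remove @h from its hash bucket; the caller must already hold the bucket
 * lock. h->h_in is cleared under h->h_lock so that a concurrent
 * class_handle2object() lookup cannot take a reference on a dying handle.
 */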
static void class_handle_unhash_nolock(struct portals_handle *h)
{
        if (cfs_list_empty(&h->h_link)) {
                CERROR("removing an already-removed handle ("LPX64")\n",
                       h->h_cookie);
                return;
        }

        CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
               h, h->h_cookie);

        cfs_spin_lock(&h->h_lock);
        if (h->h_in == 0) {
                cfs_spin_unlock(&h->h_lock);
                return;
        }
        h->h_in = 0;
        cfs_spin_unlock(&h->h_lock);
        list_del_rcu(&h->h_link);
}

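/* Remove a handle from the hash table and drop the global handle count. */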
void class_handle_unhash(struct portals_handle *h)
{
        struct handle_bucket *bucket;
        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);

        cfs_spin_lock(&bucket->lock);
        class_handle_unhash_nolock(h);
        cfs_spin_unlock(&bucket->lock);

        cfs_atomic_dec(&handle_count);
}

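/* Re-insert a previously unhashed handle, keeping its original cookie. */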
void class_handle_hash_back(struct portals_handle *h)
{
        struct handle_bucket *bucket;
        ENTRY;

        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);

        cfs_atomic_inc(&handle_count);
        cfs_spin_lock(&bucket->lock);
        list_add_rcu(&h->h_link, &bucket->head);
        h->h_in = 1;
        cfs_spin_unlock(&bucket->lock);

        EXIT;
}

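/*
 * Look up the handle for @cookie and take a reference on the object via
 * the handle's addref callback. Returns NULL if no live handle matches.
 */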
void *class_handle2object(__u64 cookie)
{
        struct handle_bucket *bucket;
        struct portals_handle *h;
        void *retval = NULL;
        ENTRY;

        LASSERT(handle_hash != NULL);

        /* Be careful when you want to change this code. See the
         * rcu_read_lock() definition at the top of this file. - jxiong */
        bucket = handle_hash + (cookie & HANDLE_HASH_MASK);

        rcu_read_lock();
        list_for_each_entry_rcu(h, &bucket->head, h_link) {
                if (h->h_cookie != cookie)
                        continue;

                cfs_spin_lock(&h->h_lock);
                if (likely(h->h_in != 0)) {
                        h->h_addref(h);
                        retval = h;
                }
                cfs_spin_unlock(&h->h_lock);
                break;
        }
        rcu_read_unlock();

        RETURN(retval);
}

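/* RCU callback: free the handle's object once all RCU readers are done. */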
void class_handle_free_cb(cfs_rcu_head_t *rcu)
{
        struct portals_handle *h = RCU2HANDLE(rcu);
        if (h->h_free_cb) {
                h->h_free_cb(h->h_ptr, h->h_size);
        } else {
                void *ptr = h->h_ptr;
                unsigned int size = h->h_size;
                OBD_FREE(ptr, size);
        }
}

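/* Allocate the handle hash table and seed the cookie generator. */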
int class_handle_init(void)
{
        struct handle_bucket *bucket;
        struct timeval tv;
        int seed[2];

        LASSERT(handle_hash == NULL);

        OBD_VMALLOC(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
        if (handle_hash == NULL)
                return -ENOMEM;

        cfs_spin_lock_init(&handle_base_lock);
        for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
             bucket--) {
                CFS_INIT_LIST_HEAD(&bucket->head);
                cfs_spin_lock_init(&bucket->lock);
        }

        /* bug 21430: add randomness to the initial base */
        ll_get_random_bytes(seed, sizeof(seed));
        cfs_gettimeofday(&tv);
        ll_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);

        ll_get_random_bytes(&handle_base, sizeof(handle_base));
        LASSERT(handle_base != 0ULL);

        return 0;
}

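/* Forcibly unhash any handles still registered at cleanup time. */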
static void cleanup_all_handles(void)
{
        int i;

        for (i = 0; i < HANDLE_HASH_SIZE; i++) {
                struct portals_handle *h;

                cfs_spin_lock(&handle_hash[i].lock);
                list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
                        CERROR("force clean handle "LPX64" addr %p addref %p\n",
                               h->h_cookie, h, h->h_addref);

                        class_handle_unhash_nolock(h);
                }
                cfs_spin_unlock(&handle_hash[i].lock);
        }
}

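/* Tear down the handle hash table, reporting any handles still in use. */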
void class_handle_cleanup(void)
{
        int count;
        LASSERT(handle_hash != NULL);

        count = cfs_atomic_read(&handle_count);
        if (count != 0) {
                CERROR("handle_count at cleanup: %d\n", count);
                cleanup_all_handles();
        }

        OBD_VFREE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
        handle_hash = NULL;

        if (cfs_atomic_read(&handle_count))
                CERROR("leaked %d handles\n", cfs_atomic_read(&handle_count));
}