/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lustre_handles.c
 *
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS
#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <obd_support.h>
#include <lustre_handles.h>
#include <lustre_lib.h>

#if !defined(HAVE_RCU) || !defined(__KERNEL__)
# define list_add_rcu            list_add
# define list_del_rcu            list_del
# define list_for_each_rcu       list_for_each
# define list_for_each_safe_rcu  list_for_each_safe
# define rcu_read_lock()         spin_lock(&bucket->lock)
# define rcu_read_unlock()       spin_unlock(&bucket->lock)
#endif /* ifndef HAVE_RCU */
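/*
 * Note that the rcu_read_lock()/rcu_read_unlock() replacements above expand
 * to operations on a local variable named "bucket", so they only work inside
 * functions that have the target bucket in scope under exactly that name
 * (see class_handle2object() below).
 */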

static __u64 handle_base;
#define HANDLE_INCR 7
static spinlock_t handle_base_lock;

static struct handle_bucket {
        spinlock_t lock;
        struct list_head head;
} *handle_hash;

static atomic_t handle_count = ATOMIC_INIT(0);

#ifdef __arch_um__
/* For unknown reasons, UML uses kmalloc rather than vmalloc to allocate
 * memory (OBD_VMALLOC). Therefore, we have to reduce HANDLE_HASH_SIZE so
 * that the hash heads do not exceed 128K.
 */
#define HANDLE_HASH_SIZE 4096
#else
#define HANDLE_HASH_SIZE (1 << 14)
#endif /* ifdef __arch_um__ */

#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)

/*
 * Generate a unique 64-bit cookie (hash) for a handle and insert it into
 * the global (per-node) hash table.
 */
void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
{
        struct handle_bucket *bucket;
        ENTRY;

        LASSERT(h != NULL);
        LASSERT(list_empty(&h->h_link));

        /*
         * This is a fast but simplistic cookie-generation algorithm; it
         * will need a re-do at some point in the future for security.
         */
        spin_lock(&handle_base_lock);
        handle_base += HANDLE_INCR;

        h->h_cookie = handle_base;
        if (unlikely(handle_base == 0)) {
                /*
                 * Cookie of zero is "dangerous", because in many places it's
                 * assumed that 0 means "unassigned" handle, not bound to any
                 * object.
                 */
                CWARN("The universe has been exhausted: cookie wrap-around.\n");
                handle_base += HANDLE_INCR;
        }
        spin_unlock(&handle_base_lock);

        atomic_inc(&handle_count);
        h->h_addref = cb;
        spin_lock_init(&h->h_lock);

        bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
        spin_lock(&bucket->lock);
        list_add_rcu(&h->h_link, &bucket->head);
        h->h_in = 1;
        spin_unlock(&bucket->lock);

        CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
               h, h->h_cookie);
        EXIT;
}

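/*
 * Remove a handle from its hash bucket. The caller must already hold the
 * bucket lock; h_in is cleared under h_lock so that a concurrent
 * class_handle2object() lookup will refuse to take a new reference.
 */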
static void class_handle_unhash_nolock(struct portals_handle *h)
{
        if (list_empty(&h->h_link)) {
                CERROR("removing an already-removed handle ("LPX64")\n",
                       h->h_cookie);
                return;
        }

        CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
               h, h->h_cookie);

        spin_lock(&h->h_lock);
        if (h->h_in == 0) {
                spin_unlock(&h->h_lock);
                return;
        }
        h->h_in = 0;
        spin_unlock(&h->h_lock);
        list_del_rcu(&h->h_link);
}

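/* Locked wrapper: find the handle's bucket and unhash under the bucket lock. */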
void class_handle_unhash(struct portals_handle *h)
{
        struct handle_bucket *bucket;
        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);

        spin_lock(&bucket->lock);
        class_handle_unhash_nolock(h);
        spin_unlock(&bucket->lock);

        atomic_dec(&handle_count);
}

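/*
 * Re-insert a previously unhashed handle into the hash table, reusing the
 * cookie originally assigned to it by class_handle_hash().
 */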
void class_handle_hash_back(struct portals_handle *h)
{
        struct handle_bucket *bucket;
        ENTRY;

        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);

        atomic_inc(&handle_count);
        spin_lock(&bucket->lock);
        list_add_rcu(&h->h_link, &bucket->head);
        h->h_in = 1;
        spin_unlock(&bucket->lock);

        EXIT;
}

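/*
 * Look up the object associated with a cookie. On success the handle's
 * h_addref callback has been invoked, so the caller owns a reference;
 * returns NULL if the cookie is unknown or the handle has been unhashed.
 */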
void *class_handle2object(__u64 cookie)
{
        struct handle_bucket *bucket;
        struct list_head *tmp;
        void *retval = NULL;
        ENTRY;

        LASSERT(handle_hash != NULL);

        /* Be careful when you want to change this code. See the
         * rcu_read_lock() definition at the top of this file. - jxiong */
        bucket = handle_hash + (cookie & HANDLE_HASH_MASK);

        rcu_read_lock();
        list_for_each_rcu(tmp, &bucket->head) {
                struct portals_handle *h;
                h = list_entry(tmp, struct portals_handle, h_link);
                if (h->h_cookie != cookie)
                        continue;

                spin_lock(&h->h_lock);
                if (likely(h->h_in != 0)) {
                        h->h_addref(h);
                        retval = h;
                }
                spin_unlock(&h->h_lock);
                break;
        }
        rcu_read_unlock();

        RETURN(retval);
}

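/*
 * RCU callback that actually frees a handle's storage once all readers
 * have left their read-side critical sections.
 */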
void class_handle_free_cb(struct rcu_head *rcu)
{
        struct portals_handle *h = RCU2HANDLE(rcu);
        if (h->h_free_cb) {
                h->h_free_cb(h->h_ptr, h->h_size);
        } else {
                void *ptr = h->h_ptr;
                unsigned int size = h->h_size;
                OBD_FREE(ptr, size);
        }
}

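/*
 * Allocate and initialize the global handle hash table, and seed
 * handle_base with random bytes so that cookie values do not start from a
 * predictable constant.
 */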
int class_handle_init(void)
{
        struct handle_bucket *bucket;

        LASSERT(handle_hash == NULL);

        OBD_VMALLOC(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
        if (handle_hash == NULL)
                return -ENOMEM;

        spin_lock_init(&handle_base_lock);
        for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
             bucket--) {
                CFS_INIT_LIST_HEAD(&bucket->head);
                spin_lock_init(&bucket->lock);
        }
        ll_get_random_bytes(&handle_base, sizeof(handle_base));
        LASSERT(handle_base != 0ULL);

        return 0;
}

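/*
 * Forcibly unhash every handle still present in the table, logging each
 * one; only expected to run when handles have leaked at module cleanup.
 */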
static void cleanup_all_handles(void)
{
        int i;

        for (i = 0; i < HANDLE_HASH_SIZE; i++) {
                struct list_head *pos, *n;
                n = NULL;
                spin_lock(&handle_hash[i].lock);
                list_for_each_safe_rcu(pos, n, &(handle_hash[i].head)) {
                        struct portals_handle *h;
                        h = list_entry(pos, struct portals_handle, h_link);

                        CERROR("force clean handle "LPX64" addr %p addref %p\n",
                               h->h_cookie, h, h->h_addref);

                        class_handle_unhash_nolock(h);
                }
                spin_unlock(&handle_hash[i].lock);
        }
}

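/*
 * Tear down the handle hash table at module unload, force-cleaning and
 * reporting any handles that were leaked.
 */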
void class_handle_cleanup(void)
{
        int count;
        LASSERT(handle_hash != NULL);

        count = atomic_read(&handle_count);
        if (count != 0) {
                CERROR("handle_count at cleanup: %d\n", count);
                cleanup_all_handles();
        }

        OBD_VFREE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
        handle_hash = NULL;

        if (atomic_read(&handle_count))
                CERROR("leaked %d handles\n", atomic_read(&handle_count));
}
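
/*
 * A minimal usage sketch ("my_obj" and my_obj_addref() are hypothetical,
 * for illustration only). The portals_handle sits at the very top of the
 * object, since class_handle2object() returns a pointer to the handle
 * itself and the caller treats it as the enclosing object:
 *
 *      struct my_obj {
 *              struct portals_handle mo_handle;  // must be first member
 *              atomic_t              mo_refs;
 *      };
 *
 *      static void my_obj_addref(void *obj)
 *      {
 *              atomic_inc(&((struct my_obj *)obj)->mo_refs);
 *      }
 *
 *      // Publish: assigns o->mo_handle.h_cookie, which can be handed to
 *      // a peer and later used for lookup.
 *      class_handle_hash(&o->mo_handle, my_obj_addref);
 *
 *      // Lookup: NULL, or the object with my_obj_addref() already called.
 *      o = class_handle2object(cookie);
 *
 *      // Retire: no further lookups will succeed.
 *      class_handle_unhash(&o->mo_handle);
 */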