Whamcloud - gitweb
LU-1735 ptlrpc: only set jobid if not already set
[fs/lustre-release.git] / lustre / obdclass / lustre_handles.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/obdclass/lustre_handles.c
37  *
38  * Author: Phil Schwan <phil@clusterfs.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_CLASS
42 #ifndef __KERNEL__
43 # include <liblustre.h>
44 #endif
45
46 #include <obd_support.h>
47 #include <lustre_handles.h>
48 #include <lustre_lib.h>
49
/*
 * Userspace build has no RCU: map the RCU list primitives onto plain
 * cfs_list operations, and turn "RCU read-side critical sections" into
 * bucket-spinlock sections.  NOTE: the rcu_read_lock()/rcu_read_unlock()
 * macros below expand to code referencing a local variable literally
 * named 'bucket' — any function using them must have such a local in
 * scope (see class_handle2object()).
 */
#ifndef __KERNEL__
# define list_add_rcu            cfs_list_add
# define list_del_rcu            cfs_list_del
# define list_for_each_rcu       cfs_list_for_each
# define list_for_each_safe_rcu  cfs_list_for_each_safe
# define list_for_each_entry_rcu cfs_list_for_each_entry
# define rcu_read_lock()         cfs_spin_lock(&bucket->lock)
# define rcu_read_unlock()       cfs_spin_unlock(&bucket->lock)
#endif /* !__KERNEL__ */

/* Monotonically advancing base from which new handle cookies are drawn;
 * bumped by HANDLE_INCR for each handle, under handle_base_lock. */
static __u64 handle_base;
#define HANDLE_INCR 7
static cfs_spinlock_t handle_base_lock;

/* Global (per-node) cookie -> handle hash table: an array of
 * HANDLE_HASH_SIZE buckets, each a spinlock-protected list of
 * portals_handle entries.  Allocated in class_handle_init(). */
static struct handle_bucket {
        cfs_spinlock_t  lock;
        cfs_list_t      head;
} *handle_hash;

#ifdef __arch_um__
/* For unknown reason, UML uses kmalloc rather than vmalloc to allocate
 * memory(OBD_VMALLOC). Therefore, we have to redefine the
 * HANDLE_HASH_SIZE to make the hash heads don't exceed 128K.
 */
#define HANDLE_HASH_SIZE 4096
#else
#define HANDLE_HASH_SIZE (1 << 16)
#endif /* ifdef __arch_um__ */

/* Bucket index mask; HANDLE_HASH_SIZE is a power of two. */
#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
80
81 /*
82  * Generate a unique 64bit cookie (hash) for a handle and insert it into
83  * global (per-node) hash-table.
84  */
85 void class_handle_hash(struct portals_handle *h,
86                        struct portals_handle_ops *ops)
87 {
88         struct handle_bucket *bucket;
89         ENTRY;
90
91         LASSERT(h != NULL);
92         LASSERT(cfs_list_empty(&h->h_link));
93
94         /*
95          * This is fast, but simplistic cookie generation algorithm, it will
96          * need a re-do at some point in the future for security.
97          */
98         cfs_spin_lock(&handle_base_lock);
99         handle_base += HANDLE_INCR;
100
101         h->h_cookie = handle_base;
102         if (unlikely(handle_base == 0)) {
103                 /*
104                  * Cookie of zero is "dangerous", because in many places it's
105                  * assumed that 0 means "unassigned" handle, not bound to any
106                  * object.
107                  */
108                 CWARN("The universe has been exhausted: cookie wrap-around.\n");
109                 handle_base += HANDLE_INCR;
110         }
111         cfs_spin_unlock(&handle_base_lock);
112
113         h->h_ops = ops;
114         cfs_spin_lock_init(&h->h_lock);
115
116         bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
117         cfs_spin_lock(&bucket->lock);
118         list_add_rcu(&h->h_link, &bucket->head);
119         h->h_in = 1;
120         cfs_spin_unlock(&bucket->lock);
121
122         CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
123                h, h->h_cookie);
124         EXIT;
125 }
126 EXPORT_SYMBOL(class_handle_hash);
127
/*
 * Remove @h from its hash bucket list; the caller must hold the bucket
 * lock.  h_in is cleared under h->h_lock before the list removal so a
 * concurrent class_handle2object() lookup (which checks h_in under the
 * same lock) cannot take a new reference on a dying handle.
 */
static void class_handle_unhash_nolock(struct portals_handle *h)
{
        if (cfs_list_empty(&h->h_link)) {
                CERROR("removing an already-removed handle ("LPX64")\n",
                       h->h_cookie);
                return;
        }

        CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
               h, h->h_cookie);

        cfs_spin_lock(&h->h_lock);
        if (h->h_in == 0) {
                /* Lost the race: someone else already marked it gone. */
                cfs_spin_unlock(&h->h_lock);
                return;
        }
        h->h_in = 0;
        cfs_spin_unlock(&h->h_lock);
        list_del_rcu(&h->h_link);
}
148
149 void class_handle_unhash(struct portals_handle *h)
150 {
151         struct handle_bucket *bucket;
152         bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
153
154         cfs_spin_lock(&bucket->lock);
155         class_handle_unhash_nolock(h);
156         cfs_spin_unlock(&bucket->lock);
157 }
158 EXPORT_SYMBOL(class_handle_unhash);
159
160 void class_handle_hash_back(struct portals_handle *h)
161 {
162         struct handle_bucket *bucket;
163         ENTRY;
164
165         bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
166
167         cfs_spin_lock(&bucket->lock);
168         list_add_rcu(&h->h_link, &bucket->head);
169         h->h_in = 1;
170         cfs_spin_unlock(&bucket->lock);
171
172         EXIT;
173 }
174 EXPORT_SYMBOL(class_handle_hash_back);
175
/*
 * Look up the handle bound to @cookie in the global hash.
 *
 * Returns the matching handle (as void *) with a reference taken via its
 * hop_addref() op, or NULL if no live handle has this cookie.  The caller
 * owns the reference and must drop it through the handle's ops.
 */
void *class_handle2object(__u64 cookie)
{
        struct handle_bucket *bucket;
        struct portals_handle *h;
        void *retval = NULL;
        ENTRY;

        LASSERT(handle_hash != NULL);

        /* Be careful when you want to change this code. See the
         * rcu_read_lock() definition on top this file. - jxiong */
        /* (In userspace, rcu_read_lock() expands to taking this local
         * 'bucket' variable's lock — the name must not change.) */
        bucket = handle_hash + (cookie & HANDLE_HASH_MASK);

        rcu_read_lock();
        list_for_each_entry_rcu(h, &bucket->head, h_link) {
                if (h->h_cookie != cookie)
                        continue;

                /* h_lock serializes with class_handle_unhash_nolock():
                 * only take a reference while the handle is still live. */
                cfs_spin_lock(&h->h_lock);
                if (likely(h->h_in != 0)) {
                        h->h_ops->hop_addref(h);
                        retval = h;
                }
                cfs_spin_unlock(&h->h_lock);
                break;
        }
        rcu_read_unlock();

        RETURN(retval);
}
EXPORT_SYMBOL(class_handle2object);
207
208 void class_handle_free_cb(cfs_rcu_head_t *rcu)
209 {
210         struct portals_handle *h = RCU2HANDLE(rcu);
211         void *ptr = (void *)(unsigned long)h->h_cookie;
212
213         if (h->h_ops->hop_free != NULL)
214                 h->h_ops->hop_free(ptr, h->h_size);
215         else
216                 OBD_FREE(ptr, h->h_size);
217 }
218 EXPORT_SYMBOL(class_handle_free_cb);
219
220 int class_handle_init(void)
221 {
222         struct handle_bucket *bucket;
223         struct timeval tv;
224         int seed[2];
225
226         LASSERT(handle_hash == NULL);
227
228         OBD_ALLOC_LARGE(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
229         if (handle_hash == NULL)
230                 return -ENOMEM;
231
232         cfs_spin_lock_init(&handle_base_lock);
233         for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
234              bucket--) {
235                 CFS_INIT_LIST_HEAD(&bucket->head);
236                 cfs_spin_lock_init(&bucket->lock);
237         }
238
239         /** bug 21430: add randomness to the initial base */
240         cfs_get_random_bytes(seed, sizeof(seed));
241         cfs_gettimeofday(&tv);
242         cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
243
244         cfs_get_random_bytes(&handle_base, sizeof(handle_base));
245         LASSERT(handle_base != 0ULL);
246
247         return 0;
248 }
249
/*
 * Force-unhash every handle still present in the hash table and return
 * the number removed (normally 0 at module unload; non-zero indicates
 * a leaked handle).
 *
 * NOTE(review): entries are unhashed while iterating with the non-safe
 * list_for_each_entry_rcu(); this relies on list_del_rcu() leaving the
 * removed entry's forward pointer intact — confirm this also holds for
 * the userspace mapping, where it aliases cfs_list_del().
 */
static int cleanup_all_handles(void)
{
        int rc;
        int i;

        for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
                struct portals_handle *h;

                cfs_spin_lock(&handle_hash[i].lock);
                list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
                        CERROR("force clean handle "LPX64" addr %p ops %p\n",
                               h->h_cookie, h, h->h_ops);

                        class_handle_unhash_nolock(h);
                        rc++;
                }
                cfs_spin_unlock(&handle_hash[i].lock);
        }

        return rc;
}
271
272 void class_handle_cleanup(void)
273 {
274         int count;
275         LASSERT(handle_hash != NULL);
276
277         count = cleanup_all_handles();
278
279         OBD_FREE_LARGE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
280         handle_hash = NULL;
281
282         if (count != 0)
283                 CERROR("handle_count at cleanup: %d\n", count);
284 }