Whamcloud - gitweb
merge b_devel into HEAD, which will become 0.7.3
[fs/lustre-release.git] / lustre / obdclass / lustre_handles.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2002 Cluster File Systems, Inc.
5  *   Author: Phil Schwan <phil@clusterfs.com>
6  *
7  *   This file is part of Lustre, http://www.lustre.org/
8  *
9  *   Lustre is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Lustre is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Lustre; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23 #define DEBUG_SUBSYSTEM S_CLASS
24 #ifdef __KERNEL__
25 # include <linux/types.h>
26 # include <linux/random.h>
27 #else 
28 # include <liblustre.h>
29 #endif 
30
31 #include <linux/obd_support.h>
32 #include <linux/lustre_handles.h>
33
/* Protects handle_hash and handle_count below. */
static spinlock_t handle_lock = SPIN_LOCK_UNLOCKED;
/* Serializes get_random_bytes() calls in class_handle_hash(); see the
 * comment there for the rationale. */
static spinlock_t random_lock = SPIN_LOCK_UNLOCKED;
/* Hash table of HANDLE_HASH_SIZE bucket heads, vmalloc'd in
 * class_handle_init() and freed in class_handle_cleanup(). */
static struct list_head *handle_hash = NULL;
/* Number of handles currently in the hash; updated under handle_lock. */
static int handle_count = 0;

/* Bucket count (16384); must remain a power of two so HANDLE_HASH_MASK
 * can select a bucket from the low bits of the cookie. */
#define HANDLE_HASH_SIZE (1 << 14)
#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
41
42 void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
43 {
44         struct list_head *bucket;
45         ENTRY;
46
47         LASSERT(h != NULL);
48         LASSERT(list_empty(&h->h_link));
49
50         /* My hypothesis is that get_random_bytes, if called from two threads at
51          * the same time, will return the same bytes. -phil */
52         spin_lock(&random_lock);
53         get_random_bytes(&h->h_cookie, sizeof(h->h_cookie));
54         spin_unlock(&random_lock);
55
56         h->h_addref = cb;
57
58         bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
59
60         CDEBUG(D_INFO, "adding object %p with handle "LPX64" to hash\n",
61                h, h->h_cookie);
62
63         spin_lock(&handle_lock);
64         list_add(&h->h_link, bucket);
65         handle_count++;
66         spin_unlock(&handle_lock);
67         EXIT;
68 }
69
70 static void class_handle_unhash_nolock(struct portals_handle *h)
71 {
72         LASSERT(!list_empty(&h->h_link));
73
74         CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
75                h, h->h_cookie);
76
77         handle_count--;
78         list_del_init(&h->h_link);
79 }
80
81 void class_handle_unhash(struct portals_handle *h)
82 {
83         spin_lock(&handle_lock);
84         class_handle_unhash_nolock(h);
85         spin_unlock(&handle_lock);
86 }
87
88 void *class_handle2object(__u64 cookie)
89 {
90         struct list_head *bucket, *tmp;
91         void *retval = NULL;
92         ENTRY;
93
94         LASSERT(handle_hash != NULL);
95
96         spin_lock(&handle_lock);
97         bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
98
99         list_for_each(tmp, bucket) {
100                 struct portals_handle *h;
101                 h = list_entry(tmp, struct portals_handle, h_link);
102
103                 if (h->h_cookie == cookie) {
104                         h->h_addref(h);
105                         retval = h;
106                         break;
107                 }
108         }
109         spin_unlock(&handle_lock);
110
111         RETURN(retval);
112 }
113
114 int class_handle_init(void)
115 {
116         struct list_head *bucket;
117
118         LASSERT(handle_hash == NULL);
119
120         OBD_VMALLOC(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
121         if (handle_hash == NULL)
122                 return -ENOMEM;
123
124         for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
125              bucket--)
126                 INIT_LIST_HEAD(bucket);
127
128         return 0;
129 }
130
131 static void cleanup_all_handles(void)
132 {
133         int i;
134
135         spin_lock(&handle_lock);
136         for (i = 0; i < HANDLE_HASH_SIZE; i++) {
137                 struct list_head *tmp, *pos;
138                 list_for_each_safe(tmp, pos, &(handle_hash[i])) {
139                         struct portals_handle *h;
140                         h = list_entry(tmp, struct portals_handle, h_link);
141
142                         CERROR("forcing cleanup for handle "LPX64"\n",
143                                h->h_cookie);
144
145                         class_handle_unhash_nolock(h);
146                 }
147         }
148         spin_unlock(&handle_lock);
149 }
150
151 void class_handle_cleanup(void)
152 {
153         LASSERT(handle_hash != NULL);
154
155         if (handle_count != 0) {
156                 CERROR("handle_count at cleanup: %d\n", handle_count);
157                 cleanup_all_handles();
158         }
159
160         OBD_VFREE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
161         handle_hash = NULL;
162
163         if (handle_count)
164                 CERROR("leaked %d handles\n", handle_count);
165 }