/* Imported via Whamcloud gitweb:
 *   land 0.5.20.3 b_devel onto HEAD (b_devel will remain)
 *   [fs/lustre-release.git] / lustre / obdclass / lustre_handles.c
 */
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2002 Cluster File Systems, Inc.
5  *   Author: Phil Schwan <phil@clusterfs.com>
6  *
7  *   This file is part of Portals, http://www.sf.net/projects/sandiaportals/
8  *
9  *   Portals is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2.1 of the GNU Lesser General
11  *   Public License as published by the Free Software Foundation.
12  *
13  *   Portals is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU Lesser General Public License for more details.
17  *
18  *   You should have received a copy of the GNU Lesser General Public
19  *   License along with Portals; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23 #define DEBUG_SUBSYSTEM S_PORTALS
24 #ifdef __KERNEL__
25 #include <linux/types.h>
26 #include <linux/random.h>
27 #else 
28 #include <liblustre.h>
29 #endif 
30
31
32 #include <linux/kp30.h>
33 #include <linux/lustre_handles.h>
34
/* Protects handle_hash bucket membership and handle_count. */
static spinlock_t handle_lock = SPIN_LOCK_UNLOCKED;
/* Serializes get_random_bytes(); see the comment in class_handle_hash(). */
static spinlock_t random_lock = SPIN_LOCK_UNLOCKED;
/* Hash table of HANDLE_HASH_SIZE bucket heads; allocated by
 * class_handle_init(), freed by class_handle_cleanup(). */
static struct list_head *handle_hash = NULL;
/* Number of handles currently in the hash; modified under handle_lock. */
static int handle_count = 0;

#define HANDLE_HASH_SIZE (1 << 14)
#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
42
43 void class_handle_hash(struct portals_handle *h, portals_handle_addref_cb cb)
44 {
45         struct list_head *bucket;
46         ENTRY;
47
48         LASSERT(h != NULL);
49         LASSERT(list_empty(&h->h_link));
50
51         /* My hypothesis is that get_random_bytes, if called from two threads at
52          * the same time, will return the same bytes. -phil */
53         spin_lock(&random_lock);
54         get_random_bytes(&h->h_cookie, sizeof(h->h_cookie));
55         spin_unlock(&random_lock);
56
57         h->h_addref = cb;
58
59         bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
60
61         CDEBUG(D_INFO, "adding object %p with handle "LPX64" to hash\n",
62                h, h->h_cookie);
63
64         spin_lock(&handle_lock);
65         list_add(&h->h_link, bucket);
66         handle_count++;
67         spin_unlock(&handle_lock);
68         EXIT;
69 }
70
71 static void class_handle_unhash_nolock(struct portals_handle *h)
72 {
73         LASSERT(!list_empty(&h->h_link));
74
75         CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
76                h, h->h_cookie);
77
78         handle_count--;
79         list_del_init(&h->h_link);
80 }
81
/* Locked wrapper: remove @h from the handle hash under handle_lock. */
void class_handle_unhash(struct portals_handle *h)
{
        spin_lock(&handle_lock);
        class_handle_unhash_nolock(h);
        spin_unlock(&handle_lock);
}
88
89 void *class_handle2object(__u64 cookie)
90 {
91         struct list_head *bucket, *tmp;
92         void *retval = NULL;
93         ENTRY;
94
95         LASSERT(handle_hash != NULL);
96
97         spin_lock(&handle_lock);
98         bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
99
100         list_for_each(tmp, bucket) {
101                 struct portals_handle *h;
102                 h = list_entry(tmp, struct portals_handle, h_link);
103
104                 if (h->h_cookie == cookie) {
105                         h->h_addref(h);
106                         retval = h;
107                         break;
108                 }
109         }
110         spin_unlock(&handle_lock);
111
112         RETURN(retval);
113 }
114
115 int class_handle_init(void)
116 {
117         struct list_head *bucket;
118
119         LASSERT(handle_hash == NULL);
120
121         PORTAL_ALLOC(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
122         if (handle_hash == NULL)
123                 return -ENOMEM;
124
125         for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
126              bucket--)
127                 INIT_LIST_HEAD(bucket);
128
129         return 0;
130 }
131
132 static void cleanup_all_handles(void)
133 {
134         int i;
135
136         spin_lock(&handle_lock);
137         for (i = 0; i < HANDLE_HASH_SIZE; i++) {
138                 struct list_head *tmp, *pos;
139                 list_for_each_safe(tmp, pos, &(handle_hash[i])) {
140                         struct portals_handle *h;
141                         h = list_entry(tmp, struct portals_handle, h_link);
142
143                         CERROR("forcing cleanup for handle "LPX64"\n",
144                                h->h_cookie);
145
146                         class_handle_unhash_nolock(h);
147                 }
148         }
149         spin_lock(&handle_lock);
150 }
151
152 void class_handle_cleanup(void)
153 {
154         LASSERT(handle_hash != NULL);
155
156         if (handle_count != 0) {
157                 CERROR("handle_count at cleanup: %d\n", handle_count);
158                 cleanup_all_handles();
159         }
160
161         PORTAL_FREE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
162         handle_hash = NULL;
163
164         if (handle_count)
165                 CERROR("leaked %d handles\n", handle_count);
166 }