/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lustre_handles.c
 *
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <obd_support.h>
#include <lustre_handles.h>
#include <lustre_lib.h>
static __u64 handle_base;
#define HANDLE_INCR 7
static DEFINE_SPINLOCK(handle_base_lock);

static struct handle_bucket {
	spinlock_t	 lock;
	struct list_head head;
} *handle_hash;

#define HANDLE_HASH_SIZE (1 << 16)
#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
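/*
 * A brief worked example of the bucket mapping (the cookie value is
 * illustrative, not from this file): with HANDLE_HASH_SIZE = 1 << 16, a
 * cookie such as 0x123456789abcdef0 is masked down to its low 16 bits,
 * 0xdef0 = 57072, selecting handle_hash[57072]. Every handle whose
 * cookie shares those low bits chains off that bucket's list head.
 */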
/*
 * Generate a unique 64bit cookie (hash) for a handle and insert it into
 * the global (per-node) hash-table.
 */
void class_handle_hash(struct portals_handle *h,
		       struct portals_handle_ops *ops)
{
	struct handle_bucket *bucket;

	LASSERT(h != NULL);
	LASSERT(list_empty(&h->h_link));

	/*
	 * This is a fast but simplistic cookie generation algorithm; it
	 * will need a re-do at some point in the future for security.
	 */
	spin_lock(&handle_base_lock);
	handle_base += HANDLE_INCR;

	if (unlikely(handle_base == 0)) {
		/*
		 * Cookie of zero is "dangerous", because in many places it's
		 * assumed that 0 means "unassigned" handle, not bound to any
		 * object.
		 */
		CWARN("The universe has been exhausted: cookie wrap-around.\n");
		handle_base += HANDLE_INCR;
	}
	h->h_cookie = handle_base;
	spin_unlock(&handle_base_lock);

	h->h_ops = ops;
	spin_lock_init(&h->h_lock);

	bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
	spin_lock(&bucket->lock);
	list_add_rcu(&h->h_link, &bucket->head);
	h->h_in = 1;
	spin_unlock(&bucket->lock);

	CDEBUG(D_INFO, "added object %p with handle %#llx to hash\n",
	       h, h->h_cookie);
}
EXPORT_SYMBOL(class_handle_hash);
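/*
 * A minimal usage sketch (hypothetical caller: 'struct my_object',
 * 'my_obj_ops', 'my_owner' and the 'mo_' fields are illustrative names,
 * not part of this file). The object embeds a struct portals_handle,
 * registers it here, and then hands the opaque cookie to peers instead
 * of a raw pointer:
 *
 *	static struct portals_handle_ops my_obj_ops = {
 *		.hop_addref	= my_obj_addref,
 *		.hop_free	= my_obj_free,
 *	};
 *
 *	INIT_LIST_HEAD(&obj->mo_handle.h_link);
 *	obj->mo_handle.h_owner = my_owner;	(matched again at lookup)
 *	class_handle_hash(&obj->mo_handle, &my_obj_ops);
 *	cookie = obj->mo_handle.h_cookie;	(safe to send over the wire)
 */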
static void class_handle_unhash_nolock(struct portals_handle *h)
{
	if (list_empty(&h->h_link)) {
		CERROR("removing an already-removed handle (%#llx)\n",
		       h->h_cookie);
		return;
	}

	CDEBUG(D_INFO, "removing object %p with handle %#llx from hash\n",
	       h, h->h_cookie);

	spin_lock(&h->h_lock);
	if (h->h_in == 0) {
		spin_unlock(&h->h_lock);
		return;
	}
	h->h_in = 0;
	spin_unlock(&h->h_lock);
	list_del_rcu(&h->h_link);
}

void class_handle_unhash(struct portals_handle *h)
{
	struct handle_bucket *bucket;
	bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);

	spin_lock(&bucket->lock);
	class_handle_unhash_nolock(h);
	spin_unlock(&bucket->lock);
}
EXPORT_SYMBOL(class_handle_unhash);
void class_handle_hash_back(struct portals_handle *h)
{
	struct handle_bucket *bucket;

	bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);

	spin_lock(&bucket->lock);
	list_add_rcu(&h->h_link, &bucket->head);
	h->h_in = 1;
	spin_unlock(&bucket->lock);
}
EXPORT_SYMBOL(class_handle_hash_back);
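/*
 * Note (descriptive comment, not from the original sources): unlike
 * class_handle_hash(), class_handle_hash_back() reuses the cookie
 * already stored in h->h_cookie rather than generating a new one, so it
 * is only meaningful for a handle that was previously hashed and then
 * removed with class_handle_unhash().
 */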
void *class_handle2object(__u64 cookie, const void *owner)
{
	struct handle_bucket *bucket;
	struct portals_handle *h;
	void *retval = NULL;

	LASSERT(handle_hash != NULL);

	/* Be careful when you want to change this code. See the
	 * rcu_read_lock() definition at the top of this file. - jxiong */
	bucket = handle_hash + (cookie & HANDLE_HASH_MASK);

	rcu_read_lock();
	list_for_each_entry_rcu(h, &bucket->head, h_link) {
		if (h->h_cookie != cookie || h->h_owner != owner)
			continue;

		spin_lock(&h->h_lock);
		if (likely(h->h_in != 0)) {
			h->h_ops->hop_addref(h);
			retval = h;
		}
		spin_unlock(&h->h_lock);
		break;
	}
	rcu_read_unlock();

	return retval;
}
EXPORT_SYMBOL(class_handle2object);
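/*
 * A lookup sketch under the same illustrative names as above. It
 * assumes the portals_handle is the *first* member of the embedding
 * object, so the returned handle pointer doubles as the object pointer;
 * hop_addref() has already taken a reference that the caller must
 * eventually drop through its own put path:
 *
 *	struct my_object {
 *		struct portals_handle	mo_handle;	(must be first)
 *		...
 *	};
 *
 *	struct my_object *obj = class_handle2object(cookie, my_owner);
 *
 *	if (obj == NULL)
 *		the cookie was stale, forged, or already unhashed
 */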
void class_handle_free_cb(struct rcu_head *rcu)
{
	struct portals_handle *h;
	void *ptr;

	h = container_of(rcu, struct portals_handle, h_rcu);
	ptr = (void *)(unsigned long)h->h_cookie;

	if (h->h_ops->hop_free != NULL)
		h->h_ops->hop_free(ptr, h->h_size);
	else
		OBD_FREE(ptr, h->h_size);
}
EXPORT_SYMBOL(class_handle_free_cb);
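/*
 * A teardown sketch under the same assumptions as above. In the Lustre
 * tree this sequencing is normally wrapped by the OBD_FREE_RCU() macro;
 * the expansion below is a paraphrase, not a verbatim copy:
 *
 *	class_handle_unhash(&obj->mo_handle);
 *	obj->mo_handle.h_cookie = (unsigned long)obj;
 *	obj->mo_handle.h_size = sizeof(*obj);
 *	call_rcu(&obj->mo_handle.h_rcu, class_handle_free_cb);
 *
 * Repurposing h_cookie to carry the allocation's address once the
 * handle is unhashed is what lets class_handle_free_cb() above recover
 * 'ptr' without an extra field.
 */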
int class_handle_init(void)
{
	struct handle_bucket *bucket;

	LASSERT(handle_hash == NULL);

	OBD_ALLOC_LARGE(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
	if (handle_hash == NULL)
		return -ENOMEM;

	for (bucket = handle_hash + HANDLE_HASH_SIZE - 1;
	     bucket >= handle_hash; bucket--) {
		INIT_LIST_HEAD(&bucket->head);
		spin_lock_init(&bucket->lock);
	}

	/* seed handle_base so cookies are not trivially predictable */
	cfs_get_random_bytes(&handle_base, sizeof(handle_base));
	LASSERT(handle_base != 0ULL);

	return 0;
}
static int cleanup_all_handles(void)
{
	int rc;
	int i;

	for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
		struct portals_handle *h;

		spin_lock(&handle_hash[i].lock);
		list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
			CERROR("force clean handle %#llx addr %p ops %p\n",
			       h->h_cookie, h, h->h_ops);
			class_handle_unhash_nolock(h);
			rc++;
		}
		spin_unlock(&handle_hash[i].lock);
	}

	return rc;
}
void class_handle_cleanup(void)
{
	int count;

	LASSERT(handle_hash != NULL);
	count = cleanup_all_handles();

	OBD_FREE_LARGE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
	handle_hash = NULL;

	if (count != 0)
		CERROR("handle_count at cleanup: %d\n", count);
}