4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/obdclass/lustre_handles.c
34 * Author: Phil Schwan <phil@clusterfs.com>
37 #define DEBUG_SUBSYSTEM S_CLASS
39 #include <linux/random.h>
41 #include <obd_support.h>
42 #include <lustre_handles.h>
43 #include <lustre_lib.h>
46 static __u64 handle_base;
48 static DEFINE_SPINLOCK(handle_base_lock);
50 static struct handle_bucket {
52 struct list_head head;
55 #define HANDLE_HASH_SIZE (1 << 16)
56 #define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
/*
 * class_handle_hash - assign a unique cookie to @h and insert it into the
 * global handle hash table.
 *
 * @h:     handle to register; must not already be hashed (h_link empty).
 * @owner: opaque owner tag recorded on the handle and later matched by
 *         pointer identity in class_handle2object().
 *
 * NOTE(review): this extract is missing several original lines (e.g. the
 * opening brace and the h->h_owner assignment); comments below describe
 * only the code that is visible here.
 */
59 * Generate a unique 64bit cookie (hash) for a handle and insert it into
60 * global (per-node) hash-table.
62 void class_handle_hash(struct portals_handle *h, const char *owner)
64 struct handle_bucket *bucket;
/* A handle may be hashed only once; double-hash would corrupt the list. */
69 LASSERT(list_empty(&h->h_link));
72 * This is fast, but simplistic cookie generation algorithm, it will
73 * need a re-do at some point in the future for security.
/* Serialize cookie allocation so every handle gets a distinct value. */
75 spin_lock(&handle_base_lock);
76 handle_base += HANDLE_INCR;
78 if (unlikely(handle_base == 0)) {
80 * Cookie of zero is "dangerous", because in many places it's
81 * assumed that 0 means "unassigned" handle, not bound to any
/* Step past zero so the reserved "unassigned" value is never handed out. */
84 CWARN("The universe has been exhausted: cookie wrap-around.\n");
85 handle_base += HANDLE_INCR;
87 h->h_cookie = handle_base;
88 spin_unlock(&handle_base_lock);
91 spin_lock_init(&h->h_lock);
/* Low bits of the cookie select the hash bucket. */
93 bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
94 spin_lock(&bucket->lock);
/*
 * RCU insertion: lookups in class_handle2object() traverse this list with
 * list_for_each_entry_rcu() instead of taking the bucket lock.
 */
95 list_add_rcu(&h->h_link, &bucket->head);
97 spin_unlock(&bucket->lock);
99 CDEBUG(D_INFO, "added object %p with handle %#llx to hash\n",
103 EXPORT_SYMBOL(class_handle_hash);
/*
 * class_handle_unhash_nolock - remove @h from its bucket's list.
 *
 * "_nolock" means the caller must already hold the owning bucket's lock
 * (see class_handle_unhash() and cleanup_all_handles()).
 */
105 static void class_handle_unhash_nolock(struct portals_handle *h)
/* Removing an unhashed handle is a caller bug; report it loudly. */
107 if (list_empty(&h->h_link)) {
108 CERROR("removing an already-removed handle (%#llx)\n",
113 CDEBUG(D_INFO, "removing object %p with handle %#llx from hash\n",
/*
 * NOTE(review): two unlocks of h->h_lock are visible below but the code
 * between them is not part of this extract -- presumably an early-exit
 * branch under the lock; confirm against the full source.
 */
116 spin_lock(&h->h_lock);
118 spin_unlock(&h->h_lock);
122 spin_unlock(&h->h_lock);
/* RCU removal; pairs with list_add_rcu() in class_handle_hash(). */
123 list_del_rcu(&h->h_link);
/*
 * class_handle_unhash - public unhash entry point.
 *
 * Derives @h's bucket from its cookie (same masking as the insert path),
 * then removes the handle under that bucket's lock.
 */
126 void class_handle_unhash(struct portals_handle *h)
128 struct handle_bucket *bucket;
129 bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
131 spin_lock(&bucket->lock);
132 class_handle_unhash_nolock(h);
133 spin_unlock(&bucket->lock);
135 EXPORT_SYMBOL(class_handle_unhash);
/*
 * class_handle2object - look up the object registered under @cookie.
 *
 * @cookie: 64-bit handle cookie assigned by class_handle_hash().
 * @owner:  owner tag; compared by pointer identity, not string contents.
 *
 * Walks the bucket's list under RCU and takes a reference with
 * refcount_inc_not_zero(), so a handle whose refcount already dropped to
 * zero (mid-teardown) is skipped rather than resurrected.
 *
 * NOTE(review): the rcu_read_lock()/unlock and return statements are not
 * visible in this extract.
 */
137 void *class_handle2object(u64 cookie, const char *owner)
139 struct handle_bucket *bucket;
140 struct portals_handle *h;
/* Table must have been set up by class_handle_init(). */
145 LASSERT(handle_hash != NULL);
148 * Be careful when you want to change this code. See the
149 * rcu_read_lock() definition on top this file. - jxiong
151 bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
/* Lockless traversal; writers use list_add_rcu()/list_del_rcu(). */
154 list_for_each_entry_rcu(h, &bucket->head, h_link) {
155 if (h->h_cookie != cookie || h->h_owner != owner)
/* Only succeed if the handle is still live (refcount > 0). */
158 if (refcount_inc_not_zero(&h->h_ref)) {
159 CDEBUG(D_INFO, "GET %s %p refcount=%d\n",
161 refcount_read(&h->h_ref));
170 EXPORT_SYMBOL(class_handle2object);
/*
 * class_handle_init - allocate and initialize the global handle table.
 *
 * Allocates HANDLE_HASH_SIZE buckets, initializes each bucket's list head
 * and spinlock, and seeds the cookie generator with random bytes so the
 * first cookie is unpredictable (zero is reserved for "unassigned").
 *
 * NOTE(review): the return statements are not visible in this extract.
 */
172 int class_handle_init(void)
174 struct handle_bucket *bucket;
/* Must not be initialized twice. */
176 LASSERT(handle_hash == NULL);
178 OBD_ALLOC_LARGE(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
179 if (handle_hash == NULL)
/* Walk the bucket array from the last entry back to the first. */
182 for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
184 INIT_LIST_HEAD(&bucket->head);
185 spin_lock_init(&bucket->lock);
/* Random starting point for handle_base; must not come up zero. */
188 get_random_bytes(&handle_base, sizeof(handle_base));
189 LASSERT(handle_base != 0ULL);
/*
 * cleanup_all_handles - force-unhash every handle left in the table.
 *
 * Iterates all buckets under their locks, logging each surviving handle
 * (these are leaks at shutdown) before unhashing it. Returns a count via
 * rc -- NOTE(review): the rc accumulation and return lines are not
 * visible in this extract; confirm against the full source.
 */
194 static int cleanup_all_handles(void)
199 for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
200 struct portals_handle *h;
202 spin_lock(&handle_hash[i].lock);
203 list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
/* Any handle still hashed here was never unhashed by its owner. */
204 CERROR("force clean handle %#llx addr %p owner %p\n",
205 h->h_cookie, h, h->h_owner);
/* Bucket lock is held by this loop, so the _nolock variant is safe. */
207 class_handle_unhash_nolock(h);
210 spin_unlock(&handle_hash[i].lock);
/*
 * class_handle_cleanup - module teardown for the handle table.
 *
 * Force-cleans any handles still registered, frees the bucket array, and
 * logs the number of leaked handles found during cleanup.
 */
216 void class_handle_cleanup(void)
220 LASSERT(handle_hash != NULL);
222 count = cleanup_all_handles();
224 OBD_FREE_LARGE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
/* A non-zero count means some owner failed to unhash its handle. */
228 CERROR("handle_count at cleanup: %d\n", count);