/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2004 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_LOV

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/obd_support.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_idl.h>
#include <linux/obd_class.h>
#include <linux/lustre_ucache.h>
/*
 * The current ucache implementation is simply taken from the group hash
 * code, almost without change. It is very simple, has very limited
 * functionality, and is probably only suitable for the group hash usage.
 */
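/*
 * A consumer supplies the per-cache callbacks and storage. A minimal
 * wiring sketch (the "gh_*" names are hypothetical, not part of this
 * file; only the field names used throughout this file are assumed):
 *
 *      static unsigned int gh_hash(struct upcall_cache *cache, __u64 key)
 *      {
 *              return (unsigned int)(key % cache->uc_hashsize);
 *      }
 *
 *      struct upcall_cache gh_cache = {
 *              .uc_name         = "group_hash",
 *              .hash            = gh_hash,
 *              .alloc_entry     = gh_alloc_entry,
 *              .free_entry      = gh_free_entry,
 *              .make_upcall     = gh_make_upcall,
 *              .parse_downcall  = gh_parse_downcall,
 *      };
 *
 * uc_hashtable, uc_hashsize, uc_hashlock and the expiry intervals must
 * be initialized before the first lookup.
 */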
void upcall_cache_init_entry(struct upcall_cache *cache,
                             struct upcall_cache_entry *entry,
                             __u64 key)
{
        UC_CACHE_SET_NEW(entry);
        INIT_LIST_HEAD(&entry->ue_hash);
        atomic_set(&entry->ue_refcount, 0);
        entry->ue_key = key;
        entry->ue_cache = cache;
        init_waitqueue_head(&entry->ue_waitq);
}
EXPORT_SYMBOL(upcall_cache_init_entry);
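/*
 * The alloc_entry() callback is expected to allocate its own entry type
 * embedding struct upcall_cache_entry and to initialize the generic part
 * via upcall_cache_init_entry(). Sketch, assuming a hypothetical
 * gh_entry type (not part of this file):
 *
 *      static struct upcall_cache_entry *
 *      gh_alloc_entry(struct upcall_cache *cache, __u64 key)
 *      {
 *              struct gh_entry *ge;
 *
 *              OBD_ALLOC(ge, sizeof(*ge));
 *              if (!ge)
 *                      return NULL;
 *              upcall_cache_init_entry(cache, &ge->ge_entry, key);
 *              return &ge->ge_entry;
 *      }
 */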
static inline struct upcall_cache_entry *
alloc_entry(struct upcall_cache *cache, __u64 key)
{
        LASSERT(cache->alloc_entry);
        return cache->alloc_entry(cache, key);
}
static void free_entry(struct upcall_cache_entry *entry)
{
        struct upcall_cache *cache = entry->ue_cache;

        LASSERT(cache->free_entry);
        LASSERT(atomic_read(&entry->ue_refcount) == 0);

        CDEBUG(D_OTHER, "destroy %s entry %p for key "LPU64"\n",
               cache->uc_name, entry, entry->ue_key);

        list_del(&entry->ue_hash);
        cache->free_entry(cache, entry);
}
static inline void get_entry(struct upcall_cache_entry *entry)
{
        atomic_inc(&entry->ue_refcount);
}
static inline void put_entry(struct upcall_cache_entry *entry)
{
        /* free on the last put, but only if the entry is no longer
         * valid (unhashed, expired, or invalid) */
        if (atomic_dec_and_test(&entry->ue_refcount) &&
            !UC_CACHE_IS_VALID(entry)) {
                free_entry(entry);
        }
}
static inline int refresh_entry(struct upcall_cache_entry *entry)
{
        struct upcall_cache *cache = entry->ue_cache;

        LASSERT(cache->make_upcall);

        return cache->make_upcall(cache, entry);
}
/* return 1 if the entry was expired and unhashed, 0 if it should stay.
 * must be called with uc_hashlock held */
static int check_unlink_entry(struct upcall_cache_entry *entry)
{
        if (UC_CACHE_IS_VALID(entry) &&
            time_before(get_seconds(), entry->ue_expire))
                return 0;

        if (UC_CACHE_IS_ACQUIRING(entry)) {
                /* don't touch an in-flight upcall within its deadline */
                if (time_before(get_seconds(), entry->ue_acquire_expire))
                        return 0;

                /* the upcall took too long: expire it and wake waiters */
                UC_CACHE_SET_EXPIRED(entry);
                wake_up_all(&entry->ue_waitq);
        } else if (!UC_CACHE_IS_INVALID(entry)) {
                UC_CACHE_SET_EXPIRED(entry);
        }

        list_del_init(&entry->ue_hash);
        if (!atomic_read(&entry->ue_refcount))
                free_entry(entry);
        return 1;
}
/*
 * currently we always use the write lock
 */
static struct upcall_cache_entry *
__get_entry(struct upcall_cache *cache, unsigned int hash, __u64 key,
            int create, int async)
{
        struct list_head *head;
        struct upcall_cache_entry *entry, *next, *new = NULL;
        int found, rc;
        ENTRY;

        LASSERT(hash < cache->uc_hashsize);

        head = &cache->uc_hashtable[hash];

find_again:
        found = 0;
        write_lock(&cache->uc_hashlock);
        list_for_each_entry_safe(entry, next, head, ue_hash) {
                /* drop invalid and expired entries while scanning */
                if (check_unlink_entry(entry))
                        continue;
                if (entry->ue_key == key) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                if (!create) {
                        write_unlock(&cache->uc_hashlock);
                        RETURN(NULL);
                }
                if (!new) {
                        /* allocate outside the lock, then rescan */
                        write_unlock(&cache->uc_hashlock);
                        new = alloc_entry(cache, key);
                        if (!new) {
                                CERROR("fail to alloc entry\n");
                                RETURN(NULL);
                        }
                        goto find_again;
                }
                list_add(&new->ue_hash, head);
                entry = new;
        } else {
                if (new) {
                        /* lost the race: somebody else added it */
                        free_entry(new);
                        new = NULL;
                }
                list_move(&entry->ue_hash, head);
        }
        get_entry(entry);

        /* at this point we have found a matching entry and hold a
         * reference on it. if it's NEW (we created it), we must give
         * it a push to refresh
         */
        if (UC_CACHE_IS_NEW(entry)) {
                LASSERT(entry == new);
                UC_CACHE_SET_ACQUIRING(entry);
                UC_CACHE_CLEAR_NEW(entry);
                entry->ue_acquire_expire = get_seconds() +
                                           cache->uc_acquire_expire;

                write_unlock(&cache->uc_hashlock);
                rc = refresh_entry(entry);
                write_lock(&cache->uc_hashlock);
                if (rc) {
                        UC_CACHE_CLEAR_ACQUIRING(entry);
                        UC_CACHE_SET_INVALID(entry);
                }
        }

        /* caller doesn't want to wait */
        if (async) {
                write_unlock(&cache->uc_hashlock);
                RETURN(entry);
        }

        /* someone (and only one) is doing an upcall for
         * this item, just wait for it to complete
         */
        if (UC_CACHE_IS_ACQUIRING(entry)) {
                wait_queue_t wait;

                init_waitqueue_entry(&wait, current);
                add_wait_queue(&entry->ue_waitq, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                write_unlock(&cache->uc_hashlock);

                /* uc_acquire_expire is in seconds, schedule_timeout()
                 * wants jiffies */
                schedule_timeout(cache->uc_acquire_expire * HZ);

                write_lock(&cache->uc_hashlock);
                remove_wait_queue(&entry->ue_waitq, &wait);
                if (UC_CACHE_IS_ACQUIRING(entry)) {
                        /* we were interrupted or the upcall failed
                         * in the middle
                         */
                        CERROR("entry %p not refreshed: cur %lu, key "LPU64", "
                               "ref %d fl %u, ac %ld, ex %ld\n",
                               entry, get_seconds(), entry->ue_key,
                               atomic_read(&entry->ue_refcount),
                               entry->ue_flags, entry->ue_acquire_expire,
                               entry->ue_expire);
                        put_entry(entry);
                        write_unlock(&cache->uc_hashlock);
                        RETURN(NULL);
                }
        }

        /* invalid means error, don't need to try again */
        if (UC_CACHE_IS_INVALID(entry)) {
                put_entry(entry);
                write_unlock(&cache->uc_hashlock);
                RETURN(NULL);
        }

        /* check expired.
         * We can't refresh an existing entry because some of its
         * memory might be shared by multiple processes.
         */
        if (check_unlink_entry(entry)) {
                /* if expired, try again. but if this entry was
                 * created by us and turned expired too quickly
                 * without any error, give the caller at least one
                 * chance to use it.
                 */
                if (entry != new) {
                        put_entry(entry);
                        write_unlock(&cache->uc_hashlock);
                        goto find_again;
                }
        }

        /* Now we know it's good */
        LASSERT(UC_CACHE_IS_VALID(entry));
        write_unlock(&cache->uc_hashlock);

        RETURN(entry);
}
struct upcall_cache_entry *
upcall_cache_get_entry(struct upcall_cache *cache, __u64 key)
{
        unsigned int hash;

        LASSERT(cache->hash);

        hash = cache->hash(cache, key);

        return __get_entry(cache, hash, key, 1, 0);
}
EXPORT_SYMBOL(upcall_cache_get_entry);
void upcall_cache_put_entry(struct upcall_cache_entry *entry)
{
        struct upcall_cache *cache = entry->ue_cache;

        write_lock(&cache->uc_hashlock);
        LASSERTF(atomic_read(&entry->ue_refcount) > 0,
                 "entry %p: ref %d\n", entry, atomic_read(&entry->ue_refcount));
        put_entry(entry);
        write_unlock(&cache->uc_hashlock);
}
EXPORT_SYMBOL(upcall_cache_put_entry);
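/*
 * Typical lookup pattern for a consumer (sketch; "use_entry" stands in
 * for whatever the caller does with the cached data):
 *
 *      struct upcall_cache_entry *e;
 *
 *      e = upcall_cache_get_entry(cache, key);
 *      if (e != NULL) {
 *              use_entry(e);
 *              upcall_cache_put_entry(e);
 *      }
 *
 * upcall_cache_get_entry() may sleep waiting for the upcall to complete,
 * so it must not be called with uc_hashlock held.
 */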
int upcall_cache_downcall(struct upcall_cache *cache, __u64 key,
                          int err, void *args)
{
        struct list_head *head;
        struct upcall_cache_entry *entry;
        unsigned int hash;
        int found = 0, rc = 0;
        ENTRY;

        LASSERT(cache->hash);

        hash = cache->hash(cache, key);
        LASSERT(hash < cache->uc_hashsize);

        head = &cache->uc_hashtable[hash];

        write_lock(&cache->uc_hashlock);
        list_for_each_entry(entry, head, ue_hash) {
                if (entry->ue_key == key) {
                        found = 1;
                        break;
                }
        }
        if (!found) {
                /* haven't found it, which is possible: the entry may
                 * already have expired and been unhashed */
                write_unlock(&cache->uc_hashlock);
                CWARN("key "LPU64" entry not found\n", key);
                RETURN(-EINVAL);
        }

        /* the upcall itself reported failure */
        if (err) {
                UC_CACHE_SET_INVALID(entry);
                GOTO(out, rc = -EINVAL);
        }

        if (!UC_CACHE_IS_ACQUIRING(entry) ||
            UC_CACHE_IS_INVALID(entry) ||
            UC_CACHE_IS_EXPIRED(entry)) {
                CWARN("stale entry %p: cur %lu, key "LPU64", ref %d, "
                      "fl %u, ac %ld, ex %ld\n",
                      entry, get_seconds(), entry->ue_key,
                      atomic_read(&entry->ue_refcount), entry->ue_flags,
                      entry->ue_acquire_expire, entry->ue_expire);
                GOTO(out, rc = -EINVAL);
        }

        /* hold an extra reference while parsing with the lock dropped */
        atomic_inc(&entry->ue_refcount);
        write_unlock(&cache->uc_hashlock);
        rc = cache->parse_downcall(cache, entry, args);
        write_lock(&cache->uc_hashlock);
        atomic_dec(&entry->ue_refcount);
        if (rc) {
                UC_CACHE_SET_INVALID(entry);
                list_del_init(&entry->ue_hash);
                GOTO(out, rc);
        }

        entry->ue_expire = get_seconds() + cache->uc_entry_expire;
        UC_CACHE_SET_VALID(entry);
        CDEBUG(D_OTHER, "create ucache entry %p(key "LPU64")\n",
               entry, entry->ue_key);
out:
        wake_up_all(&entry->ue_waitq);
        write_unlock(&cache->uc_hashlock);
        RETURN(rc);
}
EXPORT_SYMBOL(upcall_cache_downcall);
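/*
 * upcall_cache_downcall() is the second half of the handshake started
 * by make_upcall(): the upcall typically execs a userspace helper, and
 * the helper writes the answer back through some kernel interface
 * (e.g. a /proc write handler) that ends up here. A hypothetical sketch
 * of such a handler (names and struct are illustrative only):
 *
 *      static int gh_proc_write(struct file *file, const char *buffer,
 *                               unsigned long count, void *data)
 *      {
 *              struct gh_downcall_data dc;
 *
 *              if (count != sizeof(dc))
 *                      return -EINVAL;
 *              if (copy_from_user(&dc, buffer, sizeof(dc)))
 *                      return -EFAULT;
 *              return upcall_cache_downcall(&gh_cache, dc.dc_key,
 *                                           dc.dc_err, &dc);
 *      }
 */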
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key)
{
        struct list_head *head;
        struct upcall_cache_entry *entry;
        unsigned int hash;
        int found = 0;
        ENTRY;

        hash = cache->hash(cache, key);
        LASSERT(hash < cache->uc_hashsize);

        head = &cache->uc_hashtable[hash];

        write_lock(&cache->uc_hashlock);
        list_for_each_entry(entry, head, ue_hash) {
                if (entry->ue_key == key) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                /* mark it expired; free it immediately if nobody
                 * holds a reference */
                UC_CACHE_SET_EXPIRED(entry);
                if (!atomic_read(&entry->ue_refcount))
                        free_entry(entry);
        }
        write_unlock(&cache->uc_hashlock);
        EXIT;
}
EXPORT_SYMBOL(upcall_cache_flush_one);
/* 'sync' is currently unused */
static void cache_flush(struct upcall_cache *cache, int force, int sync)
{
        struct upcall_cache_entry *entry, *next;
        int i;
        ENTRY;

        write_lock(&cache->uc_hashlock);
        for (i = 0; i < cache->uc_hashsize; i++) {
                list_for_each_entry_safe(entry, next,
                                         &cache->uc_hashtable[i], ue_hash) {
                        if (!force && atomic_read(&entry->ue_refcount)) {
                                /* busy entries are only marked expired */
                                UC_CACHE_SET_EXPIRED(entry);
                                continue;
                        }
                        LASSERT(!atomic_read(&entry->ue_refcount));
                        free_entry(entry);
                }
        }
        write_unlock(&cache->uc_hashlock);
        EXIT;
}
void upcall_cache_flush_idle(struct upcall_cache *cache)
{
        cache_flush(cache, 0, 0);
}

void upcall_cache_flush_all(struct upcall_cache *cache)
{
        cache_flush(cache, 1, 0);
}
EXPORT_SYMBOL(upcall_cache_flush_idle);
EXPORT_SYMBOL(upcall_cache_flush_all);
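/*
 * flush_idle() expires everything but leaves referenced entries hashed,
 * while flush_all() asserts that nothing is referenced any more; the
 * latter is meant for teardown, e.g. (hypothetical):
 *
 *      static void __exit gh_exit(void)
 *      {
 *              upcall_cache_flush_all(&gh_cache);
 *      }
 */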