/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Supplementary groups cache.
 *
 * Copyright (c) 2004 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_SEC

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/kmod.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/version.h>
#include <linux/unistd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/segment.h>

#include <obd_support.h>
#include <lustre_lib.h>
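
/*
 * Entry life cycle, summarized from the code below: a new entry starts
 * NEW, is switched to ACQUIRING while the upcall to userspace runs, and
 * becomes VALID when the matching downcall arrives in time, or INVALID
 * when the upcall/downcall fails.  VALID entries turn EXPIRED once
 * ue_expire passes (or on a flush).  All state lives on per-bucket hash
 * chains protected by uc_lock; racing lookups sleep on ue_waitq until
 * the downcall (or a timeout) wakes them.
 */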

static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
                                              __u64 key, void *args)
{
        struct upcall_cache_entry *entry;

        OBD_ALLOC_PTR(entry);
        if (!entry)
                return NULL;

        UC_CACHE_SET_NEW(entry);
        INIT_LIST_HEAD(&entry->ue_hash);
        entry->ue_key = key;
        atomic_set(&entry->ue_refcount, 0);
        init_waitqueue_head(&entry->ue_waitq);
        if (cache->uc_ops->init_entry)
                cache->uc_ops->init_entry(entry, args);
        return entry;
}

/* protected by cache lock */
static void free_entry(struct upcall_cache *cache,
                       struct upcall_cache_entry *entry)
{
        if (cache->uc_ops->free_entry)
                cache->uc_ops->free_entry(cache, entry);

        list_del(&entry->ue_hash);
        CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
               entry, entry->ue_key);
        OBD_FREE_PTR(entry);
}

static inline int upcall_compare(struct upcall_cache *cache,
                                 struct upcall_cache_entry *entry,
                                 __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;
        if (cache->uc_ops->upcall_compare)
                return cache->uc_ops->upcall_compare(cache, entry, key, args);
        return 0;
}

static inline int downcall_compare(struct upcall_cache *cache,
                                   struct upcall_cache_entry *entry,
                                   __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;
        if (cache->uc_ops->downcall_compare)
                return cache->uc_ops->downcall_compare(cache, entry, key, args);
        return 0;
}

static inline void get_entry(struct upcall_cache_entry *entry)
{
        atomic_inc(&entry->ue_refcount);
}

static inline void put_entry(struct upcall_cache *cache,
                             struct upcall_cache_entry *entry)
{
        if (atomic_dec_and_test(&entry->ue_refcount) &&
            (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
                free_entry(cache, entry);
        }
}

static int check_unlink_entry(struct upcall_cache *cache,
                              struct upcall_cache_entry *entry)
{
        /* valid and not yet expired: keep it linked */
        if (UC_CACHE_IS_VALID(entry) &&
            time_before(jiffies, entry->ue_expire))
                return 0;

        if (UC_CACHE_IS_ACQUIRING(entry)) {
                if (time_before(jiffies, entry->ue_acquire_expire))
                        return 0;

                UC_CACHE_SET_EXPIRED(entry);
                wake_up_all(&entry->ue_waitq);
        } else if (!UC_CACHE_IS_INVALID(entry)) {
                UC_CACHE_SET_EXPIRED(entry);
        }

        list_del_init(&entry->ue_hash);
        if (!atomic_read(&entry->ue_refcount))
                free_entry(cache, entry);
        return 1;
}

static inline int refresh_entry(struct upcall_cache *cache,
                                struct upcall_cache_entry *entry)
{
        LASSERT(cache->uc_ops->do_upcall);
        return cache->uc_ops->do_upcall(cache, entry);
}

struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                  __u64 key, void *args)
{
        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
        struct list_head *head;
        wait_queue_t wait;
        int rc, found;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
        found = 0;
        spin_lock(&cache->uc_lock);
        list_for_each_entry_safe(entry, next, head, ue_hash) {
                /* check invalid & expired items */
                if (check_unlink_entry(cache, entry))
                        continue;
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (!found) { /* didn't find it */
                if (!new) {
                        /* allocate without holding the lock, then retry */
                        spin_unlock(&cache->uc_lock);
                        new = alloc_entry(cache, key, args);
                        if (!new) {
                                CERROR("fail to alloc entry\n");
                                RETURN(ERR_PTR(-ENOMEM));
                        }
                        goto find_again;
                } else {
                        list_add(&new->ue_hash, head);
                        entry = new;
                }
        } else {
                if (new) {
                        /* lost the race against another allocator */
                        free_entry(cache, new);
                        new = NULL;
                }
                list_move(&entry->ue_hash, head);
        }
        get_entry(entry);

        /* acquire for new one */
        if (UC_CACHE_IS_NEW(entry)) {
                UC_CACHE_SET_ACQUIRING(entry);
                UC_CACHE_CLEAR_NEW(entry);
                entry->ue_acquire_expire = jiffies + cache->uc_acquire_expire;
                spin_unlock(&cache->uc_lock);
                rc = refresh_entry(cache, entry);
                spin_lock(&cache->uc_lock);
                if (rc < 0) {
                        UC_CACHE_CLEAR_ACQUIRING(entry);
                        UC_CACHE_SET_INVALID(entry);
                }
                /* fall through */
        }

        /* someone (and only one) is doing an upcall upon this item,
         * just wait for it to complete */
        if (UC_CACHE_IS_ACQUIRING(entry)) {
                unsigned long expiry = jiffies + cache->uc_acquire_expire;

                init_waitqueue_entry(&wait, current);
                add_wait_queue(&entry->ue_waitq, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock(&cache->uc_lock);

                schedule_timeout(cache->uc_acquire_expire);

                spin_lock(&cache->uc_lock);
                remove_wait_queue(&entry->ue_waitq, &wait);
                if (UC_CACHE_IS_ACQUIRING(entry)) {
                        /* we're interrupted or the upcall failed midway;
                         * log before the final put can free the entry */
                        rc = time_before(jiffies, expiry) ? -EINTR : -ETIMEDOUT;
                        CERROR("acquire for key "LPU64" failed: %d\n",
                               entry->ue_key, rc);
                        put_entry(cache, entry);
                        GOTO(out, entry = ERR_PTR(rc));
                }
                /* fall through */
        }

        /* invalid means error, don't need to try again */
        if (UC_CACHE_IS_INVALID(entry)) {
                put_entry(cache, entry);
                GOTO(out, entry = ERR_PTR(-EIDRM));
        }

        /* check expired.
         * We can't refresh the existing one because some
         * memory might be shared by multiple processes.
         */
        if (check_unlink_entry(cache, entry)) {
                /* if expired, try again. but if this entry was
                 * created by us and turned expired so quickly
                 * without any error, it should at least be given
                 * a chance to be used once.
                 */
                if (entry != new) {
                        put_entry(cache, entry);
                        spin_unlock(&cache->uc_lock);
                        new = NULL;
                        goto find_again;
                }
        }

        /* Now we know it's good */
out:
        spin_unlock(&cache->uc_lock);
        RETURN(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry);
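
/*
 * Typical caller pattern (a sketch; "my_cache" and the uid key are
 * illustrative, not names defined in this file):
 *
 *      struct upcall_cache_entry *e;
 *
 *      e = upcall_cache_get_entry(my_cache, (__u64)uid, NULL);
 *      if (IS_ERR(e))
 *              return PTR_ERR(e);
 *      ... read the ops-specific payload carried by 'e' ...
 *      upcall_cache_put_entry(my_cache, e);
 *
 * Every successful get must be paired with a put; the final put of an
 * INVALID or EXPIRED entry frees it.
 */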

void upcall_cache_put_entry(struct upcall_cache *cache,
                            struct upcall_cache_entry *entry)
{
        if (!entry)
                return;

        LASSERT(atomic_read(&entry->ue_refcount) > 0);
        spin_lock(&cache->uc_lock);
        put_entry(cache, entry);
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_put_entry);

int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                          void *args)
{
        struct upcall_cache_entry *entry = NULL;
        struct list_head *head;
        int found = 0, rc = 0;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (downcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        get_entry(entry);
                        break;
                }
        }

        if (!found) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
                       cache->uc_name, key);
                /* haven't found, it's possible */
                spin_unlock(&cache->uc_lock);
                RETURN(-EINVAL);
        }

        if (err) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n",
                       cache->uc_name, entry->ue_key, err);
                GOTO(out, rc = -EINVAL);
        }

        if (!UC_CACHE_IS_ACQUIRING(entry)) {
                CDEBUG(D_RPCTRACE, "%s: found uptodate entry %p (key "LPU64")\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = 0);
        }

        if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
                CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = -EINVAL);
        }

        spin_unlock(&cache->uc_lock);
        if (cache->uc_ops->parse_downcall)
                rc = cache->uc_ops->parse_downcall(cache, entry, args);
        spin_lock(&cache->uc_lock);
        if (rc)
                GOTO(out, rc);

        entry->ue_expire = jiffies + cache->uc_entry_expire;
        UC_CACHE_SET_VALID(entry);
        CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
               cache->uc_name, entry, entry->ue_key);
out:
        if (rc) {
                UC_CACHE_SET_INVALID(entry);
                list_del_init(&entry->ue_hash);
        }
        UC_CACHE_CLEAR_ACQUIRING(entry);
        spin_unlock(&cache->uc_lock);
        wake_up_all(&entry->ue_waitq);
        put_entry(cache, entry);

        RETURN(rc);
}
EXPORT_SYMBOL(upcall_cache_downcall);
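
/*
 * The downcall completes the round trip started by refresh_entry(): the
 * do_upcall hook typically execs a userspace helper (cache->uc_upcall)
 * which resolves the key and writes the answer back through a handler
 * that ends here.  A hypothetical write handler might look like this
 * (sketch only; "my_downcall_data" is not a real Lustre type):
 *
 *      static int my_downcall_write(struct upcall_cache *cache,
 *                                   struct my_downcall_data *data)
 *      {
 *              return upcall_cache_downcall(cache, data->err,
 *                                           data->key, data);
 *      }
 *
 * Note that parse_downcall() runs with uc_lock dropped, so readers must
 * not touch the payload until they observe the entry as VALID.
 */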

static void cache_flush(struct upcall_cache *cache, int force)
{
        struct upcall_cache_entry *entry, *next;
        int i;
        ENTRY;

        spin_lock(&cache->uc_lock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
                list_for_each_entry_safe(entry, next,
                                         &cache->uc_hashtable[i], ue_hash) {
                        if (!force && atomic_read(&entry->ue_refcount)) {
                                UC_CACHE_SET_EXPIRED(entry);
                                continue;
                        }
                        LASSERT(!atomic_read(&entry->ue_refcount));
                        free_entry(cache, entry);
                }
        }
        spin_unlock(&cache->uc_lock);
        EXIT;
}

void upcall_cache_flush_idle(struct upcall_cache *cache)
{
        cache_flush(cache, 0);
}
EXPORT_SYMBOL(upcall_cache_flush_idle);

void upcall_cache_flush_all(struct upcall_cache *cache)
{
        cache_flush(cache, 1);
}
EXPORT_SYMBOL(upcall_cache_flush_all);

void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
        struct list_head *head;
        struct upcall_cache_entry *entry;
        int found = 0;
        ENTRY;

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
                      "cur %lu, ex %ld/%ld\n",
                      cache->uc_name, entry, entry->ue_key,
                      atomic_read(&entry->ue_refcount), entry->ue_flags,
                      jiffies, entry->ue_acquire_expire,
                      entry->ue_expire);
                UC_CACHE_SET_EXPIRED(entry);
                if (!atomic_read(&entry->ue_refcount))
                        free_entry(cache, entry);
        }
        spin_unlock(&cache->uc_lock);
        EXIT;
}
EXPORT_SYMBOL(upcall_cache_flush_one);
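
/*
 * Flush summary: upcall_cache_flush_idle() frees only unreferenced
 * entries (busy ones are merely marked EXPIRED), upcall_cache_flush_all()
 * expects no entry to be referenced and frees everything, and
 * upcall_cache_flush_one() targets a single key, e.g. to force a fresh
 * lookup after a stale result.
 */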

struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
                                       struct upcall_cache_ops *ops)
{
        struct upcall_cache *cache;
        int i;
        ENTRY;

        OBD_ALLOC(cache, sizeof(*cache));
        if (!cache)
                RETURN(ERR_PTR(-ENOMEM));

        spin_lock_init(&cache->uc_lock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&cache->uc_hashtable[i]);
        strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
        /* upcall pathname proc tunable */
        strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
        cache->uc_entry_expire = 10 * 60 * HZ;
        cache->uc_acquire_expire = 15 * HZ;
        cache->uc_ops = ops;

        RETURN(cache);
}
EXPORT_SYMBOL(upcall_cache_init);
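
/*
 * Setup/teardown sketch ("groups_upcall_ops" and the helper path are
 * hypothetical examples, not names defined in this file):
 *
 *      struct upcall_cache *cache;
 *
 *      cache = upcall_cache_init("group", "/usr/sbin/l_getgroups",
 *                                &groups_upcall_ops);
 *      if (IS_ERR(cache))
 *              return PTR_ERR(cache);
 *      ...
 *      upcall_cache_cleanup(cache);
 *
 * Defaults: cached entries stay valid for 10 minutes (uc_entry_expire);
 * a pending upcall is abandoned after 15 seconds (uc_acquire_expire).
 */
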
void upcall_cache_cleanup(struct upcall_cache *cache)
{
        if (!cache)
                return;
        upcall_cache_flush_all(cache);
        OBD_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(upcall_cache_cleanup);