/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/lvfs/upcall_cache.c
 *
 * Supplementary groups cache.
 */
#define DEBUG_SUBSYSTEM S_SEC

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/version.h>
#include <linux/unistd.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include <obd_support.h>
#include <lustre_lib.h>
static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
                                              __u64 key, void *args)
{
        struct upcall_cache_entry *entry;

        OBD_ALLOC_PTR(entry);
        if (!entry)
                return NULL;

        UC_CACHE_SET_NEW(entry);
        INIT_LIST_HEAD(&entry->ue_hash);
        entry->ue_key = key;
        atomic_set(&entry->ue_refcount, 0);
        init_waitqueue_head(&entry->ue_waitq);
        if (cache->uc_ops->init_entry)
                cache->uc_ops->init_entry(entry, args);
        return entry;
}
/* protected by cache lock */
static void free_entry(struct upcall_cache *cache,
                       struct upcall_cache_entry *entry)
{
        if (cache->uc_ops->free_entry)
                cache->uc_ops->free_entry(cache, entry);

        list_del(&entry->ue_hash);
        CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
               entry, entry->ue_key);
        OBD_FREE_PTR(entry);
}
static inline int upcall_compare(struct upcall_cache *cache,
                                 struct upcall_cache_entry *entry,
                                 __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->upcall_compare)
                return cache->uc_ops->upcall_compare(cache, entry, key, args);

        return 0;
}
static inline int downcall_compare(struct upcall_cache *cache,
                                   struct upcall_cache_entry *entry,
                                   __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->downcall_compare)
                return cache->uc_ops->downcall_compare(cache, entry, key, args);

        return 0;
}
static inline void get_entry(struct upcall_cache_entry *entry)
{
        atomic_inc(&entry->ue_refcount);
}
static inline void put_entry(struct upcall_cache *cache,
                             struct upcall_cache_entry *entry)
{
        if (atomic_dec_and_test(&entry->ue_refcount) &&
            (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
                free_entry(cache, entry);
        }
}
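
/*
 * Reference-counting sketch (illustrative, not compiled): lookups pin an
 * entry with get_entry() under cache->uc_lock and release it later with
 * put_entry().  An entry whose last reference drops while it is INVALID
 * or EXPIRED is freed lazily here rather than at unhash time:
 *
 *      spin_lock(&cache->uc_lock);
 *      get_entry(entry);               (pin for use outside the lock)
 *      spin_unlock(&cache->uc_lock);
 *      ... use the entry ...
 *      spin_lock(&cache->uc_lock);
 *      put_entry(cache, entry);        (may free a dead entry)
 *      spin_unlock(&cache->uc_lock);
 */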
static int check_unlink_entry(struct upcall_cache *cache,
                              struct upcall_cache_entry *entry)
{
        /* still valid and not yet expired: keep it hashed */
        if (UC_CACHE_IS_VALID(entry) &&
            time_before(jiffies, entry->ue_expire))
                return 0;

        if (UC_CACHE_IS_ACQUIRING(entry)) {
                /* the upcall is still within its deadline: keep waiting */
                if (time_before(jiffies, entry->ue_acquire_expire))
                        return 0;

                UC_CACHE_SET_EXPIRED(entry);
                wake_up_all(&entry->ue_waitq);
        } else if (!UC_CACHE_IS_INVALID(entry)) {
                UC_CACHE_SET_EXPIRED(entry);
        }

        list_del_init(&entry->ue_hash);
        if (!atomic_read(&entry->ue_refcount))
                free_entry(cache, entry);
        return 1;
}
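
/*
 * Entry life cycle implied by the checks above (the UC_CACHE_* flag
 * macros are defined in the Lustre headers):
 *
 *      NEW -> ACQUIRING -> VALID -> EXPIRED -> freed
 *                 \-> INVALID (upcall failed; never retried)
 *
 * check_unlink_entry() returns 1 and unhashes an entry once it is no
 * longer usable, freeing it immediately if nobody holds a reference.
 */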
static inline int refresh_entry(struct upcall_cache *cache,
                                struct upcall_cache_entry *entry)
{
        LASSERT(cache->uc_ops->do_upcall);
        return cache->uc_ops->do_upcall(cache, entry);
}
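
/*
 * A do_upcall implementation typically execs the helper named by
 * cache->uc_upcall from process context and returns once the request has
 * been handed off.  A minimal sketch, assuming a hypothetical helper that
 * takes the cache name and key as arguments (the exact
 * call_usermodehelper() signature varies across kernel versions):
 *
 *      static int example_do_upcall(struct upcall_cache *cache,
 *                                   struct upcall_cache_entry *entry)
 *      {
 *              char keystr[24];
 *              char *argv[] = { cache->uc_upcall, cache->uc_name,
 *                               keystr, NULL };
 *              char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin", NULL };
 *
 *              snprintf(keystr, sizeof(keystr), LPU64, entry->ue_key);
 *              return call_usermodehelper(argv[0], argv, envp, 1);
 *      }
 *
 * The helper's answer arrives asynchronously via upcall_cache_downcall().
 */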
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                  __u64 key, void *args)
{
        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
        struct list_head *head;
        wait_queue_t wait;
        int rc, found;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
        found = 0;
        spin_lock(&cache->uc_lock);
        list_for_each_entry_safe(entry, next, head, ue_hash) {
                /* check invalid & expired items */
                if (check_unlink_entry(cache, entry))
                        continue;
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (!found) { /* didn't find it */
                if (!new) {
                        spin_unlock(&cache->uc_lock);
                        new = alloc_entry(cache, key, args);
                        if (!new) {
                                CERROR("failed to alloc entry\n");
                                RETURN(ERR_PTR(-ENOMEM));
                        }
                        goto find_again;
                } else {
                        list_add(&new->ue_hash, head);
                        entry = new;
                }
        } else {
                if (new) {
                        free_entry(cache, new);
                        new = NULL;
                }
                list_move(&entry->ue_hash, head);
        }
        get_entry(entry);

        /* acquire for new one */
        if (UC_CACHE_IS_NEW(entry)) {
                UC_CACHE_SET_ACQUIRING(entry);
                UC_CACHE_CLEAR_NEW(entry);
                entry->ue_acquire_expire = jiffies + cache->uc_acquire_expire;
                spin_unlock(&cache->uc_lock);
                rc = refresh_entry(cache, entry);
                spin_lock(&cache->uc_lock);
                if (rc < 0) {
                        UC_CACHE_CLEAR_ACQUIRING(entry);
                        UC_CACHE_SET_INVALID(entry);
                        if (unlikely(rc == -EREMCHG)) {
                                put_entry(cache, entry);
                                GOTO(out, entry = ERR_PTR(rc));
                        }
                }
        }

        /* someone (and only one) is doing upcall upon this item,
         * just wait for it to complete */
        if (UC_CACHE_IS_ACQUIRING(entry)) {
                unsigned long expiry = jiffies + cache->uc_acquire_expire;

                init_waitqueue_entry(&wait, current);
                add_wait_queue(&entry->ue_waitq, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock(&cache->uc_lock);

                schedule_timeout(cache->uc_acquire_expire);

                spin_lock(&cache->uc_lock);
                remove_wait_queue(&entry->ue_waitq, &wait);
                if (UC_CACHE_IS_ACQUIRING(entry)) {
                        /* we're interrupted or upcall failed in the middle */
                        rc = time_before(jiffies, expiry) ? -EINTR : -ETIMEDOUT;
                        CERROR("acquire timeout exceeded for key "LPU64
                               "\n", entry->ue_key);
                        put_entry(cache, entry);
                        GOTO(out, entry = ERR_PTR(rc));
                }
        }

        /* invalid means error, don't need to try again */
        if (UC_CACHE_IS_INVALID(entry)) {
                put_entry(cache, entry);
                GOTO(out, entry = ERR_PTR(-EIDRM));
        }

        /* check expired.
         * We can't refresh the existing one because some
         * memory might be shared by multiple processes.
         */
        if (check_unlink_entry(cache, entry)) {
                /* if expired, try again. But if this entry was
                 * created by me and expired too quickly without
                 * any error, it deserves at least one chance to
                 * be used.
                 */
                if (entry != new) {
                        put_entry(cache, entry);
                        spin_unlock(&cache->uc_lock);
                        new = NULL;
                        goto find_again;
                }
        }

        /* Now we know it's good */
out:
        spin_unlock(&cache->uc_lock);
        RETURN(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry);
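
/*
 * Usage sketch (illustrative): callers pin an entry, consume the
 * ops-specific data attached to it, then drop the reference.  The
 * identity_cache variable below is hypothetical.
 *
 *      struct upcall_cache_entry *entry;
 *
 *      entry = upcall_cache_get_entry(identity_cache, (__u64)uid, NULL);
 *      if (IS_ERR(entry))
 *              return PTR_ERR(entry);
 *      ... read the cached payload ...
 *      upcall_cache_put_entry(identity_cache, entry);
 */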
void upcall_cache_put_entry(struct upcall_cache *cache,
                            struct upcall_cache_entry *entry)
{
        if (!entry)
                return;

        LASSERT(atomic_read(&entry->ue_refcount) > 0);
        spin_lock(&cache->uc_lock);
        put_entry(cache, entry);
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_put_entry);
int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                          void *args)
{
        struct upcall_cache_entry *entry = NULL;
        struct list_head *head;
        int found = 0, rc = 0;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (downcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        get_entry(entry);
                        break;
                }
        }

        if (!found) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
                       cache->uc_name, key);
                /* haven't found, it's possible */
                spin_unlock(&cache->uc_lock);
                RETURN(-EINVAL);
        }

        if (err) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n",
                       cache->uc_name, entry->ue_key, err);
                GOTO(out, rc = -EINVAL);
        }

        if (!UC_CACHE_IS_ACQUIRING(entry)) {
                CDEBUG(D_RPCTRACE, "%s: found uptodate entry %p (key "LPU64")\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = 0);
        }

        if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
                CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = -EINVAL);
        }

        spin_unlock(&cache->uc_lock);
        if (cache->uc_ops->parse_downcall)
                rc = cache->uc_ops->parse_downcall(cache, entry, args);
        spin_lock(&cache->uc_lock);
        if (rc)
                GOTO(out, rc);

        entry->ue_expire = jiffies + cache->uc_entry_expire;
        UC_CACHE_SET_VALID(entry);
        CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
               cache->uc_name, entry, entry->ue_key);
out:
        if (rc) {
                UC_CACHE_SET_INVALID(entry);
                list_del_init(&entry->ue_hash);
        }
        UC_CACHE_CLEAR_ACQUIRING(entry);
        spin_unlock(&cache->uc_lock);
        wake_up_all(&entry->ue_waitq);
        put_entry(cache, entry);

        RETURN(rc);
}
EXPORT_SYMBOL(upcall_cache_downcall);
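
/*
 * Downcall flow sketch (illustrative): the userspace helper started by
 * the upcall reports its result back into the kernel; the entry point
 * that receives it (ioctl, procfs write, etc.) depends on the user of
 * this cache and simply forwards the parsed reply, roughly:
 *
 *      rc = upcall_cache_downcall(identity_cache, reply.err,
 *                                 reply.key, &reply.data);
 *
 * where identity_cache and the reply structure are hypothetical names
 * for whatever the handler parsed from the helper's message.
 */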
static void cache_flush(struct upcall_cache *cache, int force)
{
        struct upcall_cache_entry *entry, *next;
        int i;

        spin_lock(&cache->uc_lock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
                list_for_each_entry_safe(entry, next,
                                         &cache->uc_hashtable[i], ue_hash) {
                        if (!force && atomic_read(&entry->ue_refcount)) {
                                UC_CACHE_SET_EXPIRED(entry);
                                continue;
                        }

                        LASSERT(!atomic_read(&entry->ue_refcount));
                        free_entry(cache, entry);
                }
        }
        spin_unlock(&cache->uc_lock);
}
void upcall_cache_flush_idle(struct upcall_cache *cache)
{
        cache_flush(cache, 0);
}
EXPORT_SYMBOL(upcall_cache_flush_idle);

void upcall_cache_flush_all(struct upcall_cache *cache)
{
        cache_flush(cache, 1);
}
EXPORT_SYMBOL(upcall_cache_flush_all);
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
        struct list_head *head;
        struct upcall_cache_entry *entry;
        int found = 0;

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
                      "cur %lu, ex %ld/%ld\n",
                      cache->uc_name, entry, entry->ue_key,
                      atomic_read(&entry->ue_refcount), entry->ue_flags,
                      get_seconds(), entry->ue_acquire_expire,
                      entry->ue_expire);
                UC_CACHE_SET_EXPIRED(entry);
                if (!atomic_read(&entry->ue_refcount))
                        free_entry(cache, entry);
        }
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);
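
/*
 * For example, an administrative path that invalidates a single cached
 * key (identity_cache is hypothetical) would call:
 *
 *      upcall_cache_flush_one(identity_cache, (__u64)uid, NULL);
 */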
struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
                                       struct upcall_cache_ops *ops)
{
        struct upcall_cache *cache;
        int i;
        ENTRY;

        OBD_ALLOC(cache, sizeof(*cache));
        if (!cache)
                RETURN(ERR_PTR(-ENOMEM));

        spin_lock_init(&cache->uc_lock);
        rwlock_init(&cache->uc_upcall_rwlock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&cache->uc_hashtable[i]);
        strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
        /* upcall pathname proc tunable */
        strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
        cache->uc_entry_expire = 10 * 60 * HZ;
        cache->uc_acquire_expire = 15 * HZ;
        cache->uc_ops = ops;

        RETURN(cache);
}
EXPORT_SYMBOL(upcall_cache_init);
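
/*
 * Initialization sketch (illustrative): a user of this cache supplies an
 * upcall_cache_ops table and the path of its userspace helper.  All of
 * the identity_* names and the helper path below are hypothetical.
 *
 *      static struct upcall_cache_ops identity_ops = {
 *              .init_entry       = identity_init_entry,
 *              .free_entry       = identity_free_entry,
 *              .upcall_compare   = identity_upcall_compare,
 *              .downcall_compare = identity_downcall_compare,
 *              .do_upcall        = identity_do_upcall,
 *              .parse_downcall   = identity_parse_downcall,
 *      };
 *
 *      cache = upcall_cache_init("identity", "/usr/sbin/l_getidentity",
 *                                &identity_ops);
 *      if (IS_ERR(cache))
 *              return PTR_ERR(cache);
 *      ...
 *      upcall_cache_cleanup(cache);
 */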
void upcall_cache_cleanup(struct upcall_cache *cache)
{
        if (!cache)
                return;
        upcall_cache_flush_all(cache);
        OBD_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(upcall_cache_cleanup);