1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/lvfs/upcall_cache.c
38 * Supplementary groups cache.
41 #define DEBUG_SUBSYSTEM S_SEC
43 #ifndef AUTOCONF_INCLUDED
44 #include <linux/config.h>
46 #include <linux/module.h>
47 #include <linux/kernel.h>
49 #include <linux/kmod.h>
50 #include <linux/string.h>
51 #include <linux/stat.h>
52 #include <linux/errno.h>
53 #include <linux/version.h>
54 #include <linux/unistd.h>
56 #include <asm/system.h>
57 #include <asm/uaccess.h>
60 #include <linux/stat.h>
61 #include <asm/uaccess.h>
62 #include <linux/slab.h>
64 #include <obd_support.h>
65 #include <lustre_lib.h>
/*
 * Allocate and initialize a new cache entry for @key.
 *
 * NOTE(review): this listing is elided (original line numbers skip), so the
 * actual allocation call, key assignment, failure check and return statement
 * are not visible here; comments describe only the visible statements.
 */
67 static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
68 __u64 key, void *args)
70 struct upcall_cache_entry *entry;
/* Mark as freshly created; upcall_cache_get_entry uses this to decide who
 * triggers the upcall. */
76 UC_CACHE_SET_NEW(entry);
77 CFS_INIT_LIST_HEAD(&entry->ue_hash);
/* Refcount starts at 0; the caller takes its reference separately. */
79 cfs_atomic_set(&entry->ue_refcount, 0);
/* Waiters block on ue_waitq while an upcall is in flight for this entry. */
80 cfs_waitq_init(&entry->ue_waitq);
/* Give the cache user a chance to set up its private part of the entry. */
81 if (cache->uc_ops->init_entry)
82 cache->uc_ops->init_entry(entry, args);
86 /* protected by cache lock */
/*
 * Tear down and release @entry: let the cache user free its private part,
 * then unlink the entry from its hash chain.
 * Caller must hold cache->uc_lock (see comment above).
 * NOTE(review): the final OBD_FREE/kfree of the entry itself is in an elided
 * line of this listing.
 */
87 static void free_entry(struct upcall_cache *cache,
88 struct upcall_cache_entry *entry)
90 if (cache->uc_ops->free_entry)
91 cache->uc_ops->free_entry(cache, entry);
93 cfs_list_del(&entry->ue_hash);
94 CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
95 entry, entry->ue_key);
/*
 * Match @entry against a lookup (@key, @args): the key must match, and if
 * the cache user supplies its own upcall_compare hook, it gets the final
 * word (e.g. to compare extra per-entry state carried in @args).
 * NOTE(review): the default return for "key matches, no hook" is on an
 * elided line.
 */
99 static inline int upcall_compare(struct upcall_cache *cache,
100 struct upcall_cache_entry *entry,
101 __u64 key, void *args)
103 if (entry->ue_key != key)
106 if (cache->uc_ops->upcall_compare)
107 return cache->uc_ops->upcall_compare(cache, entry, key, args);
/*
 * Same idea as upcall_compare(), but used when matching a downcall (reply
 * delivered by the userspace helper) to the waiting cache entry; the cache
 * user's downcall_compare hook may inspect the reply data in @args.
 * NOTE(review): the default return path is on an elided line.
 */
112 static inline int downcall_compare(struct upcall_cache *cache,
113 struct upcall_cache_entry *entry,
114 __u64 key, void *args)
116 if (entry->ue_key != key)
119 if (cache->uc_ops->downcall_compare)
120 return cache->uc_ops->downcall_compare(cache, entry, key, args);
/* Take a reference on @entry (plain atomic increment). */
125 static inline void get_entry(struct upcall_cache_entry *entry)
127 cfs_atomic_inc(&entry->ue_refcount);
/*
 * Drop a reference on @entry; when the last reference goes away AND the
 * entry is already invalid or expired, destroy it.  A valid entry with
 * refcount 0 is deliberately kept so it can be reused by later lookups.
 * Caller must hold cache->uc_lock (free_entry requires it).
 */
130 static inline void put_entry(struct upcall_cache *cache,
131 struct upcall_cache_entry *entry)
133 if (cfs_atomic_dec_and_test(&entry->ue_refcount) &&
134 (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
135 free_entry(cache, entry);
/*
 * Expire-and-unlink check, called with cache->uc_lock held.
 *
 * Keeps the entry if it is still valid and not past ue_expire, or if an
 * upcall is in flight and still within ue_acquire_expire.  Otherwise the
 * entry is marked expired (waking any waiters if it was stuck acquiring),
 * unhashed, and freed when nobody holds a reference.
 * NOTE(review): the early-return statements for the "keep" paths and the
 * final return value are on elided lines; presumably non-zero means
 * "entry was unlinked" given how upcall_cache_get_entry uses it — confirm
 * against the full source.
 */
139 static int check_unlink_entry(struct upcall_cache *cache,
140 struct upcall_cache_entry *entry)
142 if (UC_CACHE_IS_VALID(entry) &&
143 cfs_time_before(jiffies, entry->ue_expire))
146 if (UC_CACHE_IS_ACQUIRING(entry)) {
147 if (cfs_time_before(jiffies, entry->ue_acquire_expire))
/* Upcall took too long: expire the entry and release the waiters. */
150 UC_CACHE_SET_EXPIRED(entry);
151 cfs_waitq_broadcast(&entry->ue_waitq);
152 } else if (!UC_CACHE_IS_INVALID(entry)) {
153 UC_CACHE_SET_EXPIRED(entry);
/* Unhash; free immediately only if no one still references it. */
156 cfs_list_del_init(&entry->ue_hash);
157 if (!cfs_atomic_read(&entry->ue_refcount))
158 free_entry(cache, entry);
/*
 * Fire the upcall that (asynchronously) fills in @entry; the reply arrives
 * later via upcall_cache_downcall().  do_upcall is mandatory for any cache.
 */
162 static inline int refresh_entry(struct upcall_cache *cache,
163 struct upcall_cache_entry *entry)
165 LASSERT(cache->uc_ops->do_upcall);
166 return cache->uc_ops->do_upcall(cache, entry);
/*
 * Look up (or create) the cache entry for @key and return it with a
 * reference held, blocking while an upcall is needed to populate it.
 *
 * Flow visible in this (elided) listing:
 *   1. Hash @key, scan the chain dropping invalid/expired entries.
 *   2. On miss: drop the lock, allocate a new entry, re-take the lock and
 *      insert it (racing inserters are handled — a duplicate "new" entry
 *      is freed; a found entry is moved to the chain head as MRU).
 *   3. A NEW entry flips to ACQUIRING and triggers refresh_entry() (the
 *      upcall) with the lock dropped; on upcall failure it becomes INVALID,
 *      and -EREMCHG is propagated to the caller directly.
 *   4. If some other thread's upcall is in flight, sleep interruptibly on
 *      ue_waitq up to uc_acquire_expire; still-ACQUIRING afterwards means
 *      timeout or interruption and the lookup fails.
 *   5. INVALID means the upcall reported an error: fail with -EIDRM rather
 *      than retry.  An entry that expired meanwhile is unlinked and the
 *      whole lookup is retried (elided goto, presumably back to step 1 —
 *      confirm against full source).
 *
 * Returns the entry (refcount held) or ERR_PTR(-errno).  Caller releases
 * it with upcall_cache_put_entry().
 *
 * NOTE(review): declarations of head/found/rc/wait, the hash-chain "found"
 * bookkeeping, several closing braces and the retry goto are on elided
 * lines of this listing.
 */
169 struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
170 __u64 key, void *args)
172 struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
180 head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
183 cfs_spin_lock(&cache->uc_lock);
184 cfs_list_for_each_entry_safe(entry, next, head, ue_hash) {
185 /* check invalid & expired items */
186 if (check_unlink_entry(cache, entry))
188 if (upcall_compare(cache, entry, key, args) == 0) {
194 if (!found) { /* didn't find it */
/* Allocation may sleep, so the spinlock must be dropped first. */
196 cfs_spin_unlock(&cache->uc_lock);
197 new = alloc_entry(cache, key, args);
199 CERROR("fail to alloc entry\n");
200 RETURN(ERR_PTR(-ENOMEM));
201 cfs_list_add(&new->ue_hash, head);
/* Lost the insert race: someone else added the entry while the lock
 * was dropped; discard our surplus allocation. */
209 free_entry(cache, new);
/* Move the hit to the chain head (simple MRU ordering). */
212 cfs_list_move(&entry->ue_hash, head);
216 /* acquire for new one */
217 if (UC_CACHE_IS_NEW(entry)) {
218 UC_CACHE_SET_ACQUIRING(entry);
219 UC_CACHE_CLEAR_NEW(entry);
/* Deadline for the userspace helper to answer this upcall. */
220 entry->ue_acquire_expire = jiffies + cache->uc_acquire_expire;
221 cfs_spin_unlock(&cache->uc_lock);
222 rc = refresh_entry(cache, entry);
223 cfs_spin_lock(&cache->uc_lock);
/* Upcall submission failed: mark dead so nobody waits on it. */
225 UC_CACHE_CLEAR_ACQUIRING(entry);
226 UC_CACHE_SET_INVALID(entry);
227 if (unlikely(rc == -EREMCHG)) {
228 put_entry(cache, entry);
229 GOTO(out, entry = ERR_PTR(rc));
234 /* someone (and only one) is doing upcall upon
235 * this item, just wait it complete
237 if (UC_CACHE_IS_ACQUIRING(entry)) {
238 unsigned long expiry = jiffies + cache->uc_acquire_expire;
240 cfs_waitlink_init(&wait);
241 cfs_waitq_add(&entry->ue_waitq, &wait);
242 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
243 cfs_spin_unlock(&cache->uc_lock);
245 cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
246 cache->uc_acquire_expire);
248 cfs_spin_lock(&cache->uc_lock);
249 cfs_waitq_del(&entry->ue_waitq, &wait);
250 if (UC_CACHE_IS_ACQUIRING(entry)) {
251 /* we're interrupted or upcall failed in the middle */
/* Before the deadline => we were woken by a signal; after it =>
 * genuine timeout.  (Second operand of ?: is on an elided line.) */
252 rc = cfs_time_before(jiffies, expiry) ? \
254 put_entry(cache, entry);
255 CERROR("acquire timeout exceeded for key "LPU64
256 "\n", entry->ue_key);
257 GOTO(out, entry = ERR_PTR(rc));
262 /* invalid means error, don't need to try again */
263 if (UC_CACHE_IS_INVALID(entry)) {
264 put_entry(cache, entry);
265 GOTO(out, entry = ERR_PTR(-EIDRM));
269 * We can't refresh the existing one because some
270 * memory might be shared by multiple processes.
272 if (check_unlink_entry(cache, entry)) {
273 /* if expired, try again. but if this entry is
274 * created by me but too quickly turn to expired
275 * without any error, should at least give a
276 * chance to use it once.
279 put_entry(cache, entry);
280 cfs_spin_unlock(&cache->uc_lock);
286 /* Now we know it's good */
288 cfs_spin_unlock(&cache->uc_lock);
291 EXPORT_SYMBOL(upcall_cache_get_entry);
/*
 * Release a reference obtained from upcall_cache_get_entry().
 * Takes uc_lock because put_entry() may free the entry.
 * NOTE(review): an elided line presumably handles a NULL/error @entry
 * before the LASSERT — confirm against full source.
 */
293 void upcall_cache_put_entry(struct upcall_cache *cache,
294 struct upcall_cache_entry *entry)
303 LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
304 cfs_spin_lock(&cache->uc_lock);
305 put_entry(cache, entry);
306 cfs_spin_unlock(&cache->uc_lock);
309 EXPORT_SYMBOL(upcall_cache_put_entry);
/*
 * Deliver the userspace helper's reply (@err, @args) for @key.
 *
 * Finds the waiting (ACQUIRING) entry in the hash chain; a miss is
 * tolerated (the entry may already have expired).  On success the reply is
 * parsed via the parse_downcall hook (with uc_lock dropped, as parsing may
 * sleep), the entry gets a fresh ue_expire and becomes VALID.  On any
 * error — helper reported @err, entry no longer acquiring, or entry
 * stale — the entry is invalidated and unhashed.  In all found cases the
 * ACQUIRING bit is cleared and waiters on ue_waitq are woken.
 *
 * NOTE(review): the "found" bookkeeping, the return-value lines of the
 * not-found path, the err!=0 / parse-rc checks, and the final return are
 * on elided lines of this listing.
 */
311 int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
314 struct upcall_cache_entry *entry = NULL;
316 int found = 0, rc = 0;
321 head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
323 cfs_spin_lock(&cache->uc_lock);
324 cfs_list_for_each_entry(entry, head, ue_hash) {
325 if (downcall_compare(cache, entry, key, args) == 0) {
333 CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
334 cache->uc_name, key);
335 /* haven't found, it's possible */
336 cfs_spin_unlock(&cache->uc_lock);
/* Helper reported an error for this key (elided err check above). */
341 CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n",
342 cache->uc_name, entry->ue_key, err);
343 GOTO(out, rc = -EINVAL);
/* A downcall for an entry nobody is acquiring is unexpected. */
346 if (!UC_CACHE_IS_ACQUIRING(entry)) {
347 CDEBUG(D_RPCTRACE,"%s: found uptodate entry %p (key "LPU64")\n",
348 cache->uc_name, entry, entry->ue_key);
352 if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
353 CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n",
354 cache->uc_name, entry, entry->ue_key);
355 GOTO(out, rc = -EINVAL);
/* parse_downcall may sleep/copy from userspace: drop the spinlock. */
358 cfs_spin_unlock(&cache->uc_lock);
359 if (cache->uc_ops->parse_downcall)
360 rc = cache->uc_ops->parse_downcall(cache, entry, args);
361 cfs_spin_lock(&cache->uc_lock);
/* Success: arm the validity timer and publish the entry. */
365 entry->ue_expire = jiffies + cache->uc_entry_expire;
366 UC_CACHE_SET_VALID(entry);
367 CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
368 cache->uc_name, entry, entry->ue_key);
/* Error path: poison the entry and take it out of the hash. */
371 UC_CACHE_SET_INVALID(entry);
372 cfs_list_del_init(&entry->ue_hash);
/* Either way, stop acquiring and release everyone blocked in get_entry. */
374 UC_CACHE_CLEAR_ACQUIRING(entry);
375 cfs_spin_unlock(&cache->uc_lock);
376 cfs_waitq_broadcast(&entry->ue_waitq);
377 put_entry(cache, entry);
381 EXPORT_SYMBOL(upcall_cache_downcall);
/*
 * Walk every hash chain and free entries.  With @force == 0, entries that
 * are still referenced are only marked EXPIRED (freed later by put_entry);
 * with @force != 0 everything is freed and a lingering reference trips the
 * LASSERT.
 * NOTE(review): the loop counter declaration and the `continue` after
 * marking a busy entry expired are on elided lines.
 */
383 static void cache_flush(struct upcall_cache *cache, int force)
385 struct upcall_cache_entry *entry, *next;
389 cfs_spin_lock(&cache->uc_lock);
390 for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
391 cfs_list_for_each_entry_safe(entry, next,
392 &cache->uc_hashtable[i], ue_hash) {
393 if (!force && cfs_atomic_read(&entry->ue_refcount)) {
394 UC_CACHE_SET_EXPIRED(entry);
397 LASSERT(!cfs_atomic_read(&entry->ue_refcount));
398 free_entry(cache, entry);
401 cfs_spin_unlock(&cache->uc_lock);
/* Flush only idle (unreferenced) entries; busy ones are merely expired. */
405 void upcall_cache_flush_idle(struct upcall_cache *cache)
407 cache_flush(cache, 0);
409 EXPORT_SYMBOL(upcall_cache_flush_idle);
/* Forcibly flush every entry; asserts that nothing is still referenced. */
411 void upcall_cache_flush_all(struct upcall_cache *cache)
413 cache_flush(cache, 1);
415 EXPORT_SYMBOL(upcall_cache_flush_all);
/*
 * Expire the single entry matching (@key, @args), if present: log its
 * state, mark it EXPIRED and free it immediately when unreferenced (a
 * referenced entry is freed later by put_entry()).
 * NOTE(review): the "found" bookkeeping, the not-found early exit and the
 * last CWARN argument (ue_expire) are on elided lines of this listing.
 */
417 void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
420 struct upcall_cache_entry *entry;
424 head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
426 cfs_spin_lock(&cache->uc_lock);
427 cfs_list_for_each_entry(entry, head, ue_hash) {
428 if (upcall_compare(cache, entry, key, args) == 0) {
435 CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
436 "cur %lu, ex %ld/%ld\n",
437 cache->uc_name, entry, entry->ue_key,
438 cfs_atomic_read(&entry->ue_refcount), entry->ue_flags,
439 get_seconds(), entry->ue_acquire_expire,
441 UC_CACHE_SET_EXPIRED(entry);
442 if (!cfs_atomic_read(&entry->ue_refcount))
443 free_entry(cache, entry);
445 cfs_spin_unlock(&cache->uc_lock);
/*
 * Allocate and set up a new upcall cache named @name, whose entries are
 * (re)filled by the userspace program at @upcall using the @ops callbacks.
 * Default lifetimes: entries stay valid 10 minutes; an in-flight upcall is
 * given 15 seconds to answer.
 *
 * Returns the cache or ERR_PTR(-ENOMEM).
 *
 * NOTE(review): strncpy with size-1 relies on OBD_ALLOC zero-filling the
 * struct for NUL termination — presumably true here, but worth confirming;
 * the allocation-failure check, ops/name assignments and final RETURN are
 * on elided lines of this listing.
 */
449 struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
450 struct upcall_cache_ops *ops)
452 struct upcall_cache *cache;
456 OBD_ALLOC(cache, sizeof(*cache));
458 RETURN(ERR_PTR(-ENOMEM));
460 cfs_spin_lock_init(&cache->uc_lock);
461 cfs_rwlock_init(&cache->uc_upcall_rwlock);
462 for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
463 CFS_INIT_LIST_HEAD(&cache->uc_hashtable[i]);
464 strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
465 /* upcall pathname proc tunable */
466 strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
467 cache->uc_entry_expire = 10 * 60 * CFS_HZ;
468 cache->uc_acquire_expire = 15 * CFS_HZ;
473 EXPORT_SYMBOL(upcall_cache_init);
/*
 * Destroy @cache: force-flush every entry, then free the cache structure.
 * NOTE(review): a NULL-check for @cache is presumably on an elided line —
 * confirm against the full source.
 */
475 void upcall_cache_cleanup(struct upcall_cache *cache)
479 upcall_cache_flush_all(cache);
480 OBD_FREE(cache, sizeof(*cache));
482 EXPORT_SYMBOL(upcall_cache_cleanup);