1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/lvfs/upcall_cache.c
38 * Supplementary groups cache.
41 #define DEBUG_SUBSYSTEM S_SEC
43 #ifndef AUTOCONF_INCLUDED
44 #include <linux/config.h>
46 #include <linux/module.h>
47 #include <linux/kernel.h>
49 #include <linux/kmod.h>
50 #include <linux/string.h>
51 #include <linux/stat.h>
52 #include <linux/errno.h>
53 #include <linux/version.h>
54 #include <linux/unistd.h>
56 #include <asm/system.h>
57 #include <asm/uaccess.h>
60 #include <linux/stat.h>
61 #include <asm/uaccess.h>
62 #include <linux/slab.h>
64 #include <obd_support.h>
65 #include <lustre_lib.h>
/* Allocate and initialise a new cache entry for @key.
 * NOTE(review): the actual allocation, key assignment and return are
 * elided from this chunk — confirm against the full source. */
67 static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
68 __u64 key, void *args)
70 struct upcall_cache_entry *entry;
/* Fresh entries start in the NEW state; upcall_cache_get_entry() uses
 * this to decide an upcall must be issued for them. */
76 UC_CACHE_SET_NEW(entry);
77 CFS_INIT_LIST_HEAD(&entry->ue_hash);
/* Refcount starts at zero; holders take references via get_entry(). */
79 cfs_atomic_set(&entry->ue_refcount, 0);
/* Waiters block on ue_waitq until the upcall for this entry completes. */
80 cfs_waitq_init(&entry->ue_waitq);
/* Let the cache client initialise its private part of the entry. */
81 if (cache->uc_ops->init_entry)
82 cache->uc_ops->init_entry(entry, args);
86 /* protected by cache lock */
/* Destroy @entry: let the client release its private data via the
 * optional free_entry hook, then unlink it from its hash chain.
 * Caller must hold cache->uc_lock (see comment above).
 * NOTE(review): the final freeing of the entry memory itself is elided
 * from this chunk. */
87 static void free_entry(struct upcall_cache *cache,
88 struct upcall_cache_entry *entry)
90 if (cache->uc_ops->free_entry)
91 cache->uc_ops->free_entry(cache, entry);
93 cfs_list_del(&entry->ue_hash);
94 CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
95 entry, entry->ue_key);
/* Match @entry against a lookup: fast path compares the 64-bit key,
 * then defers to the client's optional upcall_compare hook for any
 * extra matching state carried in @args.  A 0 return means "match";
 * the non-match return paths are elided from this chunk. */
99 static inline int upcall_compare(struct upcall_cache *cache,
100 struct upcall_cache_entry *entry,
101 __u64 key, void *args)
103 if (entry->ue_key != key)
106 if (cache->uc_ops->upcall_compare)
107 return cache->uc_ops->upcall_compare(cache, entry, key, args);
/* Like upcall_compare(), but used when matching a user-space downcall
 * reply to its pending entry: key check first, then the client's
 * optional downcall_compare hook.  0 means "match"; non-match return
 * paths are elided from this chunk. */
112 static inline int downcall_compare(struct upcall_cache *cache,
113 struct upcall_cache_entry *entry,
114 __u64 key, void *args)
116 if (entry->ue_key != key)
119 if (cache->uc_ops->downcall_compare)
120 return cache->uc_ops->downcall_compare(cache, entry, key, args);
/* Take one reference on @entry (plain atomic increment). */
125 static inline void get_entry(struct upcall_cache_entry *entry)
127 cfs_atomic_inc(&entry->ue_refcount);
/* Drop one reference on @entry.  If that was the last reference AND
 * the entry has already been marked invalid or expired, free it on
 * the spot.  Caller must hold cache->uc_lock (free_entry() requires
 * it). */
130 static inline void put_entry(struct upcall_cache *cache,
131 struct upcall_cache_entry *entry)
133 if (cfs_atomic_dec_and_test(&entry->ue_refcount) &&
134 (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
135 free_entry(cache, entry);
/* Check whether @entry is stale and, if so, expire and unlink it.
 * Called under cache->uc_lock.
 *  - VALID and not yet past ue_expire: keep it (early-return elided);
 *  - ACQUIRING with no deadline set, or still inside the acquire
 *    window: keep waiting (early-return elided);
 *  - ACQUIRING past its deadline: mark EXPIRED and wake the waiters;
 *  - otherwise (not already INVALID): mark EXPIRED.
 * Expired entries are unlinked from the hash and freed immediately if
 * nobody holds a reference.  The return-value lines are elided from
 * this chunk; callers treat non-zero as "entry was unlinked". */
139 static int check_unlink_entry(struct upcall_cache *cache,
140 struct upcall_cache_entry *entry)
142 if (UC_CACHE_IS_VALID(entry) &&
143 cfs_time_before(jiffies, entry->ue_expire))
146 if (UC_CACHE_IS_ACQUIRING(entry)) {
147 if (entry->ue_acquire_expire == 0 ||
148 cfs_time_before(jiffies, entry->ue_acquire_expire))
151 UC_CACHE_SET_EXPIRED(entry);
/* wake anyone sleeping on this timed-out acquisition */
152 cfs_waitq_broadcast(&entry->ue_waitq);
153 } else if (!UC_CACHE_IS_INVALID(entry)) {
154 UC_CACHE_SET_EXPIRED(entry);
157 cfs_list_del_init(&entry->ue_hash);
/* free right away only if the entry is idle; otherwise the last
 * put_entry() will free it */
158 if (!cfs_atomic_read(&entry->ue_refcount))
159 free_entry(cache, entry);
/* Fire the client's do_upcall hook to (re)fetch data for @entry from
 * user space.  The hook is mandatory; its return code is passed
 * straight through to the caller. */
163 static inline int refresh_entry(struct upcall_cache *cache,
164 struct upcall_cache_entry *entry)
166 LASSERT(cache->uc_ops->do_upcall);
167 return cache->uc_ops->do_upcall(cache, entry);
/* Look up (or create) the cache entry for @key and return it with a
 * reference held, or an ERR_PTR() on failure.
 *
 * Flow: hash @key to a chain and scan it under uc_lock, pruning stale
 * entries via check_unlink_entry(); on a miss, drop the lock, allocate
 * a new entry and rescan (freeing the spare if another thread raced us
 * in).  A NEW entry triggers the upcall via refresh_entry(); a thread
 * that finds the entry ACQUIRING sleeps on ue_waitq until the downcall
 * completes or the wait times out.
 *
 * NOTE(review): several lines (local declarations, found/find labels
 * and some branch structure) are elided from this chunk — the comments
 * below describe only what is visible. */
170 struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
171 __u64 key, void *args)
173 struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
181 head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
184 cfs_spin_lock(&cache->uc_lock);
185 cfs_list_for_each_entry_safe(entry, next, head, ue_hash) {
186 /* check invalid & expired items */
187 if (check_unlink_entry(cache, entry))
189 if (upcall_compare(cache, entry, key, args) == 0) {
/* cache miss: allocate outside the spinlock (may sleep), then rescan */
197 cfs_spin_unlock(&cache->uc_lock);
198 new = alloc_entry(cache, key, args);
200 CERROR("fail to alloc entry\n");
201 RETURN(ERR_PTR(-ENOMEM));
205 cfs_list_add(&new->ue_hash, head);
/* lost the race: someone inserted the same key while we allocated */
210 free_entry(cache, new);
/* hit: move the entry to the head of its chain (LRU-ish behaviour) */
213 cfs_list_move(&entry->ue_hash, head);
217 /* acquire for new one */
218 if (UC_CACHE_IS_NEW(entry)) {
219 UC_CACHE_SET_ACQUIRING(entry);
220 UC_CACHE_CLEAR_NEW(entry);
/* the upcall may sleep — drop the lock around it */
221 cfs_spin_unlock(&cache->uc_lock);
222 rc = refresh_entry(cache, entry);
223 cfs_spin_lock(&cache->uc_lock);
/* stamp the deadline by which the downcall must arrive */
224 entry->ue_acquire_expire = jiffies + cache->uc_acquire_expire;
/* upcall failed: invalidate the entry and wake any waiters
 * (the enclosing error test is elided from this chunk) */
226 UC_CACHE_CLEAR_ACQUIRING(entry);
227 UC_CACHE_SET_INVALID(entry);
228 cfs_waitq_broadcast(&entry->ue_waitq);
229 if (unlikely(rc == -EREMCHG)) {
230 put_entry(cache, entry);
231 GOTO(out, entry = ERR_PTR(rc));
236 /* someone (and only one) is doing upcall upon this item,
237 * wait it to complete */
238 if (UC_CACHE_IS_ACQUIRING(entry)) {
/* only the creator bounds its wait; other threads wait indefinitely */
239 long expiry = (entry == new) ? cache->uc_acquire_expire :
240 CFS_MAX_SCHEDULE_TIMEOUT;
243 cfs_waitlink_init(&wait);
244 cfs_waitq_add(&entry->ue_waitq, &wait);
245 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
/* sleep without the lock; re-take it before inspecting state */
246 cfs_spin_unlock(&cache->uc_lock);
248 left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
251 cfs_spin_lock(&cache->uc_lock);
252 cfs_waitq_del(&entry->ue_waitq, &wait);
253 if (UC_CACHE_IS_ACQUIRING(entry)) {
254 /* we're interrupted or upcall failed in the middle */
255 rc = left > 0 ? -EINTR : -ETIMEDOUT;
256 CERROR("acquire for key "LPU64": error %d\n",
258 put_entry(cache, entry);
259 GOTO(out, entry = ERR_PTR(rc));
263 /* invalid means error, don't need to try again */
264 if (UC_CACHE_IS_INVALID(entry)) {
265 put_entry(cache, entry);
266 GOTO(out, entry = ERR_PTR(-EIDRM));
270 * We can't refresh the existing one because some
271 * memory might be shared by multiple processes.
273 if (check_unlink_entry(cache, entry)) {
274 /* if expired, try again. but if this entry is
275 * created by me but too quickly turn to expired
276 * without any error, should at least give a
277 * chance to use it once.
280 put_entry(cache, entry);
281 cfs_spin_unlock(&cache->uc_lock);
287 /* Now we know it's good */
289 cfs_spin_unlock(&cache->uc_lock);
292 EXPORT_SYMBOL(upcall_cache_get_entry);
/* Public release of an entry returned by upcall_cache_get_entry():
 * takes uc_lock and drops one reference via put_entry(), which frees
 * the entry if it was the last reference and the entry is already
 * invalid/expired.  The caller must still hold a reference (asserted
 * below); the NULL-entry early return, if any, is elided from this
 * chunk. */
294 void upcall_cache_put_entry(struct upcall_cache *cache,
295 struct upcall_cache_entry *entry)
304 LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
305 cfs_spin_lock(&cache->uc_lock);
306 put_entry(cache, entry);
307 cfs_spin_unlock(&cache->uc_lock);
310 EXPORT_SYMBOL(upcall_cache_put_entry);
/* Handle a user-space downcall (the reply to an earlier upcall) for
 * @key with status @err.  Finds the matching entry on its hash chain
 * via downcall_compare(); if the entry is still ACQUIRING, parses the
 * reply through the client's parse_downcall hook, stamps a validity
 * deadline and marks the entry VALID.  On any error path the entry is
 * invalidated and unhashed instead.  Waiters on ue_waitq are woken in
 * either case.
 * NOTE(review): the entry reference take, the "found" branch structure
 * and the final return are elided from this chunk. */
312 int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
315 struct upcall_cache_entry *entry = NULL;
317 int found = 0, rc = 0;
322 head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
324 cfs_spin_lock(&cache->uc_lock);
325 cfs_list_for_each_entry(entry, head, ue_hash) {
326 if (downcall_compare(cache, entry, key, args) == 0) {
/* no pending entry for this key — a late/duplicate reply is legal */
334 CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
335 cache->uc_name, key);
336 /* haven't found, it's possible */
337 cfs_spin_unlock(&cache->uc_lock);
/* upcall itself reported failure (@err != 0); branch test elided */
342 CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n",
343 cache->uc_name, entry->ue_key, err);
344 GOTO(out, rc = -EINVAL);
/* a downcall for an entry nobody is acquiring is a protocol error */
347 if (!UC_CACHE_IS_ACQUIRING(entry)) {
348 CDEBUG(D_RPCTRACE,"%s: found uptodate entry %p (key "LPU64")\n",
349 cache->uc_name, entry, entry->ue_key);
353 if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
354 CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n",
355 cache->uc_name, entry, entry->ue_key);
356 GOTO(out, rc = -EINVAL);
/* parse_downcall may sleep/copy from user — drop the lock around it */
359 cfs_spin_unlock(&cache->uc_lock);
360 if (cache->uc_ops->parse_downcall)
361 rc = cache->uc_ops->parse_downcall(cache, entry, args);
362 cfs_spin_lock(&cache->uc_lock);
/* success: entry stays valid until ue_expire */
366 entry->ue_expire = jiffies + cache->uc_entry_expire;
367 UC_CACHE_SET_VALID(entry);
368 CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
369 cache->uc_name, entry, entry->ue_key);
/* error path: invalidate and unhash so the next lookup retries */
372 UC_CACHE_SET_INVALID(entry);
373 cfs_list_del_init(&entry->ue_hash);
375 UC_CACHE_CLEAR_ACQUIRING(entry);
376 cfs_spin_unlock(&cache->uc_lock);
/* wake everyone blocked in upcall_cache_get_entry() on this entry */
377 cfs_waitq_broadcast(&entry->ue_waitq);
378 put_entry(cache, entry);
382 EXPORT_SYMBOL(upcall_cache_downcall);
/* Walk every hash chain and dispose of entries.  With @force set,
 * every entry is asserted idle and freed outright; without it, busy
 * entries (refcount != 0) are only marked EXPIRED so they die on
 * their final put_entry(). */
384 static void cache_flush(struct upcall_cache *cache, int force)
386 struct upcall_cache_entry *entry, *next;
390 cfs_spin_lock(&cache->uc_lock);
391 for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
392 cfs_list_for_each_entry_safe(entry, next,
393 &cache->uc_hashtable[i], ue_hash) {
394 if (!force && cfs_atomic_read(&entry->ue_refcount)) {
395 UC_CACHE_SET_EXPIRED(entry);
398 LASSERT(!cfs_atomic_read(&entry->ue_refcount));
399 free_entry(cache, entry);
402 cfs_spin_unlock(&cache->uc_lock);
/* Flush only idle entries; busy ones are merely marked EXPIRED. */
406 void upcall_cache_flush_idle(struct upcall_cache *cache)
408 cache_flush(cache, 0);
410 EXPORT_SYMBOL(upcall_cache_flush_idle);
/* Forcibly flush every entry; all entries must be idle (asserted in
 * cache_flush()).  Used at cache teardown. */
412 void upcall_cache_flush_all(struct upcall_cache *cache)
414 cache_flush(cache, 1);
416 EXPORT_SYMBOL(upcall_cache_flush_all);
/* Expire the single entry matching @key/@args, freeing it at once if
 * it is idle; a busy entry is only marked EXPIRED and will be freed by
 * its last put_entry().  Silently does nothing when no entry matches
 * (the found-test lines are elided from this chunk). */
418 void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
421 struct upcall_cache_entry *entry;
425 head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
427 cfs_spin_lock(&cache->uc_lock);
428 cfs_list_for_each_entry(entry, head, ue_hash) {
429 if (upcall_compare(cache, entry, key, args) == 0) {
436 CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
437 "cur %lu, ex %ld/%ld\n",
438 cache->uc_name, entry, entry->ue_key,
439 cfs_atomic_read(&entry->ue_refcount), entry->ue_flags,
440 get_seconds(), entry->ue_acquire_expire,
442 UC_CACHE_SET_EXPIRED(entry);
443 if (!cfs_atomic_read(&entry->ue_refcount))
444 free_entry(cache, entry);
446 cfs_spin_unlock(&cache->uc_lock);
448 EXPORT_SYMBOL(upcall_cache_flush_one);
/* Allocate and initialise a new upcall cache named @name, whose
 * upcall pathname is @upcall and whose client hooks are @ops.
 * Returns the cache, or ERR_PTR(-ENOMEM) (the success return and the
 * ops assignment are elided from this chunk).
 * Defaults: entries stay valid 20 minutes; an upcall has 30 seconds
 * to be answered. */
450 struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
451 struct upcall_cache_ops *ops)
453 struct upcall_cache *cache;
457 OBD_ALLOC(cache, sizeof(*cache));
459 RETURN(ERR_PTR(-ENOMEM));
461 cfs_spin_lock_init(&cache->uc_lock);
462 cfs_rwlock_init(&cache->uc_upcall_rwlock);
463 for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
464 CFS_INIT_LIST_HEAD(&cache->uc_hashtable[i]);
/* NOTE(review): strncpy does not NUL-terminate on truncation; this
 * relies on OBD_ALLOC returning zeroed memory — confirm. */
465 strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
466 /* upcall pathname proc tunable */
467 strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
468 cache->uc_entry_expire = 20 * 60 * CFS_HZ;
469 cache->uc_acquire_expire = 30 * CFS_HZ;
474 EXPORT_SYMBOL(upcall_cache_init);
/* Tear down @cache: force-flush every entry (all must be idle by now)
 * and free the cache structure itself.  The NULL-cache early return,
 * if any, is elided from this chunk. */
476 void upcall_cache_cleanup(struct upcall_cache *cache)
480 upcall_cache_flush_all(cache);
481 OBD_FREE(cache, sizeof(*cache));
483 EXPORT_SYMBOL(upcall_cache_cleanup);