/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/upcall_cache.c
 *
 * Supplementary groups cache.
 */
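/*
 * An entry moves through NEW -> ACQUIRING -> VALID (or INVALID/EXPIRED):
 * upcall_cache_get_entry() fires the userspace upcall for a NEW entry and
 * sleeps on ue_waitq until upcall_cache_downcall() delivers the helper's
 * reply and wakes the waiters.
 */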
#define DEBUG_SUBSYSTEM S_SEC

#include <libcfs/linux/linux-misc.h>
#include <libcfs/libcfs.h>
#include <uapi/linux/lnet/lnet-types.h>
#include <upcall_cache.h>
static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
					      __u64 key, void *args)
{
	struct upcall_cache_entry *entry;

	LIBCFS_ALLOC(entry, sizeof(*entry));
	if (!entry)
		return NULL;

	UC_CACHE_SET_NEW(entry);
	INIT_LIST_HEAD(&entry->ue_hash);
	entry->ue_key = key;
	atomic_set(&entry->ue_refcount, 0);
	init_waitqueue_head(&entry->ue_waitq);
	if (cache->uc_ops->init_entry)
		cache->uc_ops->init_entry(entry, args);
	return entry;
}
/* protected by cache lock */
static void free_entry(struct upcall_cache *cache,
		       struct upcall_cache_entry *entry)
{
	if (cache->uc_ops->free_entry)
		cache->uc_ops->free_entry(cache, entry);

	list_del(&entry->ue_hash);
	CDEBUG(D_OTHER, "destroy cache entry %p for key %llu\n",
	       entry, entry->ue_key);
	LIBCFS_FREE(entry, sizeof(*entry));
}
static inline int upcall_compare(struct upcall_cache *cache,
				 struct upcall_cache_entry *entry,
				 __u64 key, void *args)
{
	if (entry->ue_key != key)
		return -1;

	if (cache->uc_ops->upcall_compare)
		return cache->uc_ops->upcall_compare(cache, entry, key, args);

	return 0;
}
static inline int downcall_compare(struct upcall_cache *cache,
				   struct upcall_cache_entry *entry,
				   __u64 key, void *args)
{
	if (entry->ue_key != key)
		return -1;

	if (cache->uc_ops->downcall_compare)
		return cache->uc_ops->downcall_compare(cache, entry, key, args);

	return 0;
}
static inline void get_entry(struct upcall_cache_entry *entry)
{
	atomic_inc(&entry->ue_refcount);
}
static inline void put_entry(struct upcall_cache *cache,
			     struct upcall_cache_entry *entry)
{
	if (atomic_dec_and_test(&entry->ue_refcount) &&
	    (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
		free_entry(cache, entry);
	}
}
/* Return 1 if the entry was expired/invalid and unlinked from the hash,
 * 0 if it is still usable. Caller must hold the cache lock. */
static int check_unlink_entry(struct upcall_cache *cache,
			      struct upcall_cache_entry *entry)
{
	if (UC_CACHE_IS_VALID(entry) &&
	    cfs_time_before(cfs_time_current(), entry->ue_expire))
		return 0;

	if (UC_CACHE_IS_ACQUIRING(entry)) {
		if (entry->ue_acquire_expire == 0 ||
		    cfs_time_before(cfs_time_current(),
				    entry->ue_acquire_expire))
			return 0;

		UC_CACHE_SET_EXPIRED(entry);
		wake_up_all(&entry->ue_waitq);
	} else if (!UC_CACHE_IS_INVALID(entry)) {
		UC_CACHE_SET_EXPIRED(entry);
	}

	list_del_init(&entry->ue_hash);
	if (!atomic_read(&entry->ue_refcount))
		free_entry(cache, entry);
	return 1;
}
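/*
 * The do_upcall hook is expected to launch the userspace helper (typically
 * via call_usermodehelper() on cache->uc_upcall) and return without
 * waiting; the result comes back asynchronously through
 * upcall_cache_downcall().
 */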
static inline int refresh_entry(struct upcall_cache *cache,
				struct upcall_cache_entry *entry)
{
	LASSERT(cache->uc_ops->do_upcall);
	return cache->uc_ops->do_upcall(cache, entry);
}
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
						  __u64 key, void *args)
{
	struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
	struct list_head *head;
	wait_queue_entry_t wait;
	int rc, found;
	ENTRY;

	LASSERT(cache);

	head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
	found = 0;
	spin_lock(&cache->uc_lock);
	list_for_each_entry_safe(entry, next, head, ue_hash) {
		/* check invalid & expired items */
		if (check_unlink_entry(cache, entry))
			continue;
		if (upcall_compare(cache, entry, key, args) == 0) {
			found = 1;
			break;
		}
	}

	if (!found) {
		if (!new) {
			spin_unlock(&cache->uc_lock);
			new = alloc_entry(cache, key, args);
			if (!new) {
				CERROR("fail to alloc entry\n");
				RETURN(ERR_PTR(-ENOMEM));
			}
			goto find_again;
		} else {
			list_add(&new->ue_hash, head);
			entry = new;
		}
	} else {
		if (new) {
			free_entry(cache, new);
			new = NULL;
		}
		list_move(&entry->ue_hash, head);
	}
	get_entry(entry);

	/* acquire for new one */
	if (UC_CACHE_IS_NEW(entry)) {
		UC_CACHE_SET_ACQUIRING(entry);
		UC_CACHE_CLEAR_NEW(entry);
		spin_unlock(&cache->uc_lock);
		rc = refresh_entry(cache, entry);
		spin_lock(&cache->uc_lock);
		entry->ue_acquire_expire =
			cfs_time_shift(cache->uc_acquire_expire);
		if (rc < 0) {
			UC_CACHE_CLEAR_ACQUIRING(entry);
			UC_CACHE_SET_INVALID(entry);
			wake_up_all(&entry->ue_waitq);
			if (unlikely(rc == -EREMCHG)) {
				put_entry(cache, entry);
				GOTO(out, entry = ERR_PTR(rc));
			}
		}
	}
	/* someone (and only one) is doing an upcall on this item;
	 * wait for it to complete */
	if (UC_CACHE_IS_ACQUIRING(entry)) {
		long expiry = (entry == new) ?
			      cfs_time_seconds(cache->uc_acquire_expire) :
			      MAX_SCHEDULE_TIMEOUT;
		long left;

		init_waitqueue_entry(&wait, current);
		add_wait_queue(&entry->ue_waitq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock(&cache->uc_lock);

		left = schedule_timeout(expiry);

		spin_lock(&cache->uc_lock);
		remove_wait_queue(&entry->ue_waitq, &wait);
		if (UC_CACHE_IS_ACQUIRING(entry)) {
			/* we're interrupted or upcall failed in the middle */
			rc = left > 0 ? -EINTR : -ETIMEDOUT;
			CERROR("acquire for key %llu: error %d\n",
			       entry->ue_key, rc);
			put_entry(cache, entry);
			GOTO(out, entry = ERR_PTR(rc));
		}
	}

	/* invalid means error, don't need to try again */
	if (UC_CACHE_IS_INVALID(entry)) {
		put_entry(cache, entry);
		GOTO(out, entry = ERR_PTR(-EIDRM));
	}

	/* check expired.
	 * We can't refresh the existing one because some
	 * memory might be shared by multiple processes.
	 */
	if (check_unlink_entry(cache, entry)) {
		/* if expired, try again. But if this entry was created by me
		 * and turned expired that quickly without any error, give it
		 * at least one chance to be used.
		 */
		if (entry != new) {
			put_entry(cache, entry);
			spin_unlock(&cache->uc_lock);
			new = NULL;
			goto find_again;
		}
	}

	/* Now we know it's good */
out:
	spin_unlock(&cache->uc_lock);
	RETURN(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry);
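/*
 * A minimal usage sketch for the lookup/release pair; "my_cache" and
 * "my_args" are hypothetical names, and the data behind the entry is
 * whatever this cache's ops attached to it:
 *
 *	struct upcall_cache_entry *e;
 *
 *	e = upcall_cache_get_entry(my_cache, key, my_args);
 *	if (IS_ERR(e))
 *		return PTR_ERR(e);
 *	... use the ops-specific data attached to e ...
 *	upcall_cache_put_entry(my_cache, e);
 */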
void upcall_cache_put_entry(struct upcall_cache *cache,
			    struct upcall_cache_entry *entry)
{
	ENTRY;

	if (!entry) {
		EXIT;
		return;
	}

	LASSERT(atomic_read(&entry->ue_refcount) > 0);
	spin_lock(&cache->uc_lock);
	put_entry(cache, entry);
	spin_unlock(&cache->uc_lock);
	EXIT;
}
EXPORT_SYMBOL(upcall_cache_put_entry);
int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
			  void *args)
{
	struct upcall_cache_entry *entry = NULL;
	struct list_head *head;
	int found = 0, rc = 0;
	ENTRY;

	LASSERT(cache);

	head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

	spin_lock(&cache->uc_lock);
	list_for_each_entry(entry, head, ue_hash) {
		if (downcall_compare(cache, entry, key, args) == 0) {
			found = 1;
			get_entry(entry);
			break;
		}
	}

	if (!found) {
		CDEBUG(D_OTHER, "%s: upcall for key %llu not expected\n",
		       cache->uc_name, key);
		/* haven't found, it's possible */
		spin_unlock(&cache->uc_lock);
		RETURN(-EINVAL);
	}

	if (err) {
		CDEBUG(D_OTHER, "%s: upcall for key %llu returned %d\n",
		       cache->uc_name, entry->ue_key, err);
		GOTO(out, rc = -EINVAL);
	}

	if (!UC_CACHE_IS_ACQUIRING(entry)) {
		CDEBUG(D_RPCTRACE, "%s: found uptodate entry %p (key %llu)\n",
		       cache->uc_name, entry, entry->ue_key);
		GOTO(out, rc = 0);
	}

	if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
		CERROR("%s: found a stale entry %p (key %llu) in ioctl\n",
		       cache->uc_name, entry, entry->ue_key);
		GOTO(out, rc = -EINVAL);
	}

	spin_unlock(&cache->uc_lock);
	if (cache->uc_ops->parse_downcall)
		rc = cache->uc_ops->parse_downcall(cache, entry, args);
	spin_lock(&cache->uc_lock);
	if (rc)
		GOTO(out, rc);

	entry->ue_expire = cfs_time_shift(cache->uc_entry_expire);
	UC_CACHE_SET_VALID(entry);
	CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key %llu\n",
	       cache->uc_name, entry, entry->ue_key);
out:
	if (rc) {
		UC_CACHE_SET_INVALID(entry);
		list_del_init(&entry->ue_hash);
	}
	UC_CACHE_CLEAR_ACQUIRING(entry);
	spin_unlock(&cache->uc_lock);
	wake_up_all(&entry->ue_waitq);
	put_entry(cache, entry);

	RETURN(rc);
}
EXPORT_SYMBOL(upcall_cache_downcall);
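/*
 * A minimal sketch of the reply path: once the userspace helper's output
 * has been parsed into an ops-specific buffer (hypothetical "data" here),
 * it is handed back against the same key the upcall used:
 *
 *	rc = upcall_cache_downcall(cache, helper_err, key, data);
 *	if (rc)
 *		CERROR("downcall for key %llu: rc = %d\n", key, rc);
 */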
void upcall_cache_flush(struct upcall_cache *cache, int force)
{
	struct upcall_cache_entry *entry, *next;
	int i;
	ENTRY;

	spin_lock(&cache->uc_lock);
	for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
		list_for_each_entry_safe(entry, next,
					 &cache->uc_hashtable[i], ue_hash) {
			if (!force && atomic_read(&entry->ue_refcount)) {
				UC_CACHE_SET_EXPIRED(entry);
				continue;
			}
			LASSERT(!atomic_read(&entry->ue_refcount));
			free_entry(cache, entry);
		}
	}
	spin_unlock(&cache->uc_lock);
	EXIT;
}
EXPORT_SYMBOL(upcall_cache_flush);
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
	struct list_head *head;
	struct upcall_cache_entry *entry;
	int found = 0;
	ENTRY;

	head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

	spin_lock(&cache->uc_lock);
	list_for_each_entry(entry, head, ue_hash) {
		if (upcall_compare(cache, entry, key, args) == 0) {
			found = 1;
			break;
		}
	}

	if (found) {
		CWARN("%s: flush entry %p: key %llu, ref %d, fl %x, "
		      "cur %lu, ex %ld/%ld\n",
		      cache->uc_name, entry, entry->ue_key,
		      atomic_read(&entry->ue_refcount), entry->ue_flags,
		      cfs_time_current_sec(), entry->ue_acquire_expire,
		      entry->ue_expire);
		UC_CACHE_SET_EXPIRED(entry);
		if (!atomic_read(&entry->ue_refcount))
			free_entry(cache, entry);
	}
	spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);
struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
				       struct upcall_cache_ops *ops)
{
	struct upcall_cache *cache;
	int i;
	ENTRY;

	LIBCFS_ALLOC(cache, sizeof(*cache));
	if (!cache)
		RETURN(ERR_PTR(-ENOMEM));

	spin_lock_init(&cache->uc_lock);
	init_rwsem(&cache->uc_upcall_rwsem);
	for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&cache->uc_hashtable[i]);
	strlcpy(cache->uc_name, name, sizeof(cache->uc_name));
	/* upcall pathname proc tunable */
	strlcpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall));
	cache->uc_entry_expire = 20 * 60;
	cache->uc_acquire_expire = 30;
	cache->uc_ops = ops;

	RETURN(cache);
}
EXPORT_SYMBOL(upcall_cache_init);
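/*
 * Typical lifecycle, a minimal sketch ("my_ops" and the helper path are
 * hypothetical):
 *
 *	struct upcall_cache *cache;
 *
 *	cache = upcall_cache_init("identity", "/path/to/helper", &my_ops);
 *	if (IS_ERR(cache))
 *		return PTR_ERR(cache);
 *	...
 *	upcall_cache_cleanup(cache);
 */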
void upcall_cache_cleanup(struct upcall_cache *cache)
{
	if (!cache)
		return;
	upcall_cache_flush_all(cache);
	LIBCFS_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(upcall_cache_cleanup);