/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/obdclass/upcall_cache.c
 *
 * Supplementary groups cache.
 */

#define DEBUG_SUBSYSTEM S_SEC

#include <libcfs/libcfs.h>
#include <uapi/linux/lnet/lnet-types.h>
#include <upcall_cache.h>
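
/*
 * Generic upcall cache: entries live in a hash table keyed by a 64-bit
 * value.  On a miss, the kernel invokes a user-space helper via
 * cache->uc_ops->do_upcall() and the entry waits in the ACQUIRING
 * state; the helper pushes the result back through
 * upcall_cache_downcall(), which marks the entry VALID and wakes all
 * waiters.  Entries move NEW -> ACQUIRING -> VALID/INVALID and are
 * freed once EXPIRED and no longer referenced.
 */
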
static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
					      __u64 key, void *args)
{
	struct upcall_cache_entry *entry;

	LIBCFS_ALLOC(entry, sizeof(*entry));
	if (!entry)
		return NULL;

	UC_CACHE_SET_NEW(entry);
	INIT_LIST_HEAD(&entry->ue_hash);
	entry->ue_key = key;
	atomic_set(&entry->ue_refcount, 0);
	init_waitqueue_head(&entry->ue_waitq);
	if (cache->uc_ops->init_entry)
		cache->uc_ops->init_entry(entry, args);
	return entry;
}

/* protected by cache lock */
static void free_entry(struct upcall_cache *cache,
		       struct upcall_cache_entry *entry)
{
	if (cache->uc_ops->free_entry)
		cache->uc_ops->free_entry(cache, entry);

	list_del(&entry->ue_hash);
	CDEBUG(D_OTHER, "destroy cache entry %p for key %llu\n",
	       entry, entry->ue_key);
	LIBCFS_FREE(entry, sizeof(*entry));
}

static inline int upcall_compare(struct upcall_cache *cache,
				 struct upcall_cache_entry *entry,
				 __u64 key, void *args)
{
	if (entry->ue_key != key)
		return -1;

	if (cache->uc_ops->upcall_compare)
		return cache->uc_ops->upcall_compare(cache, entry, key, args);

	return 0;
}

static inline int downcall_compare(struct upcall_cache *cache,
				   struct upcall_cache_entry *entry,
				   __u64 key, void *args)
{
	if (entry->ue_key != key)
		return -1;

	if (cache->uc_ops->downcall_compare)
		return cache->uc_ops->downcall_compare(cache, entry, key, args);

	return 0;
}

static inline void get_entry(struct upcall_cache_entry *entry)
{
	atomic_inc(&entry->ue_refcount);
}

static inline void put_entry(struct upcall_cache *cache,
			     struct upcall_cache_entry *entry)
{
	if (atomic_dec_and_test(&entry->ue_refcount) &&
	    (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
		free_entry(cache, entry);
	}
}

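/*
 * Check whether @entry is still usable.  Returns 0 if the entry is
 * VALID and not yet expired, or if its upcall is still within the
 * acquire window.  Otherwise the entry is marked EXPIRED, unlinked
 * from its hash chain, freed when unreferenced, and 1 is returned.
 */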
static int check_unlink_entry(struct upcall_cache *cache,
			      struct upcall_cache_entry *entry)
{
	time64_t now = ktime_get_seconds();

	if (UC_CACHE_IS_VALID(entry) && now < entry->ue_expire)
		return 0;

	if (UC_CACHE_IS_ACQUIRING(entry)) {
		if (entry->ue_acquire_expire == 0 ||
		    now < entry->ue_acquire_expire)
			return 0;

		UC_CACHE_SET_EXPIRED(entry);
		wake_up(&entry->ue_waitq);
	} else if (!UC_CACHE_IS_INVALID(entry)) {
		UC_CACHE_SET_EXPIRED(entry);
	}

	list_del_init(&entry->ue_hash);
	if (!atomic_read(&entry->ue_refcount))
		free_entry(cache, entry);
	return 1;
}

static inline int refresh_entry(struct upcall_cache *cache,
				struct upcall_cache_entry *entry)
{
	LASSERT(cache->uc_ops->do_upcall);
	return cache->uc_ops->do_upcall(cache, entry);
}

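/*
 * Return the cache entry for @key with a reference held, creating it
 * on a miss.  A NEW entry fires the upcall; if another thread already
 * has an upcall in flight, sleep on the entry's waitqueue until the
 * downcall arrives or the acquire window expires.  On failure an
 * ERR_PTR() is returned instead.
 */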
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
						  __u64 key, void *args)
{
	struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
	struct list_head *head;
	wait_queue_entry_t wait;
	int rc, found;
	ENTRY;

	LASSERT(cache);

	head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
	found = 0;
	spin_lock(&cache->uc_lock);
	list_for_each_entry_safe(entry, next, head, ue_hash) {
		/* check invalid & expired items */
		if (check_unlink_entry(cache, entry))
			continue;
		if (upcall_compare(cache, entry, key, args) == 0) {
			found = 1;
			break;
		}
	}

	if (!found) {
		if (!new) {
			spin_unlock(&cache->uc_lock);
			new = alloc_entry(cache, key, args);
			if (!new) {
				CERROR("fail to alloc entry\n");
				RETURN(ERR_PTR(-ENOMEM));
			}
			goto find_again;
		} else {
			list_add(&new->ue_hash, head);
			entry = new;
		}
	} else {
		if (new) {
			free_entry(cache, new);
			new = NULL;
		}
		list_move(&entry->ue_hash, head);
	}
	get_entry(entry);

	/* acquire for new one */
	if (UC_CACHE_IS_NEW(entry)) {
		UC_CACHE_SET_ACQUIRING(entry);
		UC_CACHE_CLEAR_NEW(entry);
		spin_unlock(&cache->uc_lock);
		rc = refresh_entry(cache, entry);
		spin_lock(&cache->uc_lock);
		entry->ue_acquire_expire = ktime_get_seconds() +
					   cache->uc_acquire_expire;
		if (rc < 0) {
			UC_CACHE_CLEAR_ACQUIRING(entry);
			UC_CACHE_SET_INVALID(entry);
			wake_up(&entry->ue_waitq);
			if (unlikely(rc == -EREMCHG)) {
				put_entry(cache, entry);
				GOTO(out, entry = ERR_PTR(rc));
			}
		}
	}
	/* someone (and only one) is doing an upcall for this item,
	 * wait for it to complete */
	if (UC_CACHE_IS_ACQUIRING(entry)) {
		long expiry = (entry == new) ?
			      cfs_time_seconds(cache->uc_acquire_expire) :
			      MAX_SCHEDULE_TIMEOUT;
		long left;

		init_wait(&wait);
		add_wait_queue(&entry->ue_waitq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock(&cache->uc_lock);

		left = schedule_timeout(expiry);

		spin_lock(&cache->uc_lock);
		remove_wait_queue(&entry->ue_waitq, &wait);
		if (UC_CACHE_IS_ACQUIRING(entry)) {
			/* we're interrupted or upcall failed in the middle */
			rc = left > 0 ? -EINTR : -ETIMEDOUT;
			CERROR("acquire for key %llu: error %d\n",
			       entry->ue_key, rc);
			put_entry(cache, entry);
			GOTO(out, entry = ERR_PTR(rc));
		}
	}

	/* invalid means error, don't need to try again */
	if (UC_CACHE_IS_INVALID(entry)) {
		put_entry(cache, entry);
		GOTO(out, entry = ERR_PTR(-EIDRM));
	}

	/* check expired
	 * We can't refresh the existing one because some
	 * memory might be shared by multiple processes.
	 */
	if (check_unlink_entry(cache, entry)) {
		/* If expired, try again.  But if this entry was created by
		 * me and expired too quickly without any error, it should
		 * at least get a chance to be used once.
		 */
		if (entry != new) {
			put_entry(cache, entry);
			spin_unlock(&cache->uc_lock);
			new = NULL;
			goto find_again;
		}
	}

	/* Now we know it's good */
out:
	spin_unlock(&cache->uc_lock);
	RETURN(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry);

void upcall_cache_put_entry(struct upcall_cache *cache,
			    struct upcall_cache_entry *entry)
{
	ENTRY;

	if (!entry) {
		EXIT;
		return;
	}

	LASSERT(atomic_read(&entry->ue_refcount) > 0);
	spin_lock(&cache->uc_lock);
	put_entry(cache, entry);
	spin_unlock(&cache->uc_lock);
	EXIT;
}
EXPORT_SYMBOL(upcall_cache_put_entry);

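/*
 * Entry point for the user-space helper's reply (the "downcall"): find
 * the matching ACQUIRING entry, parse the payload via
 * cache->uc_ops->parse_downcall(), mark the entry VALID (INVALID if
 * @err was set or parsing failed) and wake up every waiter.
 */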
int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
			  void *args)
{
	struct upcall_cache_entry *entry = NULL;
	struct list_head *head;
	int found = 0, rc = 0;
	ENTRY;

	LASSERT(cache);

	head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

	spin_lock(&cache->uc_lock);
	list_for_each_entry(entry, head, ue_hash) {
		if (downcall_compare(cache, entry, key, args) == 0) {
			found = 1;
			get_entry(entry);
			break;
		}
	}

	if (!found) {
		CDEBUG(D_OTHER, "%s: upcall for key %llu not expected\n",
		       cache->uc_name, key);
		/* entry not found: it may have been flushed, which is legal */
		spin_unlock(&cache->uc_lock);
		RETURN(-EINVAL);
	}

	if (err) {
		CDEBUG(D_OTHER, "%s: upcall for key %llu returned %d\n",
		       cache->uc_name, entry->ue_key, err);
		GOTO(out, rc = -EINVAL);
	}

	if (!UC_CACHE_IS_ACQUIRING(entry)) {
		CDEBUG(D_RPCTRACE, "%s: found uptodate entry %p (key %llu)\n",
		       cache->uc_name, entry, entry->ue_key);
		GOTO(out, rc = 0);
	}

	if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
		CERROR("%s: found a stale entry %p (key %llu) in ioctl\n",
		       cache->uc_name, entry, entry->ue_key);
		GOTO(out, rc = -EINVAL);
	}

	spin_unlock(&cache->uc_lock);
	if (cache->uc_ops->parse_downcall)
		rc = cache->uc_ops->parse_downcall(cache, entry, args);
	spin_lock(&cache->uc_lock);
	if (rc)
		GOTO(out, rc);

	entry->ue_expire = ktime_get_seconds() + cache->uc_entry_expire;
	UC_CACHE_SET_VALID(entry);
	CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key %llu\n",
	       cache->uc_name, entry, entry->ue_key);
out:
	if (rc) {
		UC_CACHE_SET_INVALID(entry);
		list_del_init(&entry->ue_hash);
	}
	UC_CACHE_CLEAR_ACQUIRING(entry);
	spin_unlock(&cache->uc_lock);
	wake_up(&entry->ue_waitq);
	put_entry(cache, entry);

	RETURN(rc);
}
EXPORT_SYMBOL(upcall_cache_downcall);

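/*
 * Expire all entries, freeing those that are unreferenced.  With @force
 * the caller guarantees (and this function asserts) that no entry is
 * referenced any more, so every entry is freed immediately.
 */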
void upcall_cache_flush(struct upcall_cache *cache, int force)
{
	struct upcall_cache_entry *entry, *next;
	int i;
	ENTRY;

	spin_lock(&cache->uc_lock);
	for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
		list_for_each_entry_safe(entry, next,
					 &cache->uc_hashtable[i], ue_hash) {
			if (!force && atomic_read(&entry->ue_refcount)) {
				UC_CACHE_SET_EXPIRED(entry);
				continue;
			}
			LASSERT(!atomic_read(&entry->ue_refcount));
			free_entry(cache, entry);
		}
	}
	spin_unlock(&cache->uc_lock);
	EXIT;
}
EXPORT_SYMBOL(upcall_cache_flush);

void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
	struct list_head *head;
	struct upcall_cache_entry *entry;
	int found = 0;
	ENTRY;

	head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

	spin_lock(&cache->uc_lock);
	list_for_each_entry(entry, head, ue_hash) {
		if (upcall_compare(cache, entry, key, args) == 0) {
			found = 1;
			break;
		}
	}

	if (found) {
		CWARN("%s: flush entry %p: key %llu, ref %d, fl %x, "
		      "cur %lld, ex %lld/%lld\n",
		      cache->uc_name, entry, entry->ue_key,
		      atomic_read(&entry->ue_refcount), entry->ue_flags,
		      ktime_get_real_seconds(), entry->ue_acquire_expire,
		      entry->ue_expire);
		UC_CACHE_SET_EXPIRED(entry);
		if (!atomic_read(&entry->ue_refcount))
			free_entry(cache, entry);
	}
	spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);

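/*
 * Create a cache named @name whose misses are resolved by the
 * user-space program @upcall, with per-key behaviour supplied through
 * @ops.  By default entries stay valid for 20 minutes and an upcall
 * gets 30 seconds to complete before waiters time out.
 */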
struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
				       struct upcall_cache_ops *ops)
{
	struct upcall_cache *cache;
	int i;
	ENTRY;

	LIBCFS_ALLOC(cache, sizeof(*cache));
	if (!cache)
		RETURN(ERR_PTR(-ENOMEM));

	spin_lock_init(&cache->uc_lock);
	init_rwsem(&cache->uc_upcall_rwsem);
	for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&cache->uc_hashtable[i]);
	strlcpy(cache->uc_name, name, sizeof(cache->uc_name));
	/* upcall pathname proc tunable */
	strlcpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall));
	cache->uc_entry_expire = 20 * 60;	/* 20 min */
	cache->uc_acquire_expire = 30;		/* 30 s */
	cache->uc_ops = ops;

	RETURN(cache);
}
EXPORT_SYMBOL(upcall_cache_init);

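/*
 * Usage sketch (illustrative only: "my_ops" and the my_* callbacks are
 * hypothetical stand-ins, not part of this file; a real user such as
 * the MDT identity cache supplies its own upcall_cache_ops and helper
 * binary):
 *
 *	static struct upcall_cache_ops my_ops = {
 *		.init_entry	  = my_init_entry,
 *		.free_entry	  = my_free_entry,
 *		.upcall_compare	  = my_upcall_compare,
 *		.downcall_compare = my_downcall_compare,
 *		.do_upcall	  = my_do_upcall,
 *		.parse_downcall	  = my_parse_downcall,
 *	};
 *
 *	cache = upcall_cache_init("identity", "/usr/sbin/l_getidentity",
 *				  &my_ops);
 *	entry = upcall_cache_get_entry(cache, key, args);
 *	if (!IS_ERR(entry)) {
 *		... use entry ...
 *		upcall_cache_put_entry(cache, entry);
 *	}
 *	upcall_cache_cleanup(cache);
 */
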
void upcall_cache_cleanup(struct upcall_cache *cache)
{
	if (!cache)
		return;
	upcall_cache_flush_all(cache);
	LIBCFS_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(upcall_cache_cleanup);