* Lustre is a trademark of Sun Microsystems, Inc.
*/
-#ifndef _LUCACHE_H
-#define _LUCACHE_H
+#ifndef _UPCALL_CACHE_H
+#define _UPCALL_CACHE_H
#include <libcfs/libcfs.h>
#include <lnet/types.h>
#define UC_CACHE_IS_EXPIRED(i) ((i)->ue_flags & UC_CACHE_EXPIRED)
#define UC_CACHE_IS_VALID(i) ((i)->ue_flags == 0)
-#define UC_CACHE_SET_NEW(i) (i)->ue_flags |= UC_CACHE_NEW
-#define UC_CACHE_SET_INVALID(i) (i)->ue_flags |= UC_CACHE_INVALID
-#define UC_CACHE_SET_ACQUIRING(i) (i)->ue_flags |= UC_CACHE_ACQUIRING
-#define UC_CACHE_SET_EXPIRED(i) (i)->ue_flags |= UC_CACHE_EXPIRED
-#define UC_CACHE_SET_VALID(i) (i)->ue_flags = 0
+#define UC_CACHE_SET_NEW(i) ((i)->ue_flags |= UC_CACHE_NEW)
+#define UC_CACHE_SET_INVALID(i) ((i)->ue_flags |= UC_CACHE_INVALID)
+#define UC_CACHE_SET_ACQUIRING(i) ((i)->ue_flags |= UC_CACHE_ACQUIRING)
+#define UC_CACHE_SET_EXPIRED(i) ((i)->ue_flags |= UC_CACHE_EXPIRED)
+#define UC_CACHE_SET_VALID(i) ((i)->ue_flags = 0)
-#define UC_CACHE_CLEAR_NEW(i) (i)->ue_flags &= ~UC_CACHE_NEW
-#define UC_CACHE_CLEAR_ACQUIRING(i) (i)->ue_flags &= ~UC_CACHE_ACQUIRING
-#define UC_CACHE_CLEAR_INVALID(i) (i)->ue_flags &= ~UC_CACHE_INVALID
-#define UC_CACHE_CLEAR_EXPIRED(i) (i)->ue_flags &= ~UC_CACHE_EXPIRED
+#define UC_CACHE_CLEAR_NEW(i) ((i)->ue_flags &= ~UC_CACHE_NEW)
+#define UC_CACHE_CLEAR_ACQUIRING(i) ((i)->ue_flags &= ~UC_CACHE_ACQUIRING)
+#define UC_CACHE_CLEAR_INVALID(i) ((i)->ue_flags &= ~UC_CACHE_INVALID)
+#define UC_CACHE_CLEAR_EXPIRED(i) ((i)->ue_flags &= ~UC_CACHE_EXPIRED)
struct upcall_cache_entry;
struct md_perm {
- lnet_nid_t mp_nid;
- __u32 mp_perm;
+ lnet_nid_t mp_nid;
+ uint32_t mp_perm;
};
struct md_identity {
struct upcall_cache_entry {
struct list_head ue_hash;
- __u64 ue_key;
+ uint64_t ue_key;
atomic_t ue_refcount;
int ue_flags;
wait_queue_head_t ue_waitq;
struct upcall_cache;
struct upcall_cache_ops {
- void (*init_entry)(struct upcall_cache_entry *, void *args);
- void (*free_entry)(struct upcall_cache *,
- struct upcall_cache_entry *);
- int (*upcall_compare)(struct upcall_cache *,
- struct upcall_cache_entry *,
- __u64 key, void *args);
- int (*downcall_compare)(struct upcall_cache *,
- struct upcall_cache_entry *,
- __u64 key, void *args);
- int (*do_upcall)(struct upcall_cache *,
- struct upcall_cache_entry *);
- int (*parse_downcall)(struct upcall_cache *,
- struct upcall_cache_entry *, void *);
+ void (*init_entry)(struct upcall_cache_entry *, void *args);
+ void (*free_entry)(struct upcall_cache *,
+ struct upcall_cache_entry *);
+ int (*upcall_compare)(struct upcall_cache *,
+ struct upcall_cache_entry *,
+ __u64 key, void *args);
+ int (*downcall_compare)(struct upcall_cache *,
+ struct upcall_cache_entry *,
+ __u64 key, void *args);
+ int (*do_upcall)(struct upcall_cache *,
+ struct upcall_cache_entry *);
+ int (*parse_downcall)(struct upcall_cache *,
+ struct upcall_cache_entry *, void *);
};
struct upcall_cache {
};
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
- __u64 key, void *args);
+ __u64 key, void *args);
void upcall_cache_put_entry(struct upcall_cache *cache,
- struct upcall_cache_entry *entry);
+ struct upcall_cache_entry *entry);
int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
- void *args);
-void upcall_cache_flush_idle(struct upcall_cache *cache);
-void upcall_cache_flush_all(struct upcall_cache *cache);
+ void *args);
+void upcall_cache_flush(struct upcall_cache *cache, int force);
+
+static inline void upcall_cache_flush_idle(struct upcall_cache *cache)
+{
+ upcall_cache_flush(cache, 0);
+}
+
+static inline void upcall_cache_flush_all(struct upcall_cache *cache)
+{
+ upcall_cache_flush(cache, 1);
+}
+
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args);
struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
- struct upcall_cache_ops *ops);
+ struct upcall_cache_ops *ops);
void upcall_cache_cleanup(struct upcall_cache *cache);
-#if 0
-struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *hash,
- __u64 key, __u32 primary,
- __u32 ngroups, __u32 *groups);
-void upcall_cache_put_entry(struct upcall_cache *hash,
- struct upcall_cache_entry *entry);
-int upcall_cache_downcall(struct upcall_cache *hash, __u32 err, __u64 key,
- __u32 primary, __u32 ngroups, __u32 *groups);
-void upcall_cache_flush_idle(struct upcall_cache *cache);
-void upcall_cache_flush_all(struct upcall_cache *cache);
-struct upcall_cache *upcall_cache_init(const char *name);
-void upcall_cache_cleanup(struct upcall_cache *hash);
-
-#endif
-
/** @} ucache */
-#endif /* _LUCACHE_H */
+#endif /* _UPCALL_CACHE_H */
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
- * libcfs/libcfs/upcall_cache.c
+ * lustre/obdclass/upcall_cache.c
*
* Supplementary groups cache.
*/
#define DEBUG_SUBSYSTEM S_SEC
-#include <libcfs/lucache.h>
+#include <libcfs/libcfs.h>
#include <lnet/types.h>
+#include <upcall_cache.h>
static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
- __u64 key, void *args)
+ __u64 key, void *args)
{
struct upcall_cache_entry *entry;
/* protected by cache lock */
static void free_entry(struct upcall_cache *cache,
- struct upcall_cache_entry *entry)
+ struct upcall_cache_entry *entry)
{
if (cache->uc_ops->free_entry)
cache->uc_ops->free_entry(cache, entry);
}
static inline int upcall_compare(struct upcall_cache *cache,
- struct upcall_cache_entry *entry,
- __u64 key, void *args)
+ struct upcall_cache_entry *entry,
+ __u64 key, void *args)
{
- if (entry->ue_key != key)
- return -1;
+ if (entry->ue_key != key)
+ return -1;
- if (cache->uc_ops->upcall_compare)
- return cache->uc_ops->upcall_compare(cache, entry, key, args);
+ if (cache->uc_ops->upcall_compare)
+ return cache->uc_ops->upcall_compare(cache, entry, key, args);
- return 0;
+ return 0;
}
static inline int downcall_compare(struct upcall_cache *cache,
- struct upcall_cache_entry *entry,
- __u64 key, void *args)
+ struct upcall_cache_entry *entry,
+ __u64 key, void *args)
{
- if (entry->ue_key != key)
- return -1;
+ if (entry->ue_key != key)
+ return -1;
- if (cache->uc_ops->downcall_compare)
- return cache->uc_ops->downcall_compare(cache, entry, key, args);
+ if (cache->uc_ops->downcall_compare)
+ return cache->uc_ops->downcall_compare(cache, entry, key, args);
- return 0;
+ return 0;
}
static inline void get_entry(struct upcall_cache_entry *entry)
}
static inline int refresh_entry(struct upcall_cache *cache,
- struct upcall_cache_entry *entry)
+ struct upcall_cache_entry *entry)
{
- LASSERT(cache->uc_ops->do_upcall);
- return cache->uc_ops->do_upcall(cache, entry);
+ LASSERT(cache->uc_ops->do_upcall);
+ return cache->uc_ops->do_upcall(cache, entry);
}
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
- __u64 key, void *args)
+ __u64 key, void *args)
{
struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
struct list_head *head;
int rc, found;
ENTRY;
- LASSERT(cache);
+ LASSERT(cache);
head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
}
get_entry(entry);
- /* acquire for new one */
- if (UC_CACHE_IS_NEW(entry)) {
- UC_CACHE_SET_ACQUIRING(entry);
- UC_CACHE_CLEAR_NEW(entry);
+ /* acquire for new one */
+ if (UC_CACHE_IS_NEW(entry)) {
+ UC_CACHE_SET_ACQUIRING(entry);
+ UC_CACHE_CLEAR_NEW(entry);
spin_unlock(&cache->uc_lock);
rc = refresh_entry(cache, entry);
spin_lock(&cache->uc_lock);
- entry->ue_acquire_expire =
- cfs_time_shift(cache->uc_acquire_expire);
- if (rc < 0) {
+ entry->ue_acquire_expire =
+ cfs_time_shift(cache->uc_acquire_expire);
+ if (rc < 0) {
UC_CACHE_CLEAR_ACQUIRING(entry);
UC_CACHE_SET_INVALID(entry);
wake_up_all(&entry->ue_waitq);
put_entry(cache, entry);
GOTO(out, entry = ERR_PTR(rc));
}
- }
- }
- /* someone (and only one) is doing upcall upon this item,
- * wait it to complete */
- if (UC_CACHE_IS_ACQUIRING(entry)) {
- long expiry = (entry == new) ?
- cfs_time_seconds(cache->uc_acquire_expire) :
+ }
+ }
+	/* someone (and only one) is doing upcall upon this item,
+	 * wait for it to complete */
+ if (UC_CACHE_IS_ACQUIRING(entry)) {
+ long expiry = (entry == new) ?
+ cfs_time_seconds(cache->uc_acquire_expire) :
MAX_SCHEDULE_TIMEOUT;
long left;
put_entry(cache, entry);
GOTO(out, entry = ERR_PTR(rc));
}
- }
-
- /* invalid means error, don't need to try again */
- if (UC_CACHE_IS_INVALID(entry)) {
- put_entry(cache, entry);
- GOTO(out, entry = ERR_PTR(-EIDRM));
- }
-
- /* check expired
- * We can't refresh the existing one because some
- * memory might be shared by multiple processes.
- */
- if (check_unlink_entry(cache, entry)) {
- /* if expired, try again. but if this entry is
- * created by me but too quickly turn to expired
- * without any error, should at least give a
- * chance to use it once.
- */
+ }
+
+ /* invalid means error, don't need to try again */
+ if (UC_CACHE_IS_INVALID(entry)) {
+ put_entry(cache, entry);
+ GOTO(out, entry = ERR_PTR(-EIDRM));
+ }
+
+ /* check expired
+ * We can't refresh the existing one because some
+ * memory might be shared by multiple processes.
+ */
+ if (check_unlink_entry(cache, entry)) {
+		/* if expired, try again. but if this entry was
+		 * created by me and turned expired too quickly
+		 * without any error, it should at least get a
+		 * chance to be used once.
+		 */
if (entry != new) {
put_entry(cache, entry);
spin_unlock(&cache->uc_lock);
}
}
- /* Now we know it's good */
+ /* Now we know it's good */
out:
spin_unlock(&cache->uc_lock);
RETURN(entry);
EXPORT_SYMBOL(upcall_cache_get_entry);
void upcall_cache_put_entry(struct upcall_cache *cache,
- struct upcall_cache_entry *entry)
+ struct upcall_cache_entry *entry)
{
ENTRY;
EXPORT_SYMBOL(upcall_cache_put_entry);
int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
- void *args)
+ void *args)
{
struct upcall_cache_entry *entry = NULL;
struct list_head *head;
}
}
- if (!found) {
- CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
- cache->uc_name, key);
- /* haven't found, it's possible */
+ if (!found) {
+ CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
+ cache->uc_name, key);
+ /* haven't found, it's possible */
spin_unlock(&cache->uc_lock);
- RETURN(-EINVAL);
- }
-
- if (err) {
- CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n",
- cache->uc_name, entry->ue_key, err);
- GOTO(out, rc = -EINVAL);
- }
-
- if (!UC_CACHE_IS_ACQUIRING(entry)) {
- CDEBUG(D_RPCTRACE,"%s: found uptodate entry %p (key "LPU64")\n",
- cache->uc_name, entry, entry->ue_key);
- GOTO(out, rc = 0);
- }
-
- if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
- CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n",
- cache->uc_name, entry, entry->ue_key);
- GOTO(out, rc = -EINVAL);
- }
+ RETURN(-EINVAL);
+ }
+
+ if (err) {
+ CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n",
+ cache->uc_name, entry->ue_key, err);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ if (!UC_CACHE_IS_ACQUIRING(entry)) {
+		CDEBUG(D_RPCTRACE, "%s: found uptodate entry %p (key "LPU64")\n",
+		       cache->uc_name, entry, entry->ue_key);
+ GOTO(out, rc = 0);
+ }
+
+ if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
+ CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n",
+ cache->uc_name, entry, entry->ue_key);
+ GOTO(out, rc = -EINVAL);
+ }
spin_unlock(&cache->uc_lock);
if (cache->uc_ops->parse_downcall)
rc = cache->uc_ops->parse_downcall(cache, entry, args);
spin_lock(&cache->uc_lock);
- if (rc)
- GOTO(out, rc);
+ if (rc)
+ GOTO(out, rc);
- entry->ue_expire = cfs_time_shift(cache->uc_entry_expire);
- UC_CACHE_SET_VALID(entry);
- CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
- cache->uc_name, entry, entry->ue_key);
+ entry->ue_expire = cfs_time_shift(cache->uc_entry_expire);
+ UC_CACHE_SET_VALID(entry);
+ CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
+ cache->uc_name, entry, entry->ue_key);
out:
if (rc) {
UC_CACHE_SET_INVALID(entry);
}
EXPORT_SYMBOL(upcall_cache_downcall);
-static void cache_flush(struct upcall_cache *cache, int force)
+void upcall_cache_flush(struct upcall_cache *cache, int force)
{
struct upcall_cache_entry *entry, *next;
int i;
spin_unlock(&cache->uc_lock);
EXIT;
}
-
-void upcall_cache_flush_idle(struct upcall_cache *cache)
-{
- cache_flush(cache, 0);
-}
-EXPORT_SYMBOL(upcall_cache_flush_idle);
-
-void upcall_cache_flush_all(struct upcall_cache *cache)
-{
- cache_flush(cache, 1);
-}
-EXPORT_SYMBOL(upcall_cache_flush_all);
+EXPORT_SYMBOL(upcall_cache_flush);
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
EXPORT_SYMBOL(upcall_cache_flush_one);
struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
- struct upcall_cache_ops *ops)
+ struct upcall_cache_ops *ops)
{
struct upcall_cache *cache;
int i;
spin_lock_init(&cache->uc_lock);
rwlock_init(&cache->uc_upcall_rwlock);
- for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
+ for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
INIT_LIST_HEAD(&cache->uc_hashtable[i]);
strlcpy(cache->uc_name, name, sizeof(cache->uc_name));
/* upcall pathname proc tunable */
void upcall_cache_cleanup(struct upcall_cache *cache)
{
- if (!cache)
- return;
- upcall_cache_flush_all(cache);
- LIBCFS_FREE(cache, sizeof(*cache));
+ if (!cache)
+ return;
+ upcall_cache_flush_all(cache);
+ LIBCFS_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(upcall_cache_cleanup);