/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
                                              __u64 key, void *args)
{
-        struct upcall_cache_entry *entry;
-
-        LIBCFS_ALLOC(entry, sizeof(*entry));
-        if (!entry)
-                return NULL;
-
-        UC_CACHE_SET_NEW(entry);
-        CFS_INIT_LIST_HEAD(&entry->ue_hash);
-        entry->ue_key = key;
-        cfs_atomic_set(&entry->ue_refcount, 0);
-        cfs_waitq_init(&entry->ue_waitq);
-        if (cache->uc_ops->init_entry)
-                cache->uc_ops->init_entry(entry, args);
-        return entry;
+        struct upcall_cache_entry *entry;
+
+        LIBCFS_ALLOC(entry, sizeof(*entry));
+        if (!entry)
+                return NULL;
+
+        UC_CACHE_SET_NEW(entry);
+        CFS_INIT_LIST_HEAD(&entry->ue_hash);
+        entry->ue_key = key;
+        atomic_set(&entry->ue_refcount, 0);
+        init_waitqueue_head(&entry->ue_waitq);
+        if (cache->uc_ops->init_entry)
+                cache->uc_ops->init_entry(entry, args);
+        return entry;
}
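
The hunk above is a mechanical substitution: cfs_atomic_set() becomes atomic_set() and cfs_waitq_init() becomes init_waitqueue_head(), while LIBCFS_ALLOC() and CFS_INIT_LIST_HEAD() stay behind their libcfs wrappers for now. For reference, here is a minimal sketch (not part of the patch) of the same initialization written purely against the native kernel API; struct demo_entry and demo_alloc() are hypothetical names, and kzalloc(..., GFP_NOFS) stands in only roughly for LIBCFS_ALLOC():

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/wait.h>

struct demo_entry {
        struct list_head        de_hash;        /* hash-chain linkage */
        __u64                   de_key;         /* lookup key */
        atomic_t                de_refcount;    /* reference count */
        wait_queue_head_t       de_waitq;       /* upcall waiters sleep here */
};

static struct demo_entry *demo_alloc(__u64 key)
{
        struct demo_entry *entry;

        entry = kzalloc(sizeof(*entry), GFP_NOFS);
        if (!entry)
                return NULL;

        INIT_LIST_HEAD(&entry->de_hash);        /* was CFS_INIT_LIST_HEAD() */
        entry->de_key = key;
        atomic_set(&entry->de_refcount, 0);     /* was cfs_atomic_set() */
        init_waitqueue_head(&entry->de_waitq);  /* was cfs_waitq_init() */
        return entry;
}
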
/* protected by cache lock */
static inline void get_entry(struct upcall_cache_entry *entry)
{
-        cfs_atomic_inc(&entry->ue_refcount);
+        atomic_inc(&entry->ue_refcount);
}
static inline void put_entry(struct upcall_cache *cache,
-                             struct upcall_cache_entry *entry)
+                             struct upcall_cache_entry *entry)
{
-        if (cfs_atomic_dec_and_test(&entry->ue_refcount) &&
-            (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
-                free_entry(cache, entry);
-        }
+        if (atomic_dec_and_test(&entry->ue_refcount) &&
+            (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
+                free_entry(cache, entry);
+        }
}
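
put_entry() keeps its semantics across the rename: cfs_atomic_dec_and_test() was a thin alias for atomic_dec_and_test(), which returns true only on the 1 -> 0 transition, so exactly one dropper can observe it and free the entry. A sketch of the bare idiom, continuing the hypothetical struct demo_entry above (the real code additionally requires the entry to be INVALID or EXPIRED, and runs under cache->uc_lock):

#include <linux/slab.h>

static void demo_put(struct demo_entry *entry)
{
        /* true only for the 1 -> 0 transition, so the free cannot
         * race with another concurrent dropper */
        if (atomic_dec_and_test(&entry->de_refcount))
                kfree(entry);
}
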
static int check_unlink_entry(struct upcall_cache *cache,
-                              struct upcall_cache_entry *entry)
+                              struct upcall_cache_entry *entry)
{
-        if (UC_CACHE_IS_VALID(entry) &&
-            cfs_time_before(cfs_time_current(), entry->ue_expire))
-                return 0;
-
-        if (UC_CACHE_IS_ACQUIRING(entry)) {
-                if (entry->ue_acquire_expire == 0 ||
-                    cfs_time_before(cfs_time_current(),
-                                    entry->ue_acquire_expire))
-                        return 0;
-
-                UC_CACHE_SET_EXPIRED(entry);
-                cfs_waitq_broadcast(&entry->ue_waitq);
-        } else if (!UC_CACHE_IS_INVALID(entry)) {
-                UC_CACHE_SET_EXPIRED(entry);
-        }
+        if (UC_CACHE_IS_VALID(entry) &&
+            cfs_time_before(cfs_time_current(), entry->ue_expire))
+                return 0;
+
+        if (UC_CACHE_IS_ACQUIRING(entry)) {
+                if (entry->ue_acquire_expire == 0 ||
+                    cfs_time_before(cfs_time_current(),
+                                    entry->ue_acquire_expire))
+                        return 0;
+
+                UC_CACHE_SET_EXPIRED(entry);
+                wake_up_all(&entry->ue_waitq);
+        } else if (!UC_CACHE_IS_INVALID(entry)) {
+                UC_CACHE_SET_EXPIRED(entry);
+        }

-        cfs_list_del_init(&entry->ue_hash);
-        if (!cfs_atomic_read(&entry->ue_refcount))
-                free_entry(cache, entry);
-        return 1;
+        cfs_list_del_init(&entry->ue_hash);
+        if (!atomic_read(&entry->ue_refcount))
+                free_entry(cache, entry);
+        return 1;
}
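
Note that check_unlink_entry() keeps the cfs_time_* wrappers; only the atomic and waitqueue calls change in this hunk. On Linux, cfs_time_before(cfs_time_current(), t) reduces to time_before(jiffies, t), which compares via signed subtraction and so stays correct across jiffies wrap-around, unlike a plain '<'. A small sketch under that assumption, with a hypothetical deadline parameter:

#include <linux/jiffies.h>

/* wrap-safe expiry test; 'expire' is a deadline in jiffies */
static int demo_expired(unsigned long expire)
{
        return !time_before(jiffies, expire);
}
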
static inline int refresh_entry(struct upcall_cache *cache,
                                struct upcall_cache_entry *entry)
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                  __u64 key, void *args)
{
-        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
-        cfs_list_t *head;
-        cfs_waitlink_t wait;
-        int rc, found;
-        ENTRY;
+        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
+        cfs_list_t *head;
+        wait_queue_t wait;
+        int rc, found;
+        ENTRY;
        LASSERT(cache);
                entry->ue_acquire_expire =
                        cfs_time_shift(cache->uc_acquire_expire);
                if (rc < 0) {
-                        UC_CACHE_CLEAR_ACQUIRING(entry);
-                        UC_CACHE_SET_INVALID(entry);
-                        cfs_waitq_broadcast(&entry->ue_waitq);
-                        if (unlikely(rc == -EREMCHG)) {
-                                put_entry(cache, entry);
-                                GOTO(out, entry = ERR_PTR(rc));
-                        }
+                        UC_CACHE_CLEAR_ACQUIRING(entry);
+                        UC_CACHE_SET_INVALID(entry);
+                        wake_up_all(&entry->ue_waitq);
+                        if (unlikely(rc == -EREMCHG)) {
+                                put_entry(cache, entry);
+                                GOTO(out, entry = ERR_PTR(rc));
+                        }
                }
        }
        /* someone (and only one) is doing upcall upon this item,
         * wait it to complete */
        if (UC_CACHE_IS_ACQUIRING(entry)) {
                long expiry = (entry == new) ?
                              cfs_time_seconds(cache->uc_acquire_expire) :
-                              CFS_MAX_SCHEDULE_TIMEOUT;
-                long left;
+                              MAX_SCHEDULE_TIMEOUT;
+                long left;

-                cfs_waitlink_init(&wait);
-                cfs_waitq_add(&entry->ue_waitq, &wait);
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+                init_waitqueue_entry_current(&wait);
+                add_wait_queue(&entry->ue_waitq, &wait);
+                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock(&cache->uc_lock);

-                left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
+                left = waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
                                       expiry);

                spin_lock(&cache->uc_lock);
-                cfs_waitq_del(&entry->ue_waitq, &wait);
-                if (UC_CACHE_IS_ACQUIRING(entry)) {
-                        /* we're interrupted or upcall failed in the middle */
-                        rc = left > 0 ? -EINTR : -ETIMEDOUT;
-                        CERROR("acquire for key "LPU64": error %d\n",
-                               entry->ue_key, rc);
-                        put_entry(cache, entry);
-                        GOTO(out, entry = ERR_PTR(rc));
-                }
+                remove_wait_queue(&entry->ue_waitq, &wait);
+                if (UC_CACHE_IS_ACQUIRING(entry)) {
+                        /* we're interrupted or upcall failed in the middle */
+                        rc = left > 0 ? -EINTR : -ETIMEDOUT;
+                        CERROR("acquire for key "LPU64": error %d\n",
+                               entry->ue_key, rc);
+                        put_entry(cache, entry);
+                        GOTO(out, entry = ERR_PTR(rc));
+                }
        }
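
The converted wait above still goes through two libcfs-era shims, init_waitqueue_entry_current() and waitq_timedwait(); as far as I can tell these wrap init_waitqueue_entry(&wait, current) and schedule_timeout() respectively. A sketch of the same sleep/wake protocol in pure upstream primitives of this kernel era (demo_sleep() is hypothetical; the caller's lock handling is reduced to a comment):

#include <linux/sched.h>
#include <linux/wait.h>

static long demo_sleep(wait_queue_head_t *wq, long timeout)
{
        wait_queue_t wait;

        init_waitqueue_entry(&wait, current);
        add_wait_queue(wq, &wait);
        /* set the task state before dropping the caller's lock so a
         * wake_up_all() fired in that window cannot be missed */
        set_current_state(TASK_INTERRUPTIBLE);
        timeout = schedule_timeout(timeout);
        remove_wait_queue(wq, &wait);
        return timeout; /* > 0: woken or signalled early; 0: timed out */
}
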
        /* invalid means error, don't need to try again */
                return;
        }

-        LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
+        LASSERT(atomic_read(&entry->ue_refcount) > 0);
        spin_lock(&cache->uc_lock);
        put_entry(cache, entry);
        spin_unlock(&cache->uc_lock);
}
        UC_CACHE_CLEAR_ACQUIRING(entry);
        spin_unlock(&cache->uc_lock);
-        cfs_waitq_broadcast(&entry->ue_waitq);
+        wake_up_all(&entry->ue_waitq);
        put_entry(cache, entry);
        RETURN(rc);
        ENTRY;
        spin_lock(&cache->uc_lock);
-        for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
-                cfs_list_for_each_entry_safe(entry, next,
-                                             &cache->uc_hashtable[i], ue_hash) {
-                        if (!force && cfs_atomic_read(&entry->ue_refcount)) {
-                                UC_CACHE_SET_EXPIRED(entry);
-                                continue;
-                        }
-                        LASSERT(!cfs_atomic_read(&entry->ue_refcount));
-                        free_entry(cache, entry);
-                }
-        }
+        for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
+                cfs_list_for_each_entry_safe(entry, next,
+                                             &cache->uc_hashtable[i], ue_hash) {
+                        if (!force && atomic_read(&entry->ue_refcount)) {
+                                UC_CACHE_SET_EXPIRED(entry);
+                                continue;
+                        }
+                        LASSERT(!atomic_read(&entry->ue_refcount));
+                        free_entry(cache, entry);
+                }
+        }
        spin_unlock(&cache->uc_lock);
        EXIT;
}
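
The flush loop needs the _safe iterator because free_entry() unlinks the current node from the hash chain: the iterator fetches the next pointer before the loop body runs, so freeing the current node is legal. A minimal standalone sketch of that idiom with a hypothetical node type:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
        struct list_head dn_hash;
};

static void demo_flush(struct list_head *head)
{
        struct demo_node *node, *next;

        /* 'next' is cached before the body may free 'node' */
        list_for_each_entry_safe(node, next, head, dn_hash) {
                list_del_init(&node->dn_hash);
                kfree(node);
        }
}
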
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
-        cfs_list_t *head;
-        struct upcall_cache_entry *entry;
-        int found = 0;
-        ENTRY;
+        cfs_list_t *head;
+        struct upcall_cache_entry *entry;
+        int found = 0;
+        ENTRY;

-        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
+        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
-        cfs_list_for_each_entry(entry, head, ue_hash) {
-                if (upcall_compare(cache, entry, key, args) == 0) {
-                        found = 1;
-                        break;
-                }
-        }
+        cfs_list_for_each_entry(entry, head, ue_hash) {
+                if (upcall_compare(cache, entry, key, args) == 0) {
+                        found = 1;
+                        break;
+                }
+        }

-        if (found) {
-                CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
-                      "cur %lu, ex %ld/%ld\n",
-                      cache->uc_name, entry, entry->ue_key,
-                      cfs_atomic_read(&entry->ue_refcount), entry->ue_flags,
-                      cfs_time_current_sec(), entry->ue_acquire_expire,
-                      entry->ue_expire);
-                UC_CACHE_SET_EXPIRED(entry);
-                if (!cfs_atomic_read(&entry->ue_refcount))
-                        free_entry(cache, entry);
-        }
+        if (found) {
+                CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
+                      "cur %lu, ex %ld/%ld\n",
+                      cache->uc_name, entry, entry->ue_key,
+                      atomic_read(&entry->ue_refcount), entry->ue_flags,
+                      cfs_time_current_sec(), entry->ue_acquire_expire,
+                      entry->ue_expire);
+                UC_CACHE_SET_EXPIRED(entry);
+                if (!atomic_read(&entry->ue_refcount))
+                        free_entry(cache, entry);
+        }
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);
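
For completeness, the lookup shape used by upcall_cache_flush_one(): hash the key to a bucket, then walk that one chain under the cache lock. The sketch below is illustrative only; the modulo hash and demo_find() are stand-ins for UC_CACHE_HASH_INDEX() and upcall_compare(), and it reuses the hypothetical struct demo_entry from the first sketch (the patch keeps cfs_list_for_each_entry(), which maps directly onto the native iterator):

#include <linux/list.h>
#include <linux/types.h>

#define DEMO_HASH_SIZE 139 /* illustrative bucket count */

static struct demo_entry *demo_find(struct list_head *table, __u64 key)
{
        struct list_head *head = &table[(unsigned long)key % DEMO_HASH_SIZE];
        struct demo_entry *entry;

        /* caller holds the lock, as upcall_cache_flush_one() does */
        list_for_each_entry(entry, head, de_hash)
                if (entry->de_key == key)
                        return entry;
        return NULL;
}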