int size;
} kmem_cache_t;
#define SLAB_HWCACHE_ALIGN 0
+#define SLAB_DESTROY_BY_RCU 0
static inline kmem_cache_t *
kmem_cache_create(const char *name, size_t objsize, size_t cdum,
unsigned long d,
}
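/*
 * Liblustre builds the same sources in userspace, so the kernel slab flags
 * are all stubbed to 0 here and OR-ing SLAB_DESTROY_BY_RCU into a cache's
 * flags (as done for ldlm_lock_slab below) compiles to a no-op.  A minimal
 * sketch of the kind of malloc-backed stub that sits behind these defines;
 * the helper name and body are illustrative, not the exact liblustre code.
 */
#include <stdlib.h>

static inline kmem_cache_t *
example_cache_create(const char *name, size_t objsize, unsigned long flags)
{
        kmem_cache_t *c = malloc(sizeof(*c));

        (void)name;
        (void)flags;                    /* every slab flag above is defined to 0 */
        if (c != NULL)
                c->size = objsize;      /* only the object size is remembered */
        return c;
}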
#ifdef LIBLUSTRE_POSIX_ACL
- #ifndef posix_acl_xattr_entry
+ #ifndef posix_acl_xattr_entry
#define posix_acl_xattr_entry xattr_acl_entry
#endif
- #ifndef posix_acl_xattr_header
+ #ifndef posix_acl_xattr_header
#define posix_acl_xattr_header xattr_acl_header
#endif
#ifndef posix_acl_xattr_size
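/*
 * Older kernels ship the ACL xattr types as xattr_acl_entry/xattr_acl_header,
 * newer ones as posix_acl_xattr_entry/posix_acl_xattr_header; the #ifndef
 * mapping above lets shared code use the newer names unconditionally.  A
 * minimal sketch of the kind of code this enables, assuming the names are
 * typedefs as in 2.6-era kernel headers; the helper is illustrative and
 * mirrors the standard posix_acl_xattr_count() calculation.
 */
static inline int
example_acl_entry_count(size_t body_size)
{
        if (body_size < sizeof(posix_acl_xattr_header))
                return -1;                              /* too small to be an ACL xattr */
        body_size -= sizeof(posix_acl_xattr_header);
        if (body_size % sizeof(posix_acl_xattr_entry) != 0)
                return -1;                              /* trailing garbage */
        return body_size / sizeof(posix_acl_xattr_entry);
}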
LDLM_LOCK_GET(lock);
spin_unlock_bh(&waiting_locks_spinlock);
LDLM_DEBUG(lock, "prolong the busy lock");
- ldlm_refresh_waiting_lock(lock,
+ ldlm_refresh_waiting_lock(lock,
ldlm_get_enq_timeout(lock));
spin_lock_bh(&waiting_locks_spinlock);
RETURN(0);
}
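/*
 * Note on the hunk above: LDLM_LOCK_GET() pins the lock with an extra
 * reference before waiting_locks_spinlock is dropped, so the lock cannot
 * go away while its callback timeout is refreshed outside the spinlock;
 * ldlm_get_enq_timeout() supplies the enqueue timeout used to re-arm it.
 */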
- if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
+ if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
#endif
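/*
 * Note on the hunk above: OBD_RACE() is Lustre's fault-injection race
 * point; the first thread to reach it with the matching fail_loc set
 * blocks until a second thread arrives, which makes the completion-AST
 * vs. blocking-AST ordering (OBD_FAIL_LDLM_CP_BL_RACE) reproducible in
 * tests.  LDLM_FL_FAIL_LOC limits the injection to locks explicitly
 * marked for it.
 */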
-/*
- * Export handle<->lock hash operations.
+/*
+ * Export handle<->lock hash operations.
*/
static unsigned
ldlm_export_lock_hash(lustre_hash_t *lh, void *key, unsigned mask)
return -ENOMEM;
ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
- sizeof(struct ldlm_lock), 0,
- SLAB_HWCACHE_ALIGN);
+ sizeof(struct ldlm_lock), 0,
+ SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU);
if (ldlm_lock_slab == NULL) {
cfs_mem_cache_destroy(ldlm_resource_slab);
return -ENOMEM;
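/*
 * Why SLAB_DESTROY_BY_RCU matters for ldlm_lock_slab: a freed ldlm_lock's
 * memory may be reused for another lock immediately, but the slab pages are
 * only returned to the page allocator after an RCU grace period, so a reader
 * under rcu_read_lock() never dereferences unmapped memory.  The price is
 * that lockless lookups must pin the object and then re-check its identity.
 * A generic sketch of that canonical pattern follows; all names are
 * illustrative, not the Lustre hash code, and the usual kernel RCU/atomic
 * primitives are assumed.
 */
#include <linux/types.h>
#include <linux/rcupdate.h>

struct example_obj {
        atomic_t eo_ref;                /* 0 means the object is being freed */
        __u64    eo_cookie;             /* identity, re-checked after pinning */
};

static struct example_obj *
example_rcu_lookup(struct example_obj **slot, __u64 cookie)
{
        struct example_obj *obj;

        rcu_read_lock();
        obj = rcu_dereference(*slot);
        if (obj == NULL)
                goto out;

        /* SLAB_DESTROY_BY_RCU only guarantees the memory still holds *some*
         * object of this type until rcu_read_unlock(); take a reference only
         * if the refcount has not already dropped to zero. */
        if (!atomic_inc_not_zero(&obj->eo_ref)) {
                obj = NULL;
                goto out;
        }

        /* The memory may have been recycled for a different object, so the
         * identity must be verified again now that the object is pinned. */
        if (obj->eo_cookie != cookie) {
                atomic_dec(&obj->eo_ref);       /* a real caller would release
                                                 * through its normal put path */
                obj = NULL;
        }
out:
        rcu_read_unlock();
        return obj;
}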