* - read_lock_irqsave(lock, f)
* - write_lock_irqsave(lock, f)
* - write_unlock_irqrestore(lock, f)
- *
- * - SPIN_LOCK_UNLOCKED
*/
/*
#define cfs_spin_lock_irqsave(lock, f) spin_lock_irqsave(lock, f)
#define cfs_spin_unlock_irqrestore(lock, f) spin_unlock_irqrestore(lock, f)
-#define CFS_SPIN_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
-
/*
* rw_semaphore "implementation" (use Linux kernel's primitives)
*
#define cfs_write_lock_bh(lock) write_lock_bh(lock)
#define cfs_write_unlock_bh(lock) write_unlock_bh(lock)
-#define CFS_RW_LOCK_UNLOCKED RW_LOCK_UNLOCKED
+#ifndef DEFINE_RWLOCK
+#define DEFINE_RWLOCK(lock) rwlock_t lock = __RW_LOCK_UNLOCKED(lock)
+#endif
/*
* completion "implementation" (use Linux kernel's primitives)
typedef struct cfs_spin_lock cfs_spinlock_t;
-#define CFS_SPIN_LOCK_UNLOCKED (cfs_spinlock_t) { }
+#define DEFINE_SPINLOCK(lock) cfs_spinlock_t lock = { }
#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
#define LASSERT_SEM_LOCKED(sem) do {(void)sizeof(sem);} while(0)
* - cfs_read_unlock_irqrestore(x)
*/
typedef cfs_rw_semaphore_t cfs_rwlock_t;
-#define CFS_RW_LOCK_UNLOCKED (cfs_rwlock_t) { }
+#define DEFINE_RWLOCK(lock) cfs_rwlock_t lock = { }
#define cfs_rwlock_init(pl) cfs_init_rwsem(pl)
#define CFS_DECL_SPIN(name) cfs_spinlock_t name;
#define CFS_DECL_SPIN_EXTERN(name) extern cfs_spinlock_t name;
-#define CFS_SPIN_LOCK_UNLOCKED {0}
+#define DEFINE_SPINLOCK(lock) cfs_spinlock_t lock = {0}
static inline void cfs_spin_lock_init(cfs_spinlock_t *lock)
{
* dispatcher.
*/
/* BH lock! */
-static cfs_spinlock_t lcw_pending_timers_lock = CFS_SPIN_LOCK_UNLOCKED;
-static cfs_list_t lcw_pending_timers = \
- CFS_LIST_HEAD_INIT(lcw_pending_timers);
+static DEFINE_SPINLOCK(lcw_pending_timers_lock);
+static cfs_list_t lcw_pending_timers = CFS_LIST_HEAD_INIT(lcw_pending_timers);
/* Last time a watchdog expired */
static cfs_time_t lcw_last_watchdog_time;
cfs_mem_cache_t *ll_file_data_slab;
CFS_LIST_HEAD(ll_super_blocks);
-cfs_spinlock_t ll_sb_lock = CFS_SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(ll_sb_lock);
#ifndef MS_HAS_NEW_AOPS
extern struct address_space_operations ll_aops;
};
static unsigned int sai_generation = 0;
-static cfs_spinlock_t sai_generation_lock = CFS_SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(sai_generation_lock);
static inline int ll_sa_entry_unlinked(struct ll_sa_entry *entry)
{
extern cfs_atomic_t lmv_object_count;
static CFS_LIST_HEAD(obj_list);
-static cfs_spinlock_t obj_list_lock = CFS_SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(obj_list_lock);
struct lmv_object *lmv_object_alloc(struct obd_device *obd,
const struct lu_fid *fid,
__u64 obd_max_alloc = 0;
struct lprocfs_stats *obd_memory = NULL;
EXPORT_SYMBOL(obd_memory);
-cfs_spinlock_t obd_updatemax_lock = CFS_SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(obd_updatemax_lock);
/* refine later and change to seqlock or simlar from libcfs */
/* Debugging check only needed during development */
/********************** config llog list **********************/
static CFS_LIST_HEAD(config_llog_list);
-static cfs_spinlock_t config_list_lock = CFS_SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(config_list_lock);
/* Take a reference to a config log */
static int config_log_get(struct config_llog_data *cld)
#ifdef __KERNEL__
/* lock for capa hash/capa_list/fo_capa_keys */
-cfs_spinlock_t capa_lock = CFS_SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(capa_lock);
cfs_list_t capa_list[CAPA_SITE_MAX];
static unsigned cl_envs_cached_nr = 0;
static unsigned cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
* for now. */
-static cfs_spinlock_t cl_envs_guard = CFS_SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(cl_envs_guard);
struct cl_env {
void *ce_magic;
struct obd_device *obd_devs[MAX_OBD_DEVICES];
EXPORT_SYMBOL(obd_devs);
cfs_list_t obd_types;
-cfs_rwlock_t obd_dev_lock = CFS_RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(obd_dev_lock);
#ifndef __KERNEL__
__u64 obd_max_pages = 0;
static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
-static cfs_spinlock_t lu_keys_guard = CFS_SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(lu_keys_guard);
/**
* Global counter incremented whenever key is registered, unregistered,
static CFS_LIST_HEAD(lu_ref_refs);
static cfs_spinlock_t lu_ref_refs_guard;
static struct lu_ref lu_ref_marker = {
- .lf_guard = CFS_SPIN_LOCK_UNLOCKED,
+ .lf_guard = __SPIN_LOCK_UNLOCKED(lu_ref_marker.lf_guard),
.lf_list = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_list),
.lf_linkage = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
};
#include "gss_api.h"
static CFS_LIST_HEAD(registered_mechs);
-static cfs_spinlock_t registered_mechs_lock = CFS_SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(registered_mechs_lock);
int lgss_mech_register(struct gss_api_mech *gm)
{
static int pet_state;
static cfs_waitq_t pet_waitq;
CFS_LIST_HEAD(pet_list);
-static cfs_spinlock_t pet_lock = CFS_SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pet_lock);
int ping_evictor_wake(struct obd_export *exp)
{
cfs_mem_cache_t *qunit_cachep = NULL;
cfs_list_t qunit_hash[NR_DQHASH];
-cfs_spinlock_t qunit_hash_lock = CFS_SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(qunit_hash_lock);
/* please sync qunit_state with qunit_state_names */
enum qunit_state {
qunit->lq_ctxt = qctxt;
memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
qunit->lq_opc = opc;
- qunit->lq_lock = CFS_SPIN_LOCK_UNLOCKED;
+ cfs_spin_lock_init(&qunit->lq_lock);
QUNIT_SET_STATE_AND_RC(qunit, QUNIT_CREATED, 0);
qunit->lq_owner = cfs_curproc_pid();
RETURN(qunit);
}
CFS_INIT_LIST_HEAD(&qunit->lq_hash);
- qunit->lq_lock = CFS_SPIN_LOCK_UNLOCKED;
+ cfs_spin_lock_init(&qunit->lq_lock);
cfs_waitq_init(&qunit->lq_waitq);
cfs_atomic_set(&qunit->lq_refcnt, 1);
qunit->lq_ctxt = qctxt;
#ifdef __KERNEL__
static cfs_time_t last_print = 0;
-static cfs_spinlock_t last_print_lock = CFS_SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(last_print_lock);
static int filter_quota_setup(struct obd_device *obd)
{
/* lock ordering: mds->mds_qonoff_sem > dquot->dq_mutex > lqs->lqs_lock */
static cfs_list_t lustre_dquot_hash[NR_DQHASH];
-static cfs_rwlock_t dquot_hash_lock = CFS_RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(dquot_hash_lock);
cfs_mem_cache_t *lustre_dquot_cachep;