#define CFS_LIST_HEAD_INIT(name) { &(name), &(name) }
#define CFS_LIST_HEAD(name) \
- struct list_head name = LIST_HEAD_INIT(name)
+ struct list_head name = CFS_LIST_HEAD_INIT(name)
#define CFS_INIT_LIST_HEAD(ptr) do { \
(ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)
-#ifndef __APPLE__
-#define LIST_HEAD(n) CFS_LIST_HEAD(n)
-#endif
-
-#define LIST_HEAD_INIT(n) CFS_LIST_HEAD_INIT(n)
-#define INIT_LIST_HEAD(p) CFS_INIT_LIST_HEAD(p)
-
/*
* Insert a new entry between two known consecutive entries.
*
int nicount = 0;
char *nets = lnet_get_networks();
- INIT_LIST_HEAD(&nilist);
+ CFS_INIT_LIST_HEAD(&nilist);
if (nets == NULL)
goto failed;
int i;
lnet_text_buf_t *ltb;
- INIT_LIST_HEAD(&pending);
+ CFS_INIT_LIST_HEAD(&pending);
/* Split 'str' into separate commands */
for (;;) {
int nob;
int scanned;
- INIT_LIST_HEAD(&pending);
+ CFS_INIT_LIST_HEAD(&pending);
sep = strchr(str, '[');
if (sep == NULL) /* nothing to expand */
return -ENOMEM;
}
- INIT_LIST_HEAD(&rnet->lrn_routes);
+ CFS_INIT_LIST_HEAD(&rnet->lrn_routes);
rnet->lrn_net = net;
rnet->lrn_hops = hops;
spin_lock_init(&fld->lcf_lock);
fld->lcf_hash = &fld_hash[hash];
fld->lcf_flags = LUSTRE_FLD_INIT;
- INIT_LIST_HEAD(&fld->lcf_targets);
+ CFS_INIT_LIST_HEAD(&fld->lcf_targets);
#ifdef __KERNEL__
cache_size = FLD_CLIENT_CACHE_SIZE /
#define DECLARE_WAIT_QUEUE_HEAD(HEAD) \
wait_queue_head_t HEAD = { \
- .sleepers = LIST_HEAD_INIT(HEAD.sleepers) \
+ .sleepers = CFS_LIST_HEAD_INIT(HEAD.sleepers) \
}
-#define init_waitqueue_head(l) INIT_LIST_HEAD(&(l)->sleepers)
+#define init_waitqueue_head(l) CFS_INIT_LIST_HEAD(&(l)->sleepers)
#define wake_up(l) do { int a; a++; } while (0)
#define TASK_INTERRUPTIBLE 0
#define TASK_UNINTERRUPTIBLE 1
static inline int init_timer(struct timer_list *l)
{
- INIT_LIST_HEAD(&l->tl_list);
+ CFS_INIT_LIST_HEAD(&l->tl_list);
return 0;
}
if (ocapa) {
atomic_set(&ocapa->c_refc, 0);
spin_lock_init(&ocapa->c_lock);
- INIT_LIST_HEAD(&ocapa->c_list);
+ CFS_INIT_LIST_HEAD(&ocapa->c_list);
ocapa->c_site = site;
}
return ocapa;
void ldlm_reprocess_all(struct ldlm_resource *res)
{
- struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
+ CFS_LIST_HEAD(rpc_list);
int rc;
ENTRY;
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
__u32 *flags)
{
- struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
+ CFS_LIST_HEAD(rpc_list);
struct ldlm_resource *res;
struct ldlm_namespace *ns;
int granted = 0;
struct ldlm_lock *lock, *next;
ENTRY;
- INIT_LIST_HEAD(&rpc_list);
+ CFS_INIT_LIST_HEAD(&rpc_list);
spin_lock(&exp->exp_ldlm_data.led_lock);
list_for_each_entry_safe(lock, next, locklist, l_export_chain) {
if (!sbi)
RETURN(-ENOMEM);
- INIT_LIST_HEAD(&sbi->ll_conn_chain);
+ CFS_INIT_LIST_HEAD(&sbi->ll_conn_chain);
ll_generate_random_uuid(uuid);
class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
extern atomic_t obj_cache_count;
/* object list and its guard. */
-static LIST_HEAD(obj_list);
+static CFS_LIST_HEAD(obj_list);
static spinlock_t obj_list_lock = SPIN_LOCK_UNLOCKED;
/* creates new obj on passed @fid and @mea. */
lov->lov_tgt_size = 0;
sema_init(&lov->lov_lock, 1);
atomic_set(&lov->lov_refcount, 0);
- INIT_LIST_HEAD(&lov->lov_qos.lq_oss_list);
+ CFS_INIT_LIST_HEAD(&lov->lov_qos.lq_oss_list);
init_rwsem(&lov->lov_qos.lq_rw_sem);
lov->lov_qos.lq_dirty = 1;
lov->lov_qos.lq_dirty_rr = 1;
}
/********************** config llog list **********************/
-static struct list_head config_llog_list = LIST_HEAD_INIT(config_llog_list);
+static CFS_LIST_HEAD(config_llog_list);
static spinlock_t config_list_lock = SPIN_LOCK_UNLOCKED;
/* Take a reference to a config log */
printk(KERN_INFO " Build Version: "BUILD_VERSION"\n");
for (i = CAPA_SITE_CLIENT; i < CAPA_SITE_MAX; i++)
- INIT_LIST_HEAD(&capa_list[i]);
+ CFS_INIT_LIST_HEAD(&capa_list[i]);
#else
CDEBUG(D_INFO, "Lustre: OBD class driver, info@clusterfs.com\n");
CDEBUG(D_INFO, " Lustre Version: "LUSTRE_VERSION_STRING"\n");
return err;
spin_lock_init(&obd_dev_lock);
- INIT_LIST_HEAD(&obd_types);
+ CFS_INIT_LIST_HEAD(&obd_types);
err = cfs_psdev_register(&obd_psdev);
if (err) {
void obd_zombie_impexp_init(void)
{
- INIT_LIST_HEAD(&obd_zombie_imports);
- INIT_LIST_HEAD(&obd_zombie_exports);
+ CFS_INIT_LIST_HEAD(&obd_zombie_imports);
+ CFS_INIT_LIST_HEAD(&obd_zombie_exports);
spin_lock_init(&obd_zombie_impexp_lock);
}
/*********** mount lookup *********/
DECLARE_MUTEX(lustre_mount_info_lock);
-struct list_head server_mount_info_list = LIST_HEAD_INIT(server_mount_info_list);
+CFS_LIST_HEAD(server_mount_info_list);
static struct lustre_mount_info *server_find_mount(const char *name)
{
static int filter_init_export(struct obd_export *exp)
{
spin_lock_init(&exp->exp_filter_data.fed_lock);
- INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
+ CFS_INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
spin_lock(&exp->exp_lock);
exp->exp_connecting = 1;
struct ldlm_lock **lockp, void *req_cookie,
ldlm_mode_t mode, int flags, void *data)
{
- struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
+ CFS_LIST_HEAD(rpc_list);
struct ptlrpc_request *req = req_cookie;
struct ldlm_lock *lock = *lockp, *l = NULL;
struct ldlm_resource *res = lock->l_resource;
filter->fo_fmd_max_num = FILTER_FMD_MAX_NUM_DEFAULT;
filter->fo_fmd_max_age = FILTER_FMD_MAX_AGE_DEFAULT;
- INIT_LIST_HEAD(&filter->fo_llog_list);
+ CFS_INIT_LIST_HEAD(&filter->fo_llog_list);
spin_lock_init(&filter->fo_llog_list_lock);
filter->fo_sptlrpc_lock = RW_LOCK_UNLOCKED;
sptlrpc_rule_set_init(&filter->fo_sptlrpc_rset);
filter->fo_fl_oss_capa = 0;
- INIT_LIST_HEAD(&filter->fo_capa_keys);
+ CFS_INIT_LIST_HEAD(&filter->fo_capa_keys);
filter->fo_capa_hash = init_capa_hash();
if (filter->fo_capa_hash == NULL)
GOTO(err_ops, rc = -ENOMEM);
aa->aa_resends = 0;
aa->aa_ppga = pga;
aa->aa_cli = cli;
- INIT_LIST_HEAD(&aa->aa_oaps);
+ CFS_INIT_LIST_HEAD(&aa->aa_oaps);
*reqp = req;
RETURN(0);
new_aa = (struct osc_brw_async_args *)&new_req->rq_async_args;
- INIT_LIST_HEAD(&new_aa->aa_oaps);
+ CFS_INIT_LIST_HEAD(&new_aa->aa_oaps);
list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
- INIT_LIST_HEAD(&aa->aa_oaps);
+ CFS_INIT_LIST_HEAD(&aa->aa_oaps);
list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
if (oap->oap_request) {
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = (struct osc_brw_async_args *)&req->rq_async_args;
- INIT_LIST_HEAD(&aa->aa_oaps);
+ CFS_INIT_LIST_HEAD(&aa->aa_oaps);
list_splice(rpc_list, &aa->aa_oaps);
- INIT_LIST_HEAD(rpc_list);
+ CFS_INIT_LIST_HEAD(rpc_list);
out:
if (IS_ERR(req)) {
atomic_set(&c->c_refcount, 1);
c->c_peer = peer;
c->c_self = self;
- INIT_HLIST_NODE(&c->c_hash);
- INIT_LIST_HEAD(&c->c_link);
+ INIT_HLIST_NODE(&c->c_hash);
+ CFS_INIT_LIST_HEAD(&c->c_link);
if (uuid != NULL)
obd_str2uuid(&c->c_remote_uuid, uuid->uuid);
null_sec.ps_dying = 0;
spin_lock_init(&null_sec.ps_lock);
atomic_set(&null_sec.ps_nctx, 1); /* for "null_cli_ctx" */
- INIT_LIST_HEAD(&null_sec.ps_gc_list);
+ CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
null_sec.ps_gc_interval = 0;
null_sec.ps_gc_next = 0;
PTLRPC_CTX_UPTODATE;
null_cli_ctx.cc_vcred.vc_uid = 0;
spin_lock_init(&null_cli_ctx.cc_lock);
- INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
- INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
+ CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
+ CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
}
int sptlrpc_null_init(void)
ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
ctx->cc_vcred.vc_uid = 0;
spin_lock_init(&ctx->cc_lock);
- INIT_LIST_HEAD(&ctx->cc_req_list);
- INIT_LIST_HEAD(&ctx->cc_gc_chain);
+ CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
+ CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
plsec->pls_ctx = ctx;
atomic_inc(&plsec->pls_base.ps_nctx);
sec->ps_import = class_import_get(imp);
sec->ps_flvr = *sf;
sec->ps_lock = SPIN_LOCK_UNLOCKED;
- INIT_LIST_HEAD(&sec->ps_gc_list);
+ CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
sec->ps_gc_interval = 0;
sec->ps_gc_next = 0;
spin_lock(&qunit_hash_lock);
for (i = 0; i < NR_DQHASH; i++)
- INIT_LIST_HEAD(qunit_hash + i);
+ CFS_INIT_LIST_HEAD(qunit_hash + i);
spin_unlock(&qunit_hash_lock);
RETURN(0);
}
if (qunit == NULL)
RETURN(NULL);
- INIT_LIST_HEAD(&qunit->lq_hash);
- INIT_LIST_HEAD(&qunit->lq_waiters);
+ CFS_INIT_LIST_HEAD(&qunit->lq_hash);
+ CFS_INIT_LIST_HEAD(&qunit->lq_waiters);
atomic_set(&qunit->lq_refcnt, 1);
qunit->lq_ctxt = qctxt;
memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
int rc = 0;
ENTRY;
- INIT_LIST_HEAD(&qw.qw_entry);
+ CFS_INIT_LIST_HEAD(&qw.qw_entry);
init_waitqueue_head(&qw.qw_waitq);
qw.qw_rc = 0;
struct l_wait_info lwi = { 0 };
ENTRY;
- INIT_LIST_HEAD(&qw.qw_entry);
+ CFS_INIT_LIST_HEAD(&qw.qw_entry);
init_waitqueue_head(&qw.qw_waitq);
qw.qw_rc = 0;
}
LASSERT(dqopt->files[type] != NULL);
- INIT_LIST_HEAD(&id_list);
+ CFS_INIT_LIST_HEAD(&id_list);
#ifndef KERNEL_SUPPORTS_QUOTA_READ
rc = fsfilt_qids(obd, dqopt->files[type], NULL, type, &id_list);
#else
if(!oqi)
RETURN(NULL);
- INIT_LIST_HEAD(&oqi->oqi_hash);
+ CFS_INIT_LIST_HEAD(&oqi->oqi_hash);
oqi->oqi_cli = cli;
oqi->oqi_id = id;
oqi->oqi_type = type;
RETURN(-ENOMEM);
for (i = 0; i < NR_DQHASH; i++)
- INIT_LIST_HEAD(qinfo_hash + i);
+ CFS_INIT_LIST_HEAD(qinfo_hash + i);
RETURN(0);
}
return (-ENOMEM);
for (i = 0; i < NR_DQHASH; i++) {
- INIT_LIST_HEAD(lustre_dquot_hash + i);
+ CFS_INIT_LIST_HEAD(lustre_dquot_hash + i);
}
RETURN(0);
}
if (dquot == NULL)
RETURN(NULL);
- INIT_LIST_HEAD(&dquot->dq_hash);
+ CFS_INIT_LIST_HEAD(&dquot->dq_hash);
init_mutex_locked(&dquot->dq_sem);
dquot->dq_refcnt = 1;
dquot->dq_info = lqi;
up(&mds->mds_qonoff_sem);
continue;
}
- INIT_LIST_HEAD(&id_list);
+ CFS_INIT_LIST_HEAD(&id_list);
rc = fsfilt_qids(obd, qinfo->qi_files[type], NULL, type,
&id_list);
up(&mds->mds_qonoff_sem);
int hit, valid;
} *it_array;
static int it_count;
-static struct list_head header = LIST_HEAD_INIT(header);
+static CFS_LIST_HEAD(header);
static unsigned long max_count = ULONG_MAX & ALIGN_MASK;
static int have_wide_lock = 0;