ofd = ofd_dev(obd->obd_lu_dev);
read_lock(&ofd->ofd_seq_list_lock);
- cfs_list_for_each_entry(oseq, &ofd->ofd_seq_list, os_list) {
+ list_for_each_entry(oseq, &ofd->ofd_seq_list, os_list) {
__u64 seq;
seq = ostid_seq(&oseq->os_oi) == 0 ?
OBD_ALLOC_PTR(k);
if (!k)
RETURN(-ENOMEM);
- CFS_INIT_LIST_HEAD(&k->k_list);
+ INIT_LIST_HEAD(&k->k_list);
}
spin_lock(&capa_lock);
}
struct locked_region {
- cfs_list_t list;
+ struct list_head list;
struct lustre_handle lh;
};
static int lock_region(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
unsigned long long begin, unsigned long long end,
- cfs_list_t *locked)
+ struct list_head *locked)
{
struct locked_region *region = NULL;
__u64 flags = 0;
CDEBUG(D_OTHER, "ost lock [%llu,%llu], lh=%p\n", begin, end,
&region->lh);
- cfs_list_add(&region->list, locked);
+ list_add(&region->list, locked);
return 0;
}
static int lock_zero_regions(struct ldlm_namespace *ns,
struct ldlm_res_id *res_id,
struct ll_user_fiemap *fiemap,
- cfs_list_t *locked)
+ struct list_head *locked)
{
__u64 begin = fiemap->fm_start;
unsigned int i;
RETURN(rc);
}
-static void unlock_zero_regions(struct ldlm_namespace *ns, cfs_list_t *locked)
+static void
+unlock_zero_regions(struct ldlm_namespace *ns, struct list_head *locked)
{
struct locked_region *entry, *temp;
- cfs_list_for_each_entry_safe(entry, temp, locked, list) {
+ list_for_each_entry_safe(entry, temp, locked, list) {
CDEBUG(D_OTHER, "ost unlock lh=%p\n", &entry->lh);
tgt_extent_unlock(&entry->lh, LCK_PR);
- cfs_list_del(&entry->list);
+ list_del(&entry->list);
OBD_FREE_PTR(entry);
}
}
* flushed back from client, then call fiemap again. */
if (fm_key->oa.o_valid & OBD_MD_FLFLAGS &&
fm_key->oa.o_flags & OBD_FL_SRVLOCK) {
- cfs_list_t locked = CFS_LIST_HEAD_INIT(locked);
+ struct list_head locked;
+ INIT_LIST_HEAD(&locked);
ost_fid_build_resid(fid, &fti->fti_resid);
rc = lock_zero_regions(ofd->ofd_namespace,
&fti->fti_resid, fiemap,
&locked);
- if (rc == 0 && !cfs_list_empty(&locked)) {
+ if (rc == 0 && !list_empty(&locked)) {
rc = ofd_fiemap_get(tsi->tsi_env, ofd, fid,
fiemap);
unlock_zero_regions(ofd->ofd_namespace,
init_rwsem(&m->ofd_lastid_rwsem);
obd->u.filter.fo_fl_oss_capa = 0;
- CFS_INIT_LIST_HEAD(&obd->u.filter.fo_capa_keys);
+ INIT_LIST_HEAD(&obd->u.filter.fo_capa_keys);
obd->u.filter.fo_capa_hash = init_capa_hash();
if (obd->u.filter.fo_capa_hash == NULL)
RETURN(-ENOMEM);
if (interval_high(n) <= size)
return INTERVAL_ITER_STOP;
- cfs_list_for_each_entry(lck, &node->li_group, l_sl_policy) {
+ list_for_each_entry(lck, &node->li_group, l_sl_policy) {
/* Don't send glimpse ASTs to liblustre clients.
* They aren't listening for them, and they do
* entirely synchronous I/O anyways. */
[DLM_REPLY_REC_OFF] = sizeof(*reply_lvb)
};
struct ldlm_glimpse_work gl_work;
- CFS_LIST_HEAD(gl_list);
+ struct list_head gl_list;
ENTRY;
+ INIT_LIST_HEAD(&gl_list);
lock->l_lvb_type = LVB_T_OST;
policy = ldlm_get_processing_policy(res);
LASSERT(policy != NULL);
gl_work.gl_lock = LDLM_LOCK_GET(l);
/* The glimpse callback is sent to one single extent lock. As a result,
* the gl_work list is just composed of one element */
- cfs_list_add_tail(&gl_work.gl_list, &gl_list);
+ list_add_tail(&gl_work.gl_list, &gl_list);
/* There is actually no need for a glimpse descriptor when glimpsing
* extent locks */
gl_work.gl_desc = NULL;
rc = ldlm_glimpse_locks(res, &gl_list); /* this will update the LVB */
- if (!cfs_list_empty(&gl_list))
+ if (!list_empty(&gl_list))
LDLM_LOCK_RELEASE(l);
lock_res(res);
/* XXX when we have persistent reservations and the handle
* is stored herein we need to drop it here. */
fed->fed_mod_count--;
- cfs_list_del(&fmd->fmd_list);
+ list_del(&fmd->fmd_list);
OBD_SLAB_FREE(fmd, ll_fmd_cachep, sizeof(*fmd));
}
}
cfs_time_t now = cfs_time_current();
- cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
+ list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
if (fmd == keep)
break;
fed->fed_mod_count < ofd->ofd_fmd_max_num)
break;
- cfs_list_del_init(&fmd->fmd_list);
+ list_del_init(&fmd->fmd_list);
ofd_fmd_put_nolock(exp, fmd); /* list reference */
}
}
assert_spin_locked(&fed->fed_lock);
- cfs_list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
+ list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
if (lu_fid_eq(&fmd->fmd_fid, fid)) {
found = fmd;
- cfs_list_del(&fmd->fmd_list);
- cfs_list_add_tail(&fmd->fmd_list, &fed->fed_mod_list);
+ list_del(&fmd->fmd_list);
+ list_add_tail(&fmd->fmd_list, &fed->fed_mod_list);
fmd->fmd_expire = cfs_time_add(now, ofd->ofd_fmd_max_age);
break;
}
found = ofd_fmd_find_nolock(exp, fid);
if (fmd_new) {
if (found == NULL) {
- cfs_list_add_tail(&fmd_new->fmd_list,
- &fed->fed_mod_list);
+ list_add_tail(&fmd_new->fmd_list,
+ &fed->fed_mod_list);
fmd_new->fmd_fid = *fid;
fmd_new->fmd_refcount++; /* list reference */
found = fmd_new;
spin_lock(&fed->fed_lock);
found = ofd_fmd_find_nolock(exp, fid);
if (found) {
- cfs_list_del_init(&found->fmd_list);
+ list_del_init(&found->fmd_list);
ofd_fmd_put_nolock(exp, found);
}
spin_unlock(&fed->fed_lock);
struct ofd_mod_data *fmd = NULL, *tmp;
spin_lock(&fed->fed_lock);
- cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
- cfs_list_del_init(&fmd->fmd_list);
+ list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
+ list_del_init(&fmd->fmd_list);
if (fmd->fmd_refcount > 1) {
CDEBUG(D_INFO, "fmd %p still referenced (refcount = %d)\n",
fmd, fmd->fmd_refcount);
obd_size fo_tot_pending;
obd_size fo_tot_dirty;
- if (cfs_list_empty(&obd->obd_exports))
+ if (list_empty(&obd->obd_exports))
return;
/* We don't want to do this for large machines that do lots of
spin_lock(&obd->obd_dev_lock);
spin_lock(&ofd->ofd_grant_lock);
- cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+ list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
struct filter_export_data *fed;
int error = 0;
/* per-client-per-object persistent state (LRU) */
struct ofd_mod_data {
- cfs_list_t fmd_list; /* linked to fed_mod_list */
- struct lu_fid fmd_fid; /* FID being written to */
- __u64 fmd_mactime_xid; /* xid highest {m,a,c}time setattr */
- cfs_time_t fmd_expire; /* time when the fmd should expire */
- int fmd_refcount; /* reference counter - list holds 1 */
+ struct list_head fmd_list; /* linked to fed_mod_list */
+ struct lu_fid fmd_fid; /* FID being written to */
+ __u64 fmd_mactime_xid; /* xid highest {m,a,c}time setattr */
+ cfs_time_t fmd_expire; /* time when the fmd should expire */
+ int fmd_refcount; /* reference counter - list holds 1 */
};
#define OFD_FMD_MAX_NUM_DEFAULT 128
}
struct ofd_seq {
- cfs_list_t os_list;
+ struct list_head os_list;
struct ost_id os_oi;
spinlock_t os_last_oid_lock;
struct mutex os_create_lock;
__u64 ofd_inconsistency_self_detected;
__u64 ofd_inconsistency_self_repaired;
- cfs_list_t ofd_seq_list;
+ struct list_head ofd_seq_list;
rwlock_t ofd_seq_list_lock;
int ofd_seq_count;
int ofd_precreate_batch;
dcb = &ossc->ossc_cb;
dcb->dcb_func = ofd_cb_soft_sync;
- CFS_INIT_LIST_HEAD(&dcb->dcb_linkage);
+ INIT_LIST_HEAD(&dcb->dcb_linkage);
strlcpy(dcb->dcb_name, "ofd_cb_soft_sync", sizeof(dcb->dcb_name));
rc = dt_trans_cb_add(th, dcb);
int rc;
spin_lock_init(&exp->exp_filter_data.fed_lock);
- CFS_INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
+ INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
atomic_set(&exp->exp_filter_data.fed_soft_sync_count, 0);
spin_lock(&exp->exp_lock);
exp->exp_connecting = 1;
if (!(exp->exp_flags & OBD_OPT_FORCE))
ofd_grant_sanity_check(exp->exp_obd, __FUNCTION__);
- LASSERT(cfs_list_empty(&exp->exp_filter_data.fed_mod_list));
+ LASSERT(list_empty(&exp->exp_filter_data.fed_mod_list));
return 0;
}