LU-3963 libcfs: move mgs, osd-ldiskfs, osp, quota to linux list api 47/10247/4
author James Simmons <uja.ornl@gmail.com>
Fri, 9 May 2014 16:46:16 +0000 (12:46 -0400)
committer Oleg Drokin <oleg.drokin@intel.com>
Tue, 20 May 2014 04:53:01 +0000 (04:53 +0000)
First part of moving several of the server components
over to the Linux list API.

Change-Id: Ib14d7c295f98decdca415e0904fe4fb710b83696
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Reviewed-on: http://review.whamcloud.com/10247
Tested-by: Jenkins
Reviewed-by: Bob Glossman <bob.glossman@intel.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Nathaniel Clark <nathaniel.l.clark@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
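
The conversion is a mechanical one-to-one mapping from the libcfs wrappers to
<linux/list.h>. As a rough sketch of the pattern (hypothetical names such as
demo_item and demo_list, not code from the patch, and assuming a kernel build
environment):

    #include <linux/list.h>
    #include <linux/slab.h>

    /* Hypothetical example type; "link" plays the role of fields such as
     * mnt_list or oii_list in the hunks below. */
    struct demo_item {
            int                     value;
            struct list_head        link;           /* was: cfs_list_t link; */
    };

    static LIST_HEAD(demo_list);                    /* was: CFS_LIST_HEAD(demo_list); */

    static void demo_add(int value)
    {
            struct demo_item *item;

            item = kzalloc(sizeof(*item), GFP_KERNEL);
            if (item == NULL)
                    return;
            INIT_LIST_HEAD(&item->link);            /* was: CFS_INIT_LIST_HEAD() */
            item->value = value;
            list_add_tail(&item->link, &demo_list); /* was: cfs_list_add_tail() */
    }

    static void demo_drain(void)
    {
            struct demo_item *item, *tmp;

            /* was: cfs_list_for_each_entry_safe() */
            list_for_each_entry_safe(item, tmp, &demo_list, link) {
                    list_del_init(&item->link);     /* was: cfs_list_del_init() */
                    kfree(item);
            }
    }

For on-stack heads the patch spells out the initializer explicitly, i.e.
struct list_head head = LIST_HEAD_INIT(head) in place of CFS_LIST_HEAD(head);
the cfs_hlist_* hash-node helpers map onto struct hlist_node and hlist_entry()
in the same way.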
28 files changed:
lustre/mgs/mgs_handler.c
lustre/mgs/mgs_internal.h
lustre/mgs/mgs_llog.c
lustre/mgs/mgs_nids.c
lustre/osd-ldiskfs/osd_compat.c
lustre/osd-ldiskfs/osd_handler.c
lustre/osd-ldiskfs/osd_iam.c
lustre/osd-ldiskfs/osd_iam.h
lustre/osd-ldiskfs/osd_internal.h
lustre/osd-ldiskfs/osd_quota.c
lustre/osd-ldiskfs/osd_quota_fmt.c
lustre/osd-ldiskfs/osd_scrub.c
lustre/osd-ldiskfs/osd_scrub.h
lustre/osp/osp_internal.h
lustre/osp/osp_sync.c
lustre/quota/lquota_entry.c
lustre/quota/lquota_internal.h
lustre/quota/qmt_dev.c
lustre/quota/qmt_internal.h
lustre/quota/qmt_lock.c
lustre/quota/qmt_pool.c
lustre/quota/qsd_config.c
lustre/quota/qsd_internal.h
lustre/quota/qsd_lib.c
lustre/quota/qsd_reint.c
lustre/quota/qsd_writeback.c
lustre/tests/it_test.c
lustre/utils/ll_recover_lost_found_objs.c

lustre/mgs/mgs_handler.c
index 645e574..d535bf9 100644
@@ -575,7 +575,7 @@ static inline int mgs_init_export(struct obd_export *exp)
 
        /* init mgs_export_data for fsc */
        spin_lock_init(&data->med_lock);
-       CFS_INIT_LIST_HEAD(&data->med_clients);
+       INIT_LIST_HEAD(&data->med_clients);
 
        spin_lock(&exp->exp_lock);
        exp->exp_connecting = 1;
lustre/mgs/mgs_internal.h
index f326a13..6d10d5f 100644
  */
 struct mgs_nidtbl;
 struct mgs_nidtbl_target {
-        cfs_list_t              mnt_list;
-        struct mgs_nidtbl      *mnt_fs;
-        u64                     mnt_version;
-        int                     mnt_type; /* OST or MDT */
-        cfs_time_t              mnt_last_active;
-        struct mgs_target_info  mnt_mti;
+       struct list_head        mnt_list;
+       struct mgs_nidtbl      *mnt_fs;
+       u64                     mnt_version;
+       int                     mnt_type;       /* OST or MDT */
+       cfs_time_t              mnt_last_active;
+       struct mgs_target_info  mnt_mti;
 };
 
 enum {
@@ -78,30 +78,30 @@ struct fs_db;
  * maintain fs client nodes of mgs.
  */
 struct mgs_fsc {
-        struct fs_db      *mfc_fsdb;
+       struct fs_db            *mfc_fsdb;
         /**
          * Where the fs client comes from.
          */
-        struct obd_export *mfc_export;
+       struct obd_export       *mfc_export;
         /**
          * list of fs clients from the same export,
          * protected by mgs_export_data->med_lock
          */
-        cfs_list_t         mfc_export_list;
+       struct list_head        mfc_export_list;
         /**
          * list of fs clients in the same fsdb, protected by fsdb->fsdb_mutex
          */
-        cfs_list_t        mfc_fsdb_list;
-        unsigned          mfc_ir_capable:1;
+       struct list_head        mfc_fsdb_list;
+       unsigned                mfc_ir_capable:1;
 };
 
 struct mgs_nidtbl {
-        struct fs_db *mn_fsdb;
-        struct file  *mn_version_file;
-       struct mutex    mn_lock;
-        u64           mn_version;
-        int           mn_nr_targets;
-        cfs_list_t    mn_targets;
+       struct fs_db            *mn_fsdb;
+       struct file             *mn_version_file;
+       struct mutex             mn_lock;
+       u64                      mn_version;
+       int                      mn_nr_targets;
+       struct list_head         mn_targets;
 };
 
 struct mgs_tgt_srpc_conf {
@@ -121,23 +121,23 @@ struct mgs_tgt_srpc_conf {
 #define FSDB_REVOKING_PARAMS   (6)  /* DLM lock is being revoked */
 
 struct fs_db {
-        char              fsdb_name[9];
-        cfs_list_t        fsdb_list;           /* list of databases */
+       char              fsdb_name[9];
+       struct list_head  fsdb_list;            /* list of databases */
        struct mutex      fsdb_mutex;
-        void             *fsdb_ost_index_map;  /* bitmap of used indicies */
-        void             *fsdb_mdt_index_map;  /* bitmap of used indicies */
-        int               fsdb_mdt_count;
-        char             *fsdb_clilov;       /* COMPAT_146 client lov name */
-        char             *fsdb_clilmv;
-        unsigned long     fsdb_flags;
-        __u32             fsdb_gen;
+       void             *fsdb_ost_index_map;   /* bitmap of used indicies */
+       void             *fsdb_mdt_index_map;   /* bitmap of used indicies */
+       int               fsdb_mdt_count;
+       char             *fsdb_clilov;  /* COMPAT_146 client lov name */
+       char             *fsdb_clilmv;
+       unsigned long     fsdb_flags;
+       __u32             fsdb_gen;
 
         /* in-memory copy of the srpc rules, guarded by fsdb_lock */
         struct sptlrpc_rule_set   fsdb_srpc_gen;
         struct mgs_tgt_srpc_conf *fsdb_srpc_tgt;
 
         /* list of fs clients, mgs_fsc. protected by mgs_mutex */
-        cfs_list_t           fsdb_clients;
+       struct list_head     fsdb_clients;
         int                  fsdb_nonir_clients;
         int                  fsdb_ir_state;
 
@@ -146,10 +146,10 @@ struct fs_db {
 
        /* async thread to notify clients */
        struct mgs_device    *fsdb_mgs;
-       wait_queue_head_t    fsdb_notify_waitq;
-       struct completion    fsdb_notify_comp;
-       cfs_time_t           fsdb_notify_start;
-       atomic_t         fsdb_notify_phase;
+       wait_queue_head_t     fsdb_notify_waitq;
+       struct completion     fsdb_notify_comp;
+       cfs_time_t            fsdb_notify_start;
+       atomic_t              fsdb_notify_phase;
        volatile unsigned int fsdb_notify_async:1,
                              fsdb_notify_stop:1;
         /* statistic data */
@@ -165,7 +165,7 @@ struct mgs_device {
        struct obd_export               *mgs_bottom_exp;
        struct dt_object                *mgs_configs_dir;
        struct dt_object                *mgs_nidtbl_dir;
-       cfs_list_t                       mgs_fs_db_list;
+       struct list_head                 mgs_fs_db_list;
        spinlock_t                       mgs_lock; /* covers mgs_fs_db_list */
        cfs_proc_dir_entry_t            *mgs_proc_live;
        cfs_proc_dir_entry_t            *mgs_proc_osd;
lustre/mgs/mgs_llog.c
index b2e2a48..aca72e3 100644
@@ -314,10 +314,10 @@ static void mgs_free_fsdb_srpc(struct fs_db *fsdb)
 struct fs_db *mgs_find_fsdb(struct mgs_device *mgs, char *fsname)
 {
         struct fs_db *fsdb;
-        cfs_list_t *tmp;
+       struct list_head *tmp;
 
-        cfs_list_for_each(tmp, &mgs->mgs_fs_db_list) {
-                fsdb = cfs_list_entry(tmp, struct fs_db, fsdb_list);
+       list_for_each(tmp, &mgs->mgs_fs_db_list) {
+               fsdb = list_entry(tmp, struct fs_db, fsdb_list);
                 if (strcmp(fsdb->fsdb_name, fsname) == 0)
                         return fsdb;
         }
@@ -369,7 +369,7 @@ static struct fs_db *mgs_new_fsdb(const struct lu_env *env,
                lproc_mgs_add_live(mgs, fsdb);
         }
 
-        cfs_list_add(&fsdb->fsdb_list, &mgs->mgs_fs_db_list);
+       list_add(&fsdb->fsdb_list, &mgs->mgs_fs_db_list);
 
         RETURN(fsdb);
 err:
@@ -385,10 +385,10 @@ err:
 
 static void mgs_free_fsdb(struct mgs_device *mgs, struct fs_db *fsdb)
 {
-        /* wait for anyone with the sem */
+       /* wait for anyone with the sem */
        mutex_lock(&fsdb->fsdb_mutex);
        lproc_mgs_del_live(mgs, fsdb);
-        cfs_list_del(&fsdb->fsdb_list);
+       list_del(&fsdb->fsdb_list);
 
         /* deinitialize fsr */
        mgs_ir_fini_fs(mgs, fsdb);
@@ -406,21 +406,22 @@ static void mgs_free_fsdb(struct mgs_device *mgs, struct fs_db *fsdb)
 
 int mgs_init_fsdb_list(struct mgs_device *mgs)
 {
-        CFS_INIT_LIST_HEAD(&mgs->mgs_fs_db_list);
+       INIT_LIST_HEAD(&mgs->mgs_fs_db_list);
         return 0;
 }
 
 int mgs_cleanup_fsdb_list(struct mgs_device *mgs)
 {
-        struct fs_db *fsdb;
-        cfs_list_t *tmp, *tmp2;
+       struct fs_db *fsdb;
+       struct list_head *tmp, *tmp2;
+
        mutex_lock(&mgs->mgs_mutex);
-        cfs_list_for_each_safe(tmp, tmp2, &mgs->mgs_fs_db_list) {
-                fsdb = cfs_list_entry(tmp, struct fs_db, fsdb_list);
+       list_for_each_safe(tmp, tmp2, &mgs->mgs_fs_db_list) {
+               fsdb = list_entry(tmp, struct fs_db, fsdb_list);
                mgs_free_fsdb(mgs, fsdb);
-        }
+       }
        mutex_unlock(&mgs->mgs_mutex);
-        return 0;
+       return 0;
 }
 
 int mgs_find_or_make_fsdb(const struct lu_env *env,
lustre/mgs/mgs_nids.c
index abeff63..895ad98 100644
@@ -57,20 +57,20 @@ static unsigned int ir_timeout;
 
 static int nidtbl_is_sane(struct mgs_nidtbl *tbl)
 {
-        struct mgs_nidtbl_target *tgt;
-        int version = 0;
+       struct mgs_nidtbl_target *tgt;
+       int version = 0;
 
        LASSERT(mutex_is_locked(&tbl->mn_lock));
-        cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
-                if (!tgt->mnt_version)
-                        continue;
+       list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
+               if (!tgt->mnt_version)
+                       continue;
 
-                if (version >= tgt->mnt_version)
-                        return 0;
+               if (version >= tgt->mnt_version)
+                       return 0;
 
-                version = tgt->mnt_version;
-        }
-        return 1;
+               version = tgt->mnt_version;
+       }
+       return 1;
 }
 
 /**
@@ -113,7 +113,7 @@ static int mgs_nidtbl_read(struct obd_export *exp, struct mgs_nidtbl *tbl,
          * otherwise, it's for clients, then llog entries for both OSTs and
          * MDTs will be returned.
          */
-        cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
+       list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
                 int entry_len = sizeof(*entry);
 
                 if (tgt->mnt_version < version)
@@ -332,10 +332,11 @@ static int mgs_nidtbl_write(const struct lu_env *env, struct fs_db *fsdb,
         type &= ~LDD_F_SV_TYPE_MGS;
         LASSERT(type != 0);
 
-        tbl = &fsdb->fsdb_nidtbl;
+       tbl = &fsdb->fsdb_nidtbl;
        mutex_lock(&tbl->mn_lock);
-        cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
-                struct mgs_target_info *info = &tgt->mnt_mti;
+       list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
+               struct mgs_target_info *info = &tgt->mnt_mti;
+
                 if (type == tgt->mnt_type &&
                     mti->mti_stripe_index == info->mti_stripe_index) {
                         found = true;
@@ -347,21 +348,21 @@ static int mgs_nidtbl_write(const struct lu_env *env, struct fs_db *fsdb,
                 if (tgt == NULL)
                         GOTO(out, rc = -ENOMEM);
 
-                CFS_INIT_LIST_HEAD(&tgt->mnt_list);
-                tgt->mnt_fs      = tbl;
-                tgt->mnt_version = 0;       /* 0 means invalid */
-                tgt->mnt_type    = type;
+               INIT_LIST_HEAD(&tgt->mnt_list);
+               tgt->mnt_fs      = tbl;
+               tgt->mnt_version = 0;   /* 0 means invalid */
+               tgt->mnt_type    = type;
 
-                ++tbl->mn_nr_targets;
-        }
+               ++tbl->mn_nr_targets;
+       }
 
-        tgt->mnt_version = ++tbl->mn_version;
-        tgt->mnt_mti     = *mti;
+       tgt->mnt_version = ++tbl->mn_version;
+       tgt->mnt_mti     = *mti;
 
-        cfs_list_move_tail(&tgt->mnt_list, &tbl->mn_targets);
+       list_move_tail(&tgt->mnt_list, &tbl->mn_targets);
 
        rc = nidtbl_update_version(env, fsdb->fsdb_mgs, tbl);
-        EXIT;
+       EXIT;
 
 out:
        mutex_unlock(&tbl->mn_lock);
@@ -374,17 +375,17 @@ out:
 static void mgs_nidtbl_fini_fs(struct fs_db *fsdb)
 {
         struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
-        CFS_LIST_HEAD(head);
+       struct list_head head = LIST_HEAD_INIT(head);
 
        mutex_lock(&tbl->mn_lock);
         tbl->mn_nr_targets = 0;
-        cfs_list_splice_init(&tbl->mn_targets, &head);
+       list_splice_init(&tbl->mn_targets, &head);
        mutex_unlock(&tbl->mn_lock);
 
-        while (!cfs_list_empty(&head)) {
+       while (!list_empty(&head)) {
                 struct mgs_nidtbl_target *tgt;
                 tgt = list_entry(head.next, struct mgs_nidtbl_target, mnt_list);
-                cfs_list_del(&tgt->mnt_list);
+               list_del(&tgt->mnt_list);
                 OBD_FREE_PTR(tgt);
         }
 }
@@ -394,7 +395,7 @@ static int mgs_nidtbl_init_fs(const struct lu_env *env, struct fs_db *fsdb)
         struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
        int rc;
 
-        CFS_INIT_LIST_HEAD(&tbl->mn_targets);
+       INIT_LIST_HEAD(&tbl->mn_targets);
        mutex_init(&tbl->mn_lock);
         tbl->mn_nr_targets = 0;
         tbl->mn_fsdb = fsdb;
@@ -480,7 +481,7 @@ int mgs_ir_init_fs(const struct lu_env *env, struct mgs_device *mgs,
                            mgs->mgs_start_time + ir_timeout))
                fsdb->fsdb_ir_state = IR_STARTUP;
        fsdb->fsdb_nonir_clients = 0;
-       CFS_INIT_LIST_HEAD(&fsdb->fsdb_clients);
+       INIT_LIST_HEAD(&fsdb->fsdb_clients);
 
        /* start notify thread */
        fsdb->fsdb_mgs = mgs;
@@ -508,7 +509,7 @@ void mgs_ir_fini_fs(struct mgs_device *mgs, struct fs_db *fsdb)
 
        mgs_nidtbl_fini_fs(fsdb);
 
-       LASSERT(cfs_list_empty(&fsdb->fsdb_clients));
+       LASSERT(list_empty(&fsdb->fsdb_clients));
 
        fsdb->fsdb_notify_stop = 1;
        wake_up(&fsdb->fsdb_notify_waitq);
@@ -873,8 +874,8 @@ int mgs_fsc_attach(const struct lu_env *env, struct obd_export *exp,
         if (new_fsc == NULL)
                 RETURN(-ENOMEM);
 
-       CFS_INIT_LIST_HEAD(&new_fsc->mfc_export_list);
-       CFS_INIT_LIST_HEAD(&new_fsc->mfc_fsdb_list);
+       INIT_LIST_HEAD(&new_fsc->mfc_export_list);
+       INIT_LIST_HEAD(&new_fsc->mfc_fsdb_list);
        new_fsc->mfc_fsdb       = fsdb;
        new_fsc->mfc_export     = class_export_get(exp);
        new_fsc->mfc_ir_capable = !!(exp_connect_flags(exp) &
@@ -885,7 +886,7 @@ int mgs_fsc_attach(const struct lu_env *env, struct obd_export *exp,
 
        /* tend to find it in export list because this list is shorter. */
        spin_lock(&data->med_lock);
-        cfs_list_for_each_entry(fsc, &data->med_clients, mfc_export_list) {
+       list_for_each_entry(fsc, &data->med_clients, mfc_export_list) {
                 if (strcmp(fsname, fsc->mfc_fsdb->fsdb_name) == 0) {
                         found = true;
                         break;
@@ -896,10 +897,10 @@ int mgs_fsc_attach(const struct lu_env *env, struct obd_export *exp,
                 new_fsc = NULL;
 
                 /* add it into export list. */
-                cfs_list_add(&fsc->mfc_export_list, &data->med_clients);
+               list_add(&fsc->mfc_export_list, &data->med_clients);
 
                 /* add into fsdb list. */
-                cfs_list_add(&fsc->mfc_fsdb_list, &fsdb->fsdb_clients);
+               list_add(&fsc->mfc_fsdb_list, &fsdb->fsdb_clients);
                 if (!fsc->mfc_ir_capable) {
                         ++fsdb->fsdb_nonir_clients;
                         if (fsdb->fsdb_ir_state == IR_FULL)
@@ -921,19 +922,19 @@ void mgs_fsc_cleanup(struct obd_export *exp)
 {
        struct mgs_export_data *data = &exp->u.eu_mgs_data;
        struct mgs_fsc *fsc, *tmp;
-       CFS_LIST_HEAD(head);
+       struct list_head head = LIST_HEAD_INIT(head);
 
        spin_lock(&data->med_lock);
-       cfs_list_splice_init(&data->med_clients, &head);
+       list_splice_init(&data->med_clients, &head);
        spin_unlock(&data->med_lock);
 
-       cfs_list_for_each_entry_safe(fsc, tmp, &head, mfc_export_list) {
+       list_for_each_entry_safe(fsc, tmp, &head, mfc_export_list) {
                struct fs_db *fsdb = fsc->mfc_fsdb;
 
                LASSERT(fsc->mfc_export == exp);
 
                mutex_lock(&fsdb->fsdb_mutex);
-               cfs_list_del_init(&fsc->mfc_fsdb_list);
+               list_del_init(&fsc->mfc_fsdb_list);
                if (fsc->mfc_ir_capable == 0) {
                        --fsdb->fsdb_nonir_clients;
                        LASSERT(fsdb->fsdb_ir_state != IR_FULL);
@@ -942,7 +943,7 @@ void mgs_fsc_cleanup(struct obd_export *exp)
                                fsdb->fsdb_ir_state = IR_FULL;
                }
                mutex_unlock(&fsdb->fsdb_mutex);
-               cfs_list_del_init(&fsc->mfc_export_list);
+               list_del_init(&fsc->mfc_export_list);
                class_export_put(fsc->mfc_export);
                OBD_FREE_PTR(fsc);
        }
@@ -953,15 +954,15 @@ void mgs_fsc_cleanup_by_fsdb(struct fs_db *fsdb)
 {
         struct mgs_fsc *fsc, *tmp;
 
-        cfs_list_for_each_entry_safe(fsc, tmp, &fsdb->fsdb_clients,
+       list_for_each_entry_safe(fsc, tmp, &fsdb->fsdb_clients,
                                      mfc_fsdb_list) {
                 struct mgs_export_data *data = &fsc->mfc_export->u.eu_mgs_data;
 
                 LASSERT(fsdb == fsc->mfc_fsdb);
-                cfs_list_del_init(&fsc->mfc_fsdb_list);
+               list_del_init(&fsc->mfc_fsdb_list);
 
                spin_lock(&data->med_lock);
-               cfs_list_del_init(&fsc->mfc_export_list);
+               list_del_init(&fsc->mfc_export_list);
                spin_unlock(&data->med_lock);
                 class_export_put(fsc->mfc_export);
                 OBD_FREE_PTR(fsc);
lustre/osd-ldiskfs/osd_compat.c
index d01748d..9d01f72 100644
@@ -375,7 +375,7 @@ static int osd_ost_init(const struct lu_env *env, struct osd_device *dev)
        dev->od_ost_map->om_subdir_count = rc;
         rc = 0;
 
-       CFS_INIT_LIST_HEAD(&dev->od_ost_map->om_seq_list);
+       INIT_LIST_HEAD(&dev->od_ost_map->om_seq_list);
        rwlock_init(&dev->od_ost_map->om_seq_list_lock);
        mutex_init(&dev->od_ost_map->om_dir_init_mutex);
 
@@ -422,7 +422,7 @@ static void osd_seq_free(struct osd_obj_map *map,
 {
        int j;
 
-       cfs_list_del_init(&osd_seq->oos_seq_list);
+       list_del_init(&osd_seq->oos_seq_list);
 
        if (osd_seq->oos_dirs) {
                for (j = 0; j < osd_seq->oos_subdir_count; j++) {
@@ -450,9 +450,8 @@ static void osd_ost_fini(struct osd_device *osd)
                return;
 
        write_lock(&map->om_seq_list_lock);
-       cfs_list_for_each_entry_safe(osd_seq, tmp,
-                                    &map->om_seq_list,
-                                    oos_seq_list) {
+       list_for_each_entry_safe(osd_seq, tmp, &map->om_seq_list,
+                                oos_seq_list) {
                osd_seq_free(map, osd_seq);
        }
        write_unlock(&map->om_seq_list_lock);
@@ -483,7 +482,7 @@ struct osd_obj_seq *osd_seq_find_locked(struct osd_obj_map *map, obd_seq seq)
 {
        struct osd_obj_seq *osd_seq;
 
-       cfs_list_for_each_entry(osd_seq, &map->om_seq_list, oos_seq_list) {
+       list_for_each_entry(osd_seq, &map->om_seq_list, oos_seq_list) {
                if (osd_seq->oos_seq == seq)
                        return osd_seq;
        }
@@ -828,7 +827,7 @@ static struct osd_obj_seq *osd_seq_load(struct osd_thread_info *info,
        if (osd_seq == NULL)
                GOTO(cleanup, rc = -ENOMEM);
 
-       CFS_INIT_LIST_HEAD(&osd_seq->oos_seq_list);
+       INIT_LIST_HEAD(&osd_seq->oos_seq_list);
        osd_seq->oos_seq = seq;
        /* Init subdir count to be 32, but each seq can have
         * different subdir count */
@@ -838,7 +837,7 @@ static struct osd_obj_seq *osd_seq_load(struct osd_thread_info *info,
                GOTO(cleanup, rc);
 
        write_lock(&map->om_seq_list_lock);
-       cfs_list_add(&osd_seq->oos_seq_list, &map->om_seq_list);
+       list_add(&osd_seq->oos_seq_list, &map->om_seq_list);
        write_unlock(&map->om_seq_list_lock);
 
 cleanup:
lustre/osd-ldiskfs/osd_handler.c
index 8f32d68..108ddde 100644
@@ -586,7 +586,7 @@ static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
        }
 
        id = &info->oti_id;
-       if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
+       if (!list_empty(&scrub->os_inconsistent_items)) {
                /* Search order: 2. OI scrub pending list. */
                result = osd_oii_lookup(dev, fid, id);
                if (result == 0)
@@ -912,11 +912,11 @@ static void osd_trans_commit_cb(struct super_block *sb,
         dt_txn_hook_commit(th);
 
        /* call per-transaction callbacks if any */
-       cfs_list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
+       list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
                LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
                         "commit callback entry: magic=%x name='%s'\n",
                         dcb->dcb_magic, dcb->dcb_name);
-               cfs_list_del_init(&dcb->dcb_linkage);
+               list_del_init(&dcb->dcb_linkage);
                dcb->dcb_func(NULL, th, dcb, error);
        }
 
@@ -954,7 +954,7 @@ static struct thandle *osd_trans_create(const struct lu_env *env,
                atomic_set(&th->th_refc, 1);
                th->th_alloc_size = sizeof(*oh);
                oti->oti_dev = osd_dt_dev(d);
-               CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
+               INIT_LIST_HEAD(&oh->ot_dcb_list);
                osd_th_alloced(oh);
 
                memset(oti->oti_declare_ops, 0,
@@ -1165,7 +1165,7 @@ static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
 
        LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
        LASSERT(&dcb->dcb_func != NULL);
-       cfs_list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
+       list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
 
        return 0;
 }
@@ -5850,7 +5850,7 @@ static int osd_device_init0(const struct lu_env *env,
        /* self-repair LMA by default */
        o->od_lma_self_repair = 1;
 
-       CFS_INIT_LIST_HEAD(&o->od_ios_list);
+       INIT_LIST_HEAD(&o->od_ios_list);
        /* setup scrub, including OI files initialization */
        rc = osd_scrub_setup(env, o);
        if (rc < 0)
lustre/osd-ldiskfs/osd_iam.c
index 009ec9a..e7b5ae6 100644
  *
  * No locking. Callers synchronize.
  */
-static CFS_LIST_HEAD(iam_formats);
+static struct list_head iam_formats = LIST_HEAD_INIT(iam_formats);
 
 void iam_format_register(struct iam_format *fmt)
 {
-        cfs_list_add(&fmt->if_linkage, &iam_formats);
+       list_add(&fmt->if_linkage, &iam_formats);
 }
 EXPORT_SYMBOL(iam_format_register);
 
@@ -222,7 +222,7 @@ static int iam_format_guess(struct iam_container *c)
         }
 
         result = -ENOENT;
-        cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
+       list_for_each_entry(fmt, &iam_formats, if_linkage) {
                 result = fmt->if_guess(c);
                 if (result == 0)
                         break;
lustre/osd-ldiskfs/osd_iam.h
index 697dd15..78a1ce5 100644
@@ -1094,7 +1094,7 @@ struct iam_format {
         /*
          * Linkage into global list of container formats.
          */
-        cfs_list_t if_linkage;
+       struct list_head if_linkage;
 };
 
 void iam_format_register(struct iam_format *fmt);
lustre/osd-ldiskfs/osd_internal.h
index cb1299e..856d66b 100644
@@ -146,21 +146,21 @@ struct osd_obj_seq {
        struct dentry    *oos_root;        /* O/<seq> */
        struct dentry    **oos_dirs;       /* O/<seq>/d0-dXX */
        obd_seq          oos_seq;          /* seq number */
-       cfs_list_t       oos_seq_list;     /* list to seq_list */
+       struct list_head oos_seq_list;     /* list to seq_list */
 };
 
 struct osd_obj_map {
        struct dentry    *om_root;        /* dentry for /O */
        rwlock_t         om_seq_list_lock; /* lock for seq_list */
-       cfs_list_t       om_seq_list;      /* list head for seq */
+       struct list_head om_seq_list;      /* list head for seq */
        int              om_subdir_count;
        struct mutex     om_dir_init_mutex;
 };
 
 struct osd_mdobj {
        struct dentry   *om_root;      /* AGENT/<index> */
-       obd_seq         om_index;     /* mdt index */
-       cfs_list_t      om_list;      /* list to omm_list */
+       obd_seq          om_index;     /* mdt index */
+       struct list_head om_list;      /* list to omm_list */
 };
 
 struct osd_mdobj_map {
@@ -176,7 +176,7 @@ struct osd_mdobj_map {
 struct osd_inconsistent_item {
        /* link into osd_scrub::os_inconsistent_items,
         * protected by osd_scrub::os_lock. */
-       cfs_list_t             oii_list;
+       struct list_head       oii_list;
 
        /* The right FID <=> ino#/gen mapping. */
        struct osd_idmap_cache oii_cache;
@@ -243,9 +243,9 @@ struct osd_device {
         unsigned long             od_capa_timeout;
         __u32                     od_capa_alg;
         struct lustre_capa_key   *od_capa_keys;
-        cfs_hlist_head_t         *od_capa_hash;
+       struct hlist_head        *od_capa_hash;
 
-        cfs_proc_dir_entry_t     *od_proc_entry;
+       struct proc_dir_entry    *od_proc_entry;
         struct lprocfs_stats     *od_stats;
 
        spinlock_t                od_osfs_lock;
@@ -267,7 +267,7 @@ struct osd_device {
        struct mutex              od_otable_mutex;
        struct osd_otable_it     *od_otable_it;
        struct osd_scrub          od_scrub;
-       cfs_list_t                od_ios_list;
+       struct list_head                  od_ios_list;
 
        /* service name associated with the osd device */
        char                      od_svname[MAX_OBD_NAME];
@@ -313,7 +313,7 @@ struct osd_thandle {
         struct thandle          ot_super;
         handle_t               *ot_handle;
         struct ldiskfs_journal_cb_entry ot_jcb;
-        cfs_list_t              ot_dcb_list;
+       struct list_head              ot_dcb_list;
        /* Link to the device, for debugging. */
        struct lu_ref_link      ot_dev_link;
         unsigned short          ot_credits;
@@ -427,7 +427,7 @@ struct osd_it_iam {
 };
 
 struct osd_quota_leaf {
-       cfs_list_t      oql_link;
+       struct list_head        oql_link;
        uint            oql_blk;
 };
 
@@ -445,7 +445,7 @@ struct osd_it_quota {
        /** the record index in the leaf/index block */
        uint                     oiq_index[LUSTRE_DQTREEDEPTH + 1];
        /** list of already processed leaf blocks */
-       cfs_list_t               oiq_list;
+       struct list_head         oiq_list;
 };
 
 #define MAX_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / 512)
lustre/osd-ldiskfs/osd_quota.c
index feda48d..a56f890 100644
@@ -170,7 +170,7 @@ static struct dt_it *osd_it_acct_init(const struct lu_env *env,
        memset(it, 0, sizeof(*it));
        lu_object_get(lo);
        it->oiq_obj = obj;
-       CFS_INIT_LIST_HEAD(&it->oiq_list);
+       INIT_LIST_HEAD(&it->oiq_list);
 
        /* LUSTRE_DQTREEOFF is the initial offset where the tree can be found */
        it->oiq_blk[0] = LUSTRE_DQTREEOFF;
@@ -194,8 +194,8 @@ static void osd_it_acct_fini(const struct lu_env *env, struct dt_it *di)
 
        lu_object_put(env, &it->oiq_obj->oo_dt.do_lu);
 
-       cfs_list_for_each_entry_safe(leaf, tmp, &it->oiq_list, oql_link) {
-               cfs_list_del_init(&leaf->oql_link);
+       list_for_each_entry_safe(leaf, tmp, &it->oiq_list, oql_link) {
+               list_del_init(&leaf->oql_link);
                OBD_FREE_PTR(leaf);
        }
        EXIT;
@@ -262,9 +262,9 @@ static int osd_it_add_processed(struct osd_it_quota *it, int depth)
        OBD_ALLOC_PTR(leaf);
        if (leaf == NULL)
                RETURN(-ENOMEM);
-       CFS_INIT_LIST_HEAD(&leaf->oql_link);
+       INIT_LIST_HEAD(&leaf->oql_link);
        leaf->oql_blk = it->oiq_blk[depth];
-       cfs_list_add_tail(&leaf->oql_link, &it->oiq_list);
+       list_add_tail(&leaf->oql_link, &it->oiq_list);
        RETURN(0);
 }
 
lustre/osd-ldiskfs/osd_quota_fmt.c
index f9b3e9c..e37e910 100644
@@ -227,7 +227,7 @@ int walk_block_dqentry(const struct lu_env *env, struct osd_object *obj,
        ENTRY;
 
        /* check if the leaf block has been processed before */
-       cfs_list_for_each_entry(leaf, &it->oiq_list, oql_link) {
+       list_for_each_entry(leaf, &it->oiq_list, oql_link) {
                if (leaf->oql_blk == blk)
                        RETURN(1);
        }
lustre/osd-ldiskfs/osd_scrub.c
index cc0d8b7..7287a8b 100644
@@ -446,8 +446,8 @@ osd_scrub_check_update(struct osd_thread_info *info, struct osd_device *dev,
                GOTO(out, rc = val);
 
        if (scrub->os_in_prior)
-               oii = cfs_list_entry(oic, struct osd_inconsistent_item,
-                                    oii_cache);
+               oii = list_entry(oic, struct osd_inconsistent_item,
+                                oii_cache);
 
        if (lid->oii_ino < sf->sf_pos_latest_start && oii == NULL)
                GOTO(out, rc = 0);
@@ -590,10 +590,10 @@ out:
                iput(inode);
 
        if (oii != NULL) {
-               LASSERT(!cfs_list_empty(&oii->oii_list));
+               LASSERT(!list_empty(&oii->oii_list));
 
                spin_lock(&scrub->os_lock);
-               cfs_list_del_init(&oii->oii_list);
+               list_del_init(&oii->oii_list);
                spin_unlock(&scrub->os_lock);
                OBD_FREE_PTR(oii);
        }
@@ -937,7 +937,7 @@ static int osd_scrub_next(struct osd_thread_info *info, struct osd_device *dev,
 
                lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val), NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
-                            !cfs_list_empty(&scrub->os_inconsistent_items) ||
+                            !list_empty(&scrub->os_inconsistent_items) ||
                             !thread_is_running(thread),
                             &lwi);
        }
@@ -955,11 +955,11 @@ static int osd_scrub_next(struct osd_thread_info *info, struct osd_device *dev,
        if (unlikely(!thread_is_running(thread)))
                return SCRUB_NEXT_EXIT;
 
-       if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
+       if (!list_empty(&scrub->os_inconsistent_items)) {
                struct osd_inconsistent_item *oii;
 
-               oii = cfs_list_entry(scrub->os_inconsistent_items.next,
-                                    struct osd_inconsistent_item, oii_list);
+               oii = list_entry(scrub->os_inconsistent_items.next,
+                                struct osd_inconsistent_item, oii_list);
                *oic = &oii->oii_cache;
                scrub->os_in_prior = 1;
                return 0;
@@ -1014,7 +1014,7 @@ osd_scrub_wakeup(struct osd_scrub *scrub, struct osd_otable_it *it)
 {
        spin_lock(&scrub->os_lock);
        if (osd_scrub_has_window(scrub, &it->ooi_cache) ||
-           !cfs_list_empty(&scrub->os_inconsistent_items) ||
+           !list_empty(&scrub->os_inconsistent_items) ||
            it->ooi_waiting || !thread_is_running(&scrub->os_thread))
                scrub->os_waiting = 0;
        else
@@ -1272,12 +1272,12 @@ post:
               rc, scrub->os_pos_current);
 
 out:
-       while (!cfs_list_empty(&scrub->os_inconsistent_items)) {
+       while (!list_empty(&scrub->os_inconsistent_items)) {
                struct osd_inconsistent_item *oii;
 
-               oii = cfs_list_entry(scrub->os_inconsistent_items.next,
+               oii = list_entry(scrub->os_inconsistent_items.next,
                                     struct osd_inconsistent_item, oii_list);
-               cfs_list_del_init(&oii->oii_list);
+               list_del_init(&oii->oii_list);
                OBD_FREE_PTR(oii);
        }
        lu_env_fini(&env);
@@ -1436,7 +1436,7 @@ static const struct osd_lf_map osd_dl_maps[] = {
 };
 
 struct osd_ios_item {
-       cfs_list_t       oii_list;
+       struct list_head oii_list;
        struct dentry   *oii_dentry;
        scandir_t        oii_scandir;
        filldir_t        oii_filldir;
@@ -1480,11 +1480,11 @@ osd_ios_new_item(struct osd_device *dev, struct dentry *dentry,
        if (item == NULL)
                RETURN(-ENOMEM);
 
-       CFS_INIT_LIST_HEAD(&item->oii_list);
+       INIT_LIST_HEAD(&item->oii_list);
        item->oii_dentry = dget(dentry);
        item->oii_scandir = scandir;
        item->oii_filldir = filldir;
-       cfs_list_add_tail(&item->oii_list, &dev->od_ios_list);
+       list_add_tail(&item->oii_list, &dev->od_ios_list);
 
        RETURN(0);
 }
@@ -1918,12 +1918,12 @@ static int osd_initial_OI_scrub(struct osd_thread_info *info,
                if (rc != 0)
                        break;
 
-               if (cfs_list_empty(&dev->od_ios_list))
+               if (list_empty(&dev->od_ios_list))
                        break;
 
-               item = cfs_list_entry(dev->od_ios_list.next,
-                                     struct osd_ios_item, oii_list);
-               cfs_list_del_init(&item->oii_list);
+               item = list_entry(dev->od_ios_list.next,
+                                 struct osd_ios_item, oii_list);
+               list_del_init(&item->oii_list);
 
                LASSERT(item->oii_scandir != NULL);
                scandir = item->oii_scandir;
@@ -1931,10 +1931,10 @@ static int osd_initial_OI_scrub(struct osd_thread_info *info,
                dentry = item->oii_dentry;
        }
 
-       while (!cfs_list_empty(&dev->od_ios_list)) {
-               item = cfs_list_entry(dev->od_ios_list.next,
-                                     struct osd_ios_item, oii_list);
-               cfs_list_del_init(&item->oii_list);
+       while (!list_empty(&dev->od_ios_list)) {
+               item = list_entry(dev->od_ios_list.next,
+                                 struct osd_ios_item, oii_list);
+               list_del_init(&item->oii_list);
                dput(item->oii_dentry);
                OBD_FREE_PTR(item);
        }
@@ -2112,7 +2112,7 @@ int osd_scrub_setup(const struct lu_env *env, struct osd_device *dev)
        init_waitqueue_head(&scrub->os_thread.t_ctl_waitq);
        init_rwsem(&scrub->os_rwsem);
        spin_lock_init(&scrub->os_lock);
-       CFS_INIT_LIST_HEAD(&scrub->os_inconsistent_items);
+       INIT_LIST_HEAD(&scrub->os_inconsistent_items);
 
        push_ctxt(&saved, ctxt);
        filp = filp_open(osd_scrub_name, O_RDWR | O_CREAT, 0644);
@@ -2511,7 +2511,7 @@ int osd_oii_insert(struct osd_device *dev, struct osd_idmap_cache *oic,
        if (unlikely(oii == NULL))
                RETURN(-ENOMEM);
 
-       CFS_INIT_LIST_HEAD(&oii->oii_list);
+       INIT_LIST_HEAD(&oii->oii_list);
        oii->oii_cache = *oic;
        oii->oii_insert = insert;
 
@@ -2522,9 +2522,9 @@ int osd_oii_insert(struct osd_device *dev, struct osd_idmap_cache *oic,
                RETURN(-EAGAIN);
        }
 
-       if (cfs_list_empty(&scrub->os_inconsistent_items))
+       if (list_empty(&scrub->os_inconsistent_items))
                wakeup = 1;
-       cfs_list_add_tail(&oii->oii_list, &scrub->os_inconsistent_items);
+       list_add_tail(&oii->oii_list, &scrub->os_inconsistent_items);
        spin_unlock(&scrub->os_lock);
 
        if (wakeup != 0)
@@ -2541,7 +2541,7 @@ int osd_oii_lookup(struct osd_device *dev, const struct lu_fid *fid,
        ENTRY;
 
        spin_lock(&scrub->os_lock);
-       cfs_list_for_each_entry(oii, &scrub->os_inconsistent_items, oii_list) {
+       list_for_each_entry(oii, &scrub->os_inconsistent_items, oii_list) {
                if (lu_fid_eq(fid, &oii->oii_cache.oic_fid)) {
                        *id = oii->oii_cache.oic_lid;
                        spin_unlock(&scrub->os_lock);
lustre/osd-ldiskfs/osd_scrub.h
index 3c32dcb..f046e33 100644
@@ -187,7 +187,7 @@ struct osd_scrub {
        struct lvfs_run_ctxt    os_ctxt;
        struct ptlrpc_thread    os_thread;
        struct osd_idmap_cache  os_oic;
-       cfs_list_t              os_inconsistent_items;
+       struct list_head        os_inconsistent_items;
 
        /* write lock for scrub prep/update/post/checkpoint,
         * read lock for scrub dump. */
lustre/osp/osp_internal.h
index e220914..3a5d0a6 100644
@@ -59,8 +59,8 @@ struct osp_id_tracker {
        /* callback is register once per diskfs -- that's the whole point */
        struct dt_txn_callback   otr_tx_cb;
        /* single node can run many clusters */
-       cfs_list_t               otr_wakeup_list;
-       cfs_list_t               otr_list;
+       struct list_head         otr_wakeup_list;
+       struct list_head         otr_list;
        /* underlying shared device */
        struct dt_device        *otr_dev;
        /* how many users of this tracker */
@@ -154,7 +154,7 @@ struct osp_device {
        struct ptlrpc_thread             opd_syn_thread;
        wait_queue_head_t                opd_syn_waitq;
        /* list of remotely committed rpc */
-       cfs_list_t                       opd_syn_committed_there;
+       struct list_head                 opd_syn_committed_there;
        /* number of changes being under sync */
        int                              opd_syn_sync_in_progress;
        /* number of RPCs in flight - flow control */
@@ -173,7 +173,7 @@ struct osp_device {
        /* last processed (taken from llog) id */
        unsigned long                    opd_syn_last_processed_id;
        struct osp_id_tracker           *opd_syn_tracker;
-       cfs_list_t                       opd_syn_ontrack;
+       struct list_head                 opd_syn_ontrack;
 
        /*
         * statfs related fields: OSP maintains it on its own
lustre/osp/osp_sync.c
index d0805f3..388c80e 100644
@@ -128,7 +128,7 @@ static inline int osp_sync_has_work(struct osp_device *d)
                return 1;
 
        /* has remotely committed? */
-       if (!cfs_list_empty(&d->opd_syn_committed_there))
+       if (!list_empty(&d->opd_syn_committed_there))
                return 1;
 
        return 0;
@@ -321,12 +321,12 @@ static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
 
        LASSERT(d);
        LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
-       LASSERT(cfs_list_empty(&req->rq_exp_list));
+       LASSERT(list_empty(&req->rq_exp_list));
 
        ptlrpc_request_addref(req);
 
        spin_lock(&d->opd_syn_lock);
-       cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
+       list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
        spin_unlock(&d->opd_syn_lock);
 
        /* XXX: some batching wouldn't hurt */
@@ -354,12 +354,12 @@ static int osp_sync_interpret(const struct lu_env *env,
                 * but object doesn't exist anymore - cancell llog record
                 */
                LASSERT(req->rq_transno == 0);
-               LASSERT(cfs_list_empty(&req->rq_exp_list));
+               LASSERT(list_empty(&req->rq_exp_list));
 
                ptlrpc_request_addref(req);
 
                spin_lock(&d->opd_syn_lock);
-               cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
+               list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
                spin_unlock(&d->opd_syn_lock);
 
                wake_up(&d->opd_syn_waitq);
@@ -455,7 +455,7 @@ static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d,
        body->oa.o_lcookie.lgc_lgl = llh->lgh_id;
        body->oa.o_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
        body->oa.o_lcookie.lgc_index = h->lrh_index;
-       CFS_INIT_LIST_HEAD(&req->rq_exp_list);
+       INIT_LIST_HEAD(&req->rq_exp_list);
        req->rq_svc_thread = (void *) OSP_JOB_MAGIC;
 
        req->rq_interpret_reply = osp_sync_interpret;
@@ -722,12 +722,12 @@ static void osp_sync_process_committed(const struct lu_env *env,
        struct ptlrpc_request   *req, *tmp;
        struct llog_ctxt        *ctxt;
        struct llog_handle      *llh;
-       cfs_list_t               list;
+       struct list_head         list;
        int                      rc, done = 0;
 
        ENTRY;
 
-       if (cfs_list_empty(&d->opd_syn_committed_there))
+       if (list_empty(&d->opd_syn_committed_there))
                return;
 
        /*
@@ -752,17 +752,17 @@ static void osp_sync_process_committed(const struct lu_env *env,
        llh = ctxt->loc_handle;
        LASSERT(llh);
 
-       CFS_INIT_LIST_HEAD(&list);
+       INIT_LIST_HEAD(&list);
        spin_lock(&d->opd_syn_lock);
-       cfs_list_splice(&d->opd_syn_committed_there, &list);
-       CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
+       list_splice(&d->opd_syn_committed_there, &list);
+       INIT_LIST_HEAD(&d->opd_syn_committed_there);
        spin_unlock(&d->opd_syn_lock);
 
-       cfs_list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
+       list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
                struct llog_cookie *lcookie = NULL;
 
                LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
-               cfs_list_del_init(&req->rq_exp_list);
+               list_del_init(&req->rq_exp_list);
 
                if (d->opd_connect_mdt) {
                        struct object_update_request *ureq;
@@ -884,7 +884,7 @@ static int osp_sync_process_queues(const struct lu_env *env,
                l_wait_event(d->opd_syn_waitq,
                             !osp_sync_running(d) ||
                             osp_sync_can_process_new(d, rec) ||
-                            !cfs_list_empty(&d->opd_syn_committed_there),
+                            !list_empty(&d->opd_syn_committed_there),
                             &lwi);
        } while (1);
 }
@@ -966,7 +966,7 @@ static int osp_sync_thread(void *_arg)
                LASSERTF(count < 10, "%s: %d %d %sempty\n",
                         d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
                         d->opd_syn_rpc_in_flight,
-                        cfs_list_empty(&d->opd_syn_committed_there) ? "" :"!");
+                        list_empty(&d->opd_syn_committed_there) ? "" : "!");
 
        }
 
@@ -979,7 +979,7 @@ out:
                 "%s: %d %d %sempty\n",
                 d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
                 d->opd_syn_rpc_in_flight,
-                cfs_list_empty(&d->opd_syn_committed_there) ? "" : "!");
+                list_empty(&d->opd_syn_committed_there) ? "" : "!");
 
        thread->t_flags = SVC_STOPPED;
 
@@ -1132,7 +1132,7 @@ int osp_sync_init(const struct lu_env *env, struct osp_device *d)
        spin_lock_init(&d->opd_syn_lock);
        init_waitqueue_head(&d->opd_syn_waitq);
        init_waitqueue_head(&d->opd_syn_thread.t_ctl_waitq);
-       CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
+       INIT_LIST_HEAD(&d->opd_syn_committed_there);
 
        task = kthread_run(osp_sync_thread, d, "osp-syn-%u-%u",
                           d->opd_index, d->opd_group);
@@ -1174,7 +1174,8 @@ int osp_sync_fini(struct osp_device *d)
 }
 
 static DEFINE_MUTEX(osp_id_tracker_sem);
-static CFS_LIST_HEAD(osp_id_tracker_list);
+static struct list_head osp_id_tracker_list =
+               LIST_HEAD_INIT(osp_id_tracker_list);
 
 static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
 {
@@ -1194,8 +1195,8 @@ static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
                       tr->otr_committed_id, txn->oti_current_id);
                tr->otr_committed_id = txn->oti_current_id;
 
-               cfs_list_for_each_entry(d, &tr->otr_wakeup_list,
-                                       opd_syn_ontrack) {
+               list_for_each_entry(d, &tr->otr_wakeup_list,
+                                   opd_syn_ontrack) {
                        d->opd_syn_last_committed_id = tr->otr_committed_id;
                        wake_up(&d->opd_syn_waitq);
                }
@@ -1211,10 +1212,10 @@ static int osp_sync_id_traction_init(struct osp_device *d)
        LASSERT(d);
        LASSERT(d->opd_storage);
        LASSERT(d->opd_syn_tracker == NULL);
-       CFS_INIT_LIST_HEAD(&d->opd_syn_ontrack);
+       INIT_LIST_HEAD(&d->opd_syn_ontrack);
 
        mutex_lock(&osp_id_tracker_sem);
-       cfs_list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
+       list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
                if (tr->otr_dev == d->opd_storage) {
                        LASSERT(atomic_read(&tr->otr_refcount));
                        atomic_inc(&tr->otr_refcount);
@@ -1234,8 +1235,8 @@ static int osp_sync_id_traction_init(struct osp_device *d)
                        tr->otr_next_id = 1;
                        tr->otr_committed_id = 0;
                        atomic_set(&tr->otr_refcount, 1);
-                       CFS_INIT_LIST_HEAD(&tr->otr_wakeup_list);
-                       cfs_list_add(&tr->otr_list, &osp_id_tracker_list);
+                       INIT_LIST_HEAD(&tr->otr_wakeup_list);
+                       list_add(&tr->otr_list, &osp_id_tracker_list);
                        tr->otr_tx_cb.dtc_txn_commit =
                                                osp_sync_tracker_commit_cb;
                        tr->otr_tx_cb.dtc_cookie = tr;
@@ -1267,8 +1268,8 @@ static void osp_sync_id_traction_fini(struct osp_device *d)
        mutex_lock(&osp_id_tracker_sem);
        if (atomic_dec_and_test(&tr->otr_refcount)) {
                dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb);
-               LASSERT(cfs_list_empty(&tr->otr_wakeup_list));
-               cfs_list_del(&tr->otr_list);
+               LASSERT(list_empty(&tr->otr_wakeup_list));
+               list_del(&tr->otr_list);
                OBD_FREE_PTR(tr);
                d->opd_syn_tracker = NULL;
        }
@@ -1301,8 +1302,8 @@ static __u32 osp_sync_id_get(struct osp_device *d, __u32 id)
                id = tr->otr_next_id++;
        if (id > d->opd_syn_last_used_id)
                d->opd_syn_last_used_id = id;
-       if (cfs_list_empty(&d->opd_syn_ontrack))
-               cfs_list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
+       if (list_empty(&d->opd_syn_ontrack))
+               list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
        spin_unlock(&tr->otr_lock);
        CDEBUG(D_OTHER, "new id %u\n", (unsigned) id);
 
@@ -1316,11 +1317,11 @@ static void osp_sync_remove_from_tracker(struct osp_device *d)
        tr = d->opd_syn_tracker;
        LASSERT(tr);
 
-       if (cfs_list_empty(&d->opd_syn_ontrack))
+       if (list_empty(&d->opd_syn_ontrack))
                return;
 
        spin_lock(&tr->otr_lock);
-       cfs_list_del_init(&d->opd_syn_ontrack);
+       list_del_init(&d->opd_syn_ontrack);
        spin_unlock(&tr->otr_lock);
 }
 
lustre/quota/lquota_entry.c
index 0bed97c..49a31ac 100644
@@ -43,40 +43,40 @@ static unsigned lqe64_hash_hash(cfs_hash_t *hs, const void *key, unsigned mask)
        return cfs_hash_u64_hash(*((__u64 *)key), mask);
 }
 
-static void *lqe64_hash_key(cfs_hlist_node_t *hnode)
+static void *lqe64_hash_key(struct hlist_node *hnode)
 {
        struct lquota_entry *lqe;
-       lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+       lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        return &lqe->lqe_id.qid_uid;
 }
 
-static int lqe64_hash_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int lqe64_hash_keycmp(const void *key, struct hlist_node *hnode)
 {
        struct lquota_entry *lqe;
-       lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+       lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        return (lqe->lqe_id.qid_uid == *((__u64*)key));
 }
 
-static void *lqe_hash_object(cfs_hlist_node_t *hnode)
+static void *lqe_hash_object(struct hlist_node *hnode)
 {
-       return cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+       return hlist_entry(hnode, struct lquota_entry, lqe_hash);
 }
 
-static void lqe_hash_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lqe_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
 {
        struct lquota_entry *lqe;
-       lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+       lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        lqe_getref(lqe);
 }
 
-static void lqe_hash_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lqe_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
 {
        struct lquota_entry *lqe;
-       lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+       lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        lqe_putref(lqe);
 }
 
-static void lqe_hash_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lqe_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
 {
        CERROR("Should not have any item left!\n");
 }
@@ -115,12 +115,12 @@ struct lqe_iter_data {
 };
 
 static int lqe_iter_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                      cfs_hlist_node_t *hnode, void *data)
+                      struct hlist_node *hnode, void *data)
 {
        struct lqe_iter_data *d = (struct lqe_iter_data *)data;
        struct lquota_entry  *lqe;
 
-       lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+       lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        LASSERT(atomic_read(&lqe->lqe_ref) > 0);
 
        /* Only one reference held by hash table, and nobody else can
@@ -332,7 +332,7 @@ struct lquota_entry *lqe_locate(const struct lu_env *env,
        atomic_set(&new->lqe_ref, 1); /* hold 1 for caller */
        new->lqe_id     = *qid;
        new->lqe_site   = site;
-       CFS_INIT_LIST_HEAD(&new->lqe_link);
+       INIT_LIST_HEAD(&new->lqe_link);
 
        /* quota settings need to be updated from disk, that's why
         * lqe->lqe_uptodate isn't set yet */
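
The hash callbacks in the lquota_entry.c hunks above show the hlist side of the
conversion. A minimal sketch of that pattern, again using a hypothetical
demo_node type rather than code from the patch:

    #include <linux/list.h>
    #include <linux/types.h>

    struct demo_node {
            u64                     key;
            struct hlist_node       hash;   /* was: cfs_hlist_node_t hash; */
    };

    /* was: cfs_hlist_entry(hnode, struct demo_node, hash) */
    static inline struct demo_node *demo_from_hnode(struct hlist_node *hnode)
    {
            return hlist_entry(hnode, struct demo_node, hash);
    }

Only the spelling changes; the container_of() semantics behind hlist_entry()
are the same as before.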
lustre/quota/lquota_internal.h
index 2a094ab..08b15ed 100644
@@ -138,7 +138,7 @@ struct lquota_slv_entry {
  * A lquota_entry structure belong to a single lquota_site */
 struct lquota_entry {
        /* link to site hash table */
-       cfs_hlist_node_t         lqe_hash;
+       struct hlist_node        lqe_hash;
 
        /* quota identifier associated with this entry */
        union lquota_id          lqe_id;
@@ -152,7 +152,7 @@ struct lquota_entry {
        /* linked to list of lqes which:
         * - need quota space adjustment on slave
         * - need glimpse to be sent on master */
-       cfs_list_t               lqe_link;
+       struct list_head         lqe_link;
 
        /* current quota settings/usage of this ID */
        __u64           lqe_granted; /* granted limit, inodes or kbytes */
lustre/quota/qmt_dev.c
index 28783a0..0bd91e7 100644
@@ -237,7 +237,7 @@ static int qmt_device_init0(const struct lu_env *env, struct qmt_device *qmt,
        /* set up and start rebalance thread */
        thread_set_flags(&qmt->qmt_reba_thread, SVC_STOPPED);
        init_waitqueue_head(&qmt->qmt_reba_thread.t_ctl_waitq);
-       CFS_INIT_LIST_HEAD(&qmt->qmt_reba_list);
+       INIT_LIST_HEAD(&qmt->qmt_reba_list);
        spin_lock_init(&qmt->qmt_reba_lock);
        rc = qmt_start_reba_thread(qmt);
        if (rc) {
lustre/quota/qmt_internal.h
index ac541b2..b4df058 100644
@@ -68,7 +68,7 @@ struct qmt_device {
        cfs_hash_t              *qmt_pool_hash;
 
        /* List of pools managed by this master target */
-       cfs_list_t               qmt_pool_list;
+       struct list_head         qmt_pool_list;
 
        /* procfs root directory for this qmt */
        cfs_proc_dir_entry_t    *qmt_proc;
@@ -77,7 +77,7 @@ struct qmt_device {
        struct ptlrpc_thread     qmt_reba_thread;
 
        /* list of lqe entry which need space rebalancing */
-       cfs_list_t               qmt_reba_list;
+       struct list_head         qmt_reba_list;
 
        /* lock protecting rebalancing list */
        spinlock_t               qmt_reba_lock;
@@ -96,10 +96,10 @@ struct qmt_device {
  */
 struct qmt_pool_info {
        /* link to qmt's pool hash */
-       cfs_hlist_node_t         qpi_hash;
+       struct hlist_node        qpi_hash;
 
        /* chained list of all pools managed by the same qmt */
-       cfs_list_t               qpi_linkage;
+       struct list_head         qpi_linkage;
 
        /* Pool key composed of pool_id | (pool_type << 16)
         * Only pool ID 0 is supported for now and the pool type is either
lustre/quota/qmt_lock.c
index f722617..04acf61 100644
@@ -457,19 +457,19 @@ static int qmt_glimpse_lock(const struct lu_env *env, struct qmt_device *qmt,
                            struct ldlm_resource *res, union ldlm_gl_desc *desc,
                            qmt_glimpse_cb_t cb, void *arg)
 {
-       cfs_list_t      *tmp, *pos;
-       CFS_LIST_HEAD(gl_list);
+       struct list_head *tmp, *pos;
+       struct list_head gl_list = LIST_HEAD_INIT(gl_list);
        int              rc = 0;
        ENTRY;
 
        lock_res(res);
        /* scan list of granted locks */
-       cfs_list_for_each(pos, &res->lr_granted) {
+       list_for_each(pos, &res->lr_granted) {
                struct ldlm_glimpse_work        *work;
                struct ldlm_lock                *lock;
                struct obd_uuid                 *uuid;
 
-               lock = cfs_list_entry(pos, struct ldlm_lock, l_res_link);
+               lock = list_entry(pos, struct ldlm_lock, l_res_link);
                LASSERT(lock->l_export);
                uuid = &lock->l_export->exp_client_uuid;
 
@@ -493,7 +493,7 @@ static int qmt_glimpse_lock(const struct lu_env *env, struct qmt_device *qmt,
                        continue;
                }
 
-               cfs_list_add_tail(&work->gl_list, &gl_list);
+               list_add_tail(&work->gl_list, &gl_list);
                work->gl_lock  = LDLM_LOCK_GET(lock);
                work->gl_flags = 0;
                work->gl_desc  = desc;
@@ -501,7 +501,7 @@ static int qmt_glimpse_lock(const struct lu_env *env, struct qmt_device *qmt,
        }
        unlock_res(res);
 
-       if (cfs_list_empty(&gl_list)) {
+       if (list_empty(&gl_list)) {
                CDEBUG(D_QUOTA, "%s: nobody to notify\n", qmt->qmt_svname);
                RETURN(0);
        }
@@ -509,12 +509,12 @@ static int qmt_glimpse_lock(const struct lu_env *env, struct qmt_device *qmt,
        /* issue glimpse callbacks to all connected slaves */
        rc = ldlm_glimpse_locks(res, &gl_list);
 
-       cfs_list_for_each_safe(pos, tmp, &gl_list) {
+       list_for_each_safe(pos, tmp, &gl_list) {
                struct ldlm_glimpse_work *work;
 
-               work = cfs_list_entry(pos, struct ldlm_glimpse_work, gl_list);
+               work = list_entry(pos, struct ldlm_glimpse_work, gl_list);
 
-               cfs_list_del(&work->gl_list);
+               list_del(&work->gl_list);
                CERROR("%s: failed to notify %s of new quota settings\n",
                       qmt->qmt_svname,
                       obd_uuid2str(&work->gl_lock->l_export->exp_client_uuid));
@@ -681,8 +681,8 @@ void qmt_id_lock_notify(struct qmt_device *qmt, struct lquota_entry *lqe)
 
        lqe_getref(lqe);
        spin_lock(&qmt->qmt_reba_lock);
-       if (!qmt->qmt_stopping && cfs_list_empty(&lqe->lqe_link)) {
-               cfs_list_add_tail(&lqe->lqe_link, &qmt->qmt_reba_list);
+       if (!qmt->qmt_stopping && list_empty(&lqe->lqe_link)) {
+               list_add_tail(&lqe->lqe_link, &qmt->qmt_reba_list);
                added = true;
        }
        spin_unlock(&qmt->qmt_reba_lock);
@@ -730,13 +730,13 @@ static int qmt_reba_thread(void *arg)
 
        while (1) {
                l_wait_event(thread->t_ctl_waitq,
-                            !cfs_list_empty(&qmt->qmt_reba_list) ||
+                            !list_empty(&qmt->qmt_reba_list) ||
                             !thread_is_running(thread), &lwi);
 
                spin_lock(&qmt->qmt_reba_lock);
-               cfs_list_for_each_entry_safe(lqe, tmp, &qmt->qmt_reba_list,
-                                            lqe_link) {
-                       cfs_list_del_init(&lqe->lqe_link);
+               list_for_each_entry_safe(lqe, tmp, &qmt->qmt_reba_list,
+                                        lqe_link) {
+                       list_del_init(&lqe->lqe_link);
                        spin_unlock(&qmt->qmt_reba_lock);
 
                        if (thread_is_running(thread))
@@ -799,5 +799,5 @@ void qmt_stop_reba_thread(struct qmt_device *qmt)
                l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
                             &lwi);
        }
-       LASSERT(cfs_list_empty(&qmt->qmt_reba_list));
+       LASSERT(list_empty(&qmt->qmt_reba_list));
 }
lustre/quota/qmt_pool.c
index f5aa0c1..30ad195 100644
@@ -91,40 +91,40 @@ static unsigned qpi_hash_hash(cfs_hash_t *hs, const void *key, unsigned mask)
        return cfs_hash_u32_hash(*((__u32 *)key), mask);
 }
 
-static void *qpi_hash_key(cfs_hlist_node_t *hnode)
+static void *qpi_hash_key(struct hlist_node *hnode)
 {
        struct qmt_pool_info *pool;
-       pool = cfs_hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
+       pool = hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
        return &pool->qpi_key;
 }
 
-static int qpi_hash_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int qpi_hash_keycmp(const void *key, struct hlist_node *hnode)
 {
        struct qmt_pool_info *pool;
-       pool = cfs_hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
+       pool = hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
        return pool->qpi_key == *((__u32 *)key);
 }
 
-static void *qpi_hash_object(cfs_hlist_node_t *hnode)
+static void *qpi_hash_object(struct hlist_node *hnode)
 {
-       return cfs_hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
+       return hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
 }
 
-static void qpi_hash_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void qpi_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
 {
        struct qmt_pool_info *pool;
-       pool = cfs_hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
+       pool = hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
        qpi_getref(pool);
 }
 
-static void qpi_hash_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void qpi_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
 {
        struct qmt_pool_info *pool;
-       pool = cfs_hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
+       pool = hlist_entry(hnode, struct qmt_pool_info, qpi_hash);
        qpi_putref_locked(pool);
 }
 
-static void qpi_hash_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void qpi_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
 {
        CERROR("Should not have any item left!\n");
 }
@@ -199,7 +199,7 @@ static int qmt_pool_alloc(const struct lu_env *env, struct qmt_device *qmt,
        OBD_ALLOC_PTR(pool);
        if (pool == NULL)
                RETURN(-ENOMEM);
-       CFS_INIT_LIST_HEAD(&pool->qpi_linkage);
+       INIT_LIST_HEAD(&pool->qpi_linkage);
 
        /* assign key used by hash functions */
        pool->qpi_key = pool_id + (pool_type << 16);
@@ -238,7 +238,7 @@ static int qmt_pool_alloc(const struct lu_env *env, struct qmt_device *qmt,
        }
 
        /* add to qmt pool list */
-       cfs_list_add_tail(&pool->qpi_linkage, &qmt->qmt_pool_list);
+       list_add_tail(&pool->qpi_linkage, &qmt->qmt_pool_list);
        EXIT;
 out:
        if (rc)
@@ -294,7 +294,7 @@ static void qmt_pool_free(const struct lu_env *env, struct qmt_pool_info *pool)
                pool->qpi_qmt = NULL;
        }
 
-       LASSERT(cfs_list_empty(&pool->qpi_linkage));
+       LASSERT(list_empty(&pool->qpi_linkage));
        OBD_FREE_PTR(pool);
 }
 
@@ -345,27 +345,27 @@ static struct qmt_pool_info *qmt_pool_lookup(const struct lu_env *env,
 void qmt_pool_fini(const struct lu_env *env, struct qmt_device *qmt)
 {
        struct qmt_pool_info    *pool;
-       cfs_list_t              *pos, *n;
+       struct list_head        *pos, *n;
        ENTRY;
 
        if (qmt->qmt_pool_hash == NULL)
                RETURN_EXIT;
 
        /* parse the list of pools and destroy each element */
-       cfs_list_for_each_safe(pos, n, &qmt->qmt_pool_list) {
-               pool = cfs_list_entry(pos, struct qmt_pool_info,
-                                     qpi_linkage);
+       list_for_each_safe(pos, n, &qmt->qmt_pool_list) {
+               pool = list_entry(pos, struct qmt_pool_info,
+                                 qpi_linkage);
                /* remove from hash */
                cfs_hash_del(qmt->qmt_pool_hash, &pool->qpi_key,
                             &pool->qpi_hash);
 
                /* remove from list */
-               cfs_list_del_init(&pool->qpi_linkage);
+               list_del_init(&pool->qpi_linkage);
 
                /* release extra reference taken in qmt_pool_alloc */
                qpi_putref(env, pool);
        }
-       LASSERT(cfs_list_empty(&qmt->qmt_pool_list));
+       LASSERT(list_empty(&qmt->qmt_pool_list));
 
        cfs_hash_putref(qmt->qmt_pool_hash);
        qmt->qmt_pool_hash = NULL;
@@ -404,7 +404,7 @@ int qmt_pool_init(const struct lu_env *env, struct qmt_device *qmt)
        }
 
        /* initialize pool list */
-       CFS_INIT_LIST_HEAD(&qmt->qmt_pool_list);
+       INIT_LIST_HEAD(&qmt->qmt_pool_list);
 
        /* Instantiate pool master for the default data and metadata pool (both
         * have a pool ID equal to 0).
@@ -451,7 +451,7 @@ int qmt_pool_prepare(const struct lu_env *env, struct qmt_device *qmt,
        struct qmt_pool_info    *pool;
        struct dt_device        *dev = NULL;
        dt_obj_version_t         version;
-       cfs_list_t              *pos;
+       struct list_head        *pos;
        int                      rc = 0, qtype;
        ENTRY;
 
@@ -459,13 +459,13 @@ int qmt_pool_prepare(const struct lu_env *env, struct qmt_device *qmt,
 
        /* iterate over each pool in the hash and allocate a quota site for each
         * one. This involves creating a global index file on disk */
-       cfs_list_for_each(pos, &qmt->qmt_pool_list) {
+       list_for_each(pos, &qmt->qmt_pool_list) {
                struct dt_object        *obj;
                int                      pool_type, pool_id;
                struct lquota_entry     *lqe;
 
-               pool = cfs_list_entry(pos, struct qmt_pool_info,
-                                     qpi_linkage);
+               pool = list_entry(pos, struct qmt_pool_info,
+                                 qpi_linkage);
 
                pool_id   = pool->qpi_key & 0x0000ffff;
                pool_type = pool->qpi_key >> 16;
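
The qmt_pool.c conversion is mostly about the cfs_hash callbacks, which operate on struct hlist_node bucket linkage; the containing pool is recovered with hlist_entry(), the hlist flavour of container_of(). A short sketch of that pattern with a hypothetical pool type (not the real struct qmt_pool_info layout):

#include <linux/list.h>
#include <linux/types.h>

struct demo_pool {
	__u32			dp_key;		/* pool_id + (pool_type << 16) style key */
	struct hlist_node	dp_hash;	/* hash bucket linkage */
	struct list_head	dp_linkage;	/* linkage on the device-wide pool list */
};

/* hash callbacks only see the hlist_node; map it back to the pool */
static struct demo_pool *demo_pool_from_hnode(struct hlist_node *hnode)
{
	return hlist_entry(hnode, struct demo_pool, dp_hash);
}

static int demo_keycmp(const void *key, struct hlist_node *hnode)
{
	return demo_pool_from_hnode(hnode)->dp_key == *(const __u32 *)key;
}
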
index 1ad6fc9..feddff4 100644 (file)
@@ -35,7 +35,7 @@
 
 #include "qsd_internal.h"
 
-static CFS_LIST_HEAD(qfs_list);
+static struct list_head qfs_list = LIST_HEAD_INIT(qfs_list);
 /* protect the qfs_list */
 static DEFINE_SPINLOCK(qfs_list_lock);
 
@@ -53,8 +53,8 @@ void qsd_put_fsinfo(struct qsd_fsinfo *qfs)
        LASSERT(qfs->qfs_ref > 0);
        qfs->qfs_ref--;
        if (qfs->qfs_ref == 0) {
-               LASSERT(cfs_list_empty(&qfs->qfs_qsd_list));
-               cfs_list_del(&qfs->qfs_link);
+               LASSERT(list_empty(&qfs->qfs_qsd_list));
+               list_del(&qfs->qfs_link);
                OBD_FREE_PTR(qfs);
        }
        spin_unlock(&qfs_list_lock);
@@ -89,14 +89,14 @@ struct qsd_fsinfo *qsd_get_fsinfo(char *name, bool create)
                        RETURN(NULL);
 
                mutex_init(&new->qfs_mutex);
-               CFS_INIT_LIST_HEAD(&new->qfs_qsd_list);
+               INIT_LIST_HEAD(&new->qfs_qsd_list);
                strcpy(new->qfs_name, name);
                new->qfs_ref = 1;
        }
 
        /* search in the fsinfo list */
        spin_lock(&qfs_list_lock);
-       cfs_list_for_each_entry(qfs, &qfs_list, qfs_link) {
+       list_for_each_entry(qfs, &qfs_list, qfs_link) {
                if (!strcmp(qfs->qfs_name, name)) {
                        qfs->qfs_ref++;
                        goto out;
@@ -107,7 +107,7 @@ struct qsd_fsinfo *qsd_get_fsinfo(char *name, bool create)
 
        if (new) {
                /* not found, but we were asked to create a new one */
-               cfs_list_add_tail(&new->qfs_link, &qfs_list);
+               list_add_tail(&new->qfs_link, &qfs_list);
                qfs = new;
                new = NULL;
        }
@@ -175,7 +175,7 @@ int qsd_process_config(struct lustre_cfg *lcfg)
                struct qsd_instance     *qsd;
                struct qsd_qtype_info   *qqi;
 
-               cfs_list_for_each_entry(qsd, &qfs->qfs_qsd_list, qsd_link) {
+               list_for_each_entry(qsd, &qfs->qfs_qsd_list, qsd_link) {
                        bool    skip = false;
                        int     type;
 
index b533fdd..fb6bccd 100644 (file)
@@ -76,10 +76,10 @@ struct qsd_instance {
        struct qsd_fsinfo       *qsd_fsinfo;
 
        /* link into qfs_qsd_list of qfs_fsinfo */
-       cfs_list_t               qsd_link;
+       struct list_head         qsd_link;
 
        /* list of lqe entry which might need quota space adjustment */
-       cfs_list_t               qsd_adjust_list;
+       struct list_head         qsd_adjust_list;
 
        /* lock protecting adjust list */
        spinlock_t               qsd_adjust_lock;
@@ -88,7 +88,7 @@ struct qsd_instance {
        struct ptlrpc_thread     qsd_upd_thread;
 
        /* list of update tasks */
-       cfs_list_t               qsd_upd_list;
+       struct list_head         qsd_upd_list;
 
        /* r/w spinlock protecting:
         * - the state flags
@@ -163,9 +163,9 @@ struct qsd_qtype_info {
        struct lprocfs_stats    *qqi_stats;
 
        /* deferred update for the global index copy */
-       cfs_list_t               qqi_deferred_glb;
+       struct list_head         qqi_deferred_glb;
        /* deferred update for the slave index copy */
-       cfs_list_t               qqi_deferred_slv;
+       struct list_head         qqi_deferred_slv;
 
        /* Various flags representing the current state of the slave for this
         * quota type. */
@@ -191,11 +191,11 @@ struct qsd_fsinfo {
        unsigned int            qfs_enabled[LQUOTA_NR_RES];
 
        /* list of all qsd_instance for this fs */
-       cfs_list_t              qfs_qsd_list;
+       struct list_head        qfs_qsd_list;
        struct mutex            qfs_mutex;
 
        /* link to the global quota fsinfo list.  */
-       cfs_list_t              qfs_link;
+       struct list_head        qfs_link;
 
        /* reference count */
        int                     qfs_ref;
@@ -228,7 +228,7 @@ static inline void qqi_putref(struct qsd_qtype_info *qqi)
 
 /* update record for slave & global index copy */
 struct qsd_upd_rec {
-       cfs_list_t              qur_link; /* link into qsd_upd_list */
+       struct list_head        qur_link; /* link into qsd_upd_list */
        union lquota_id         qur_qid;
        union lquota_rec        qur_rec;
        struct qsd_qtype_info  *qur_qqi;
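
In qsd_internal.h every cfs_list_t member becomes an embedded struct list_head; in kernel builds cfs_list_t aliased struct list_head, so the layout is unchanged. A sketch of how such an embedded member is declared and how the owning record is recovered, with a placeholder type rather than struct qsd_upd_rec:

#include <linux/list.h>
#include <linux/types.h>

struct demo_upd {
	struct list_head	du_link;	/* link into an owner's update list */
	__u64			du_ver;
};

/* peek at the oldest queued record, or NULL when the list is empty */
static inline struct demo_upd *demo_first_upd(struct list_head *head)
{
	if (list_empty(head))
		return NULL;
	return list_first_entry(head, struct demo_upd, du_link);
}
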
index 3e396d5..331a219 100644 (file)
@@ -281,8 +281,8 @@ static void qsd_qtype_fini(const struct lu_env *env, struct qsd_instance *qsd,
        qsd->qsd_type_array[qtype] = NULL;
 
        /* all deferred work lists should be empty */
-       LASSERT(cfs_list_empty(&qqi->qqi_deferred_glb));
-       LASSERT(cfs_list_empty(&qqi->qqi_deferred_slv));
+       LASSERT(list_empty(&qqi->qqi_deferred_glb));
+       LASSERT(list_empty(&qqi->qqi_deferred_slv));
 
        /* shutdown lquota site */
        if (qqi->qqi_site != NULL && !IS_ERR(qqi->qqi_site)) {
@@ -383,8 +383,8 @@ static int qsd_qtype_init(const struct lu_env *env, struct qsd_instance *qsd,
        qqi->qqi_reint        = false;
        init_waitqueue_head(&qqi->qqi_reint_thread.t_ctl_waitq);
        thread_set_flags(&qqi->qqi_reint_thread, SVC_STOPPED);
-       CFS_INIT_LIST_HEAD(&qqi->qqi_deferred_glb);
-       CFS_INIT_LIST_HEAD(&qqi->qqi_deferred_slv);
+       INIT_LIST_HEAD(&qqi->qqi_deferred_glb);
+       INIT_LIST_HEAD(&qqi->qqi_deferred_slv);
 
        /* open accounting object */
        LASSERT(qqi->qqi_acct_obj == NULL);
@@ -514,7 +514,7 @@ void qsd_fini(const struct lu_env *env, struct qsd_instance *qsd)
        if (qsd->qsd_fsinfo != NULL) {
                mutex_lock(&qsd->qsd_fsinfo->qfs_mutex);
                /* remove from the list of fsinfo */
-               cfs_list_del_init(&qsd->qsd_link);
+               list_del_init(&qsd->qsd_link);
                mutex_unlock(&qsd->qsd_fsinfo->qfs_mutex);
                qsd_put_fsinfo(qsd->qsd_fsinfo);
                qsd->qsd_fsinfo = NULL;
@@ -573,12 +573,12 @@ struct qsd_instance *qsd_init(const struct lu_env *env, char *svname,
 
        /* generic initializations */
        rwlock_init(&qsd->qsd_lock);
-       CFS_INIT_LIST_HEAD(&qsd->qsd_link);
+       INIT_LIST_HEAD(&qsd->qsd_link);
        thread_set_flags(&qsd->qsd_upd_thread, SVC_STOPPED);
        init_waitqueue_head(&qsd->qsd_upd_thread.t_ctl_waitq);
-       CFS_INIT_LIST_HEAD(&qsd->qsd_upd_list);
+       INIT_LIST_HEAD(&qsd->qsd_upd_list);
        spin_lock_init(&qsd->qsd_adjust_lock);
-       CFS_INIT_LIST_HEAD(&qsd->qsd_adjust_list);
+       INIT_LIST_HEAD(&qsd->qsd_adjust_list);
        qsd->qsd_prepared = false;
        qsd->qsd_started = false;
 
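
The qsd_init()/qsd_fini() hunks above show the usual life-cycle discipline: every head and node is set up with INIT_LIST_HEAD(), nodes are removed with list_del_init() so they stay self-pointing, and teardown asserts that the heads are empty. A small sketch of that discipline with placeholder names:

#include <linux/list.h>
#include <linux/bug.h>

struct demo_instance {
	struct list_head	di_link;	/* node: this instance on a global list */
	struct list_head	di_upd_list;	/* head: per-instance work queue */
};

static void demo_instance_init(struct demo_instance *di)
{
	INIT_LIST_HEAD(&di->di_link);
	INIT_LIST_HEAD(&di->di_upd_list);
}

static void demo_instance_fini(struct demo_instance *di)
{
	/* safe even if di_link was never added to any list */
	list_del_init(&di->di_link);
	WARN_ON(!list_empty(&di->di_upd_list));
}
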
index fea082f..428b1a6 100644 (file)
@@ -553,12 +553,12 @@ void qsd_stop_reint_thread(struct qsd_qtype_info *qqi)
 }
 
 static int qsd_entry_iter_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                            cfs_hlist_node_t *hnode, void *data)
+                            struct hlist_node *hnode, void *data)
 {
        struct lquota_entry     *lqe;
        int                     *pending = (int *)data;
 
-       lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+       lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        LASSERT(atomic_read(&lqe->lqe_ref) > 0);
 
        lqe_read_lock(lqe);
@@ -579,9 +579,9 @@ static bool qsd_pending_updates(struct qsd_qtype_info *qqi)
 
        /* any pending quota adjust? */
        spin_lock(&qsd->qsd_adjust_lock);
-       cfs_list_for_each_entry_safe(lqe, n, &qsd->qsd_adjust_list, lqe_link) {
+       list_for_each_entry_safe(lqe, n, &qsd->qsd_adjust_list, lqe_link) {
                if (lqe2qqi(lqe) == qqi) {
-                       cfs_list_del_init(&lqe->lqe_link);
+                       list_del_init(&lqe->lqe_link);
                        lqe_putref(lqe);
                }
        }
@@ -589,7 +589,7 @@ static bool qsd_pending_updates(struct qsd_qtype_info *qqi)
 
        /* any pending updates? */
        read_lock(&qsd->qsd_lock);
-       cfs_list_for_each_entry(upd, &qsd->qsd_upd_list, qur_link) {
+       list_for_each_entry(upd, &qsd->qsd_upd_list, qur_link) {
                if (upd->qur_qqi == qqi) {
                        read_unlock(&qsd->qsd_lock);
                        CDEBUG(D_QUOTA, "%s: pending %s updates for type:%d.\n",
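
qsd_pending_updates() above is a straightforward ownership scan over two lists; the only API change is the iterator names. A compact sketch of that kind of scan, with a placeholder record type and owner pointer:

#include <linux/list.h>
#include <linux/types.h>

struct demo_rec {
	struct list_head	dr_link;
	void			*dr_owner;
};

/* return true if any queued record belongs to @owner */
static bool demo_has_pending(struct list_head *head, void *owner)
{
	struct demo_rec *rec;

	list_for_each_entry(rec, head, dr_link)
		if (rec->dr_owner == owner)
			return true;
	return false;
}
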
index 13f07fd..17dfb55 100644 (file)
@@ -61,7 +61,7 @@ static struct qsd_upd_rec *qsd_upd_alloc(struct qsd_qtype_info *qqi,
        }
 
        /* fill it */
-       CFS_INIT_LIST_HEAD(&upd->qur_link);
+       INIT_LIST_HEAD(&upd->qur_link);
        upd->qur_qqi = qqi;
        upd->qur_lqe = lqe;
        if (lqe)
@@ -97,7 +97,7 @@ static void qsd_upd_add(struct qsd_instance *qsd, struct qsd_upd_rec *upd)
 }
 
 /* must hold the qsd_lock */
-static void qsd_add_deferred(struct qsd_instance *qsd, cfs_list_t *list,
+static void qsd_add_deferred(struct qsd_instance *qsd, struct list_head *list,
                             struct qsd_upd_rec *upd)
 {
        struct qsd_upd_rec      *tmp, *n;
@@ -111,7 +111,7 @@ static void qsd_add_deferred(struct qsd_instance *qsd, cfs_list_t *list,
        }
 
        /* Sort the updates in ascending order */
-       cfs_list_for_each_entry_safe_reverse(tmp, n, list, qur_link) {
+       list_for_each_entry_safe_reverse(tmp, n, list, qur_link) {
 
                /* There could be some legacy records which have duplicated
                 * version. Imagine following scenario: slave received global
@@ -124,21 +124,21 @@ static void qsd_add_deferred(struct qsd_instance *qsd, cfs_list_t *list,
                        LASSERT(tmp->qur_lqe);
                        LQUOTA_ERROR(tmp->qur_lqe, "Found a conflict record "
                                     "with ver:"LPU64"", tmp->qur_ver);
-                       cfs_list_del_init(&tmp->qur_link);
+                       list_del_init(&tmp->qur_link);
                        qsd_upd_free(tmp);
                } else if (upd->qur_ver < tmp->qur_ver) {
                        continue;
                } else {
-                       cfs_list_add_tail(&upd->qur_link, &tmp->qur_link);
+                       list_add_tail(&upd->qur_link, &tmp->qur_link);
                        return;
                }
        }
-       cfs_list_add(&upd->qur_link, list);
+       list_add(&upd->qur_link, list);
 }
 
 /* must hold the qsd_lock */
-static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi, cfs_list_t *list,
-                                __u64 ver)
+static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi,
+                                struct list_head *list, __u64 ver)
 {
        struct qsd_upd_rec      *upd, *tmp;
        ENTRY;
@@ -146,10 +146,10 @@ static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi, cfs_list_t *list,
        /* Get the first update record in the list, which has the smallest
         * version, discard all records with versions smaller than the current
         * one */
-       cfs_list_for_each_entry_safe(upd, tmp, list, qur_link) {
+       list_for_each_entry_safe(upd, tmp, list, qur_link) {
                if (upd->qur_ver <= ver) {
                        /* drop this update */
-                       cfs_list_del_init(&upd->qur_link);
+                       list_del_init(&upd->qur_link);
                        CDEBUG(D_QUOTA, "%s: skipping deferred update ver:"
                               LPU64"/"LPU64", global:%d, qid:"LPU64"\n",
                               qqi->qqi_qsd->qsd_svname, upd->qur_ver, ver,
@@ -161,7 +161,7 @@ static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi, cfs_list_t *list,
        }
 
        /* No remaining deferred update */
-       if (cfs_list_empty(list))
+       if (list_empty(list))
                RETURN_EXIT;
 
        CDEBUG(D_QUOTA, "%s: found deferred update record. "
@@ -188,8 +188,8 @@ static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi, cfs_list_t *list,
  */
 void qsd_bump_version(struct qsd_qtype_info *qqi, __u64 ver, bool global)
 {
-       cfs_list_t      *list;
-       __u64           *idx_ver;
+       struct list_head *list;
+       __u64            *idx_ver;
 
        idx_ver = global ? &qqi->qqi_glb_ver : &qqi->qqi_slv_ver;
        list    = global ? &qqi->qqi_deferred_glb : &qqi->qqi_deferred_slv;
@@ -262,8 +262,8 @@ void qsd_upd_schedule(struct qsd_qtype_info *qqi, struct lquota_entry *lqe,
                /* Out of order update (the one with smaller version hasn't
                 * reached slave or hasn't been flushed to disk yet), or
                 * the reintegration is in progress. Defer the update. */
-               cfs_list_t *list = global ? &qqi->qqi_deferred_glb :
-                                           &qqi->qqi_deferred_slv;
+               struct list_head *list = global ? &qqi->qqi_deferred_glb :
+                                                 &qqi->qqi_deferred_slv;
                qsd_add_deferred(qsd, list, upd);
        }
 
@@ -326,13 +326,13 @@ void qsd_adjust_schedule(struct lquota_entry *lqe, bool defer, bool cancel)
 
        /* the lqe is being queued for the per-ID lock cancel, we should
         * cancel the lock cancel and re-add it for quota adjust */
-       if (!cfs_list_empty(&lqe->lqe_link) &&
+       if (!list_empty(&lqe->lqe_link) &&
            lqe->lqe_adjust_time == 0) {
-               cfs_list_del_init(&lqe->lqe_link);
+               list_del_init(&lqe->lqe_link);
                lqe_putref(lqe);
        }
 
-       if (cfs_list_empty(&lqe->lqe_link)) {
+       if (list_empty(&lqe->lqe_link)) {
                if (cancel)
                        lqe->lqe_adjust_time = 0;
                else
@@ -341,10 +341,10 @@ void qsd_adjust_schedule(struct lquota_entry *lqe, bool defer, bool cancel)
                                cfs_time_current_64();
                /* lqe reference transferred to list */
                if (defer)
-                       cfs_list_add_tail(&lqe->lqe_link,
+                       list_add_tail(&lqe->lqe_link,
                                          &qsd->qsd_adjust_list);
                else
-                       cfs_list_add(&lqe->lqe_link, &qsd->qsd_adjust_list);
+                       list_add(&lqe->lqe_link, &qsd->qsd_adjust_list);
                added = true;
        }
        spin_unlock(&qsd->qsd_adjust_lock);
@@ -357,19 +357,19 @@ void qsd_adjust_schedule(struct lquota_entry *lqe, bool defer, bool cancel)
 
 /* return true if there are pending writeback records or pending
  * adjust requests */
-static bool qsd_job_pending(struct qsd_instance *qsd, cfs_list_t *upd,
+static bool qsd_job_pending(struct qsd_instance *qsd, struct list_head *upd,
                            bool *uptodate)
 {
        bool    job_pending = false;
        int     qtype;
 
-       LASSERT(cfs_list_empty(upd));
+       LASSERT(list_empty(upd));
        *uptodate = true;
 
        spin_lock(&qsd->qsd_adjust_lock);
-       if (!cfs_list_empty(&qsd->qsd_adjust_list)) {
+       if (!list_empty(&qsd->qsd_adjust_list)) {
                struct lquota_entry *lqe;
-               lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
+               lqe = list_entry(qsd->qsd_adjust_list.next,
                                     struct lquota_entry, lqe_link);
                if (cfs_time_beforeq_64(lqe->lqe_adjust_time,
                                        cfs_time_current_64()))
@@ -378,8 +378,8 @@ static bool qsd_job_pending(struct qsd_instance *qsd, cfs_list_t *upd,
        spin_unlock(&qsd->qsd_adjust_lock);
 
        write_lock(&qsd->qsd_lock);
-       if (!cfs_list_empty(&qsd->qsd_upd_list)) {
-               cfs_list_splice_init(&qsd->qsd_upd_list, upd);
+       if (!list_empty(&qsd->qsd_upd_list)) {
+               list_splice_init(&qsd->qsd_upd_list, upd);
                job_pending = true;
        }
 
@@ -412,7 +412,7 @@ static int qsd_upd_thread(void *arg)
        struct qsd_instance     *qsd = (struct qsd_instance *)arg;
        struct ptlrpc_thread    *thread = &qsd->qsd_upd_thread;
        struct l_wait_info       lwi;
-       cfs_list_t               queue;
+       struct list_head         queue;
        struct qsd_upd_rec      *upd, *n;
        struct lu_env           *env;
        int                      qtype, rc = 0;
@@ -435,29 +435,29 @@ static int qsd_upd_thread(void *arg)
        thread_set_flags(thread, SVC_RUNNING);
        wake_up(&thread->t_ctl_waitq);
 
-       CFS_INIT_LIST_HEAD(&queue);
+       INIT_LIST_HEAD(&queue);
        lwi = LWI_TIMEOUT(cfs_time_seconds(QSD_WB_INTERVAL), NULL, NULL);
        while (1) {
                l_wait_event(thread->t_ctl_waitq,
                             qsd_job_pending(qsd, &queue, &uptodate) ||
                             !thread_is_running(thread), &lwi);
 
-               cfs_list_for_each_entry_safe(upd, n, &queue, qur_link) {
-                       cfs_list_del_init(&upd->qur_link);
+               list_for_each_entry_safe(upd, n, &queue, qur_link) {
+                       list_del_init(&upd->qur_link);
                        qsd_process_upd(env, upd);
                        qsd_upd_free(upd);
                }
 
                spin_lock(&qsd->qsd_adjust_lock);
                cur_time = cfs_time_current_64();
-               cfs_list_for_each_entry_safe(lqe, tmp, &qsd->qsd_adjust_list,
-                                            lqe_link) {
+               list_for_each_entry_safe(lqe, tmp, &qsd->qsd_adjust_list,
+                                        lqe_link) {
                        /* deferred items are sorted by time */
                        if (!cfs_time_beforeq_64(lqe->lqe_adjust_time,
                                                 cur_time))
                                break;
 
-                       cfs_list_del_init(&lqe->lqe_link);
+                       list_del_init(&lqe->lqe_link);
                        spin_unlock(&qsd->qsd_adjust_lock);
 
                        if (thread_is_running(thread) && uptodate) {
@@ -523,8 +523,8 @@ static void qsd_cleanup_deferred(struct qsd_instance *qsd)
                        continue;
 
                write_lock(&qsd->qsd_lock);
-               cfs_list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_glb,
-                                            qur_link) {
+               list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_glb,
+                                        qur_link) {
                        CWARN("%s: Free global deferred upd: ID:"LPU64", "
                              "ver:"LPU64"/"LPU64"\n", qsd->qsd_svname,
                              upd->qur_qid.qid_uid, upd->qur_ver,
@@ -532,8 +532,8 @@ static void qsd_cleanup_deferred(struct qsd_instance *qsd)
                        list_del_init(&upd->qur_link);
                        qsd_upd_free(upd);
                }
-               cfs_list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_slv,
-                                            qur_link) {
+               list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_slv,
+                                        qur_link) {
                        CWARN("%s: Free slave deferred upd: ID:"LPU64", "
                              "ver:"LPU64"/"LPU64"\n", qsd->qsd_svname,
                              upd->qur_qid.qid_uid, upd->qur_ver,
@@ -550,10 +550,10 @@ static void qsd_cleanup_adjust(struct qsd_instance *qsd)
        struct lquota_entry     *lqe;
 
        spin_lock(&qsd->qsd_adjust_lock);
-       while (!cfs_list_empty(&qsd->qsd_adjust_list)) {
-               lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
-                                    struct lquota_entry, lqe_link);
-               cfs_list_del_init(&lqe->lqe_link);
+       while (!list_empty(&qsd->qsd_adjust_list)) {
+               lqe = list_entry(qsd->qsd_adjust_list.next,
+                                struct lquota_entry, lqe_link);
+               list_del_init(&lqe->lqe_link);
                lqe_putref(lqe);
        }
        spin_unlock(&qsd->qsd_adjust_lock);
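
The writeback thread above empties qsd_upd_list by splicing it onto a private queue while holding the lock and then processing the entries without it; list_splice_init() both moves the entries and re-initializes the source head, so later list_empty() checks keep working. A sketch of that splice-and-drain step with placeholder types and a hypothetical spinlock:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_rec {
	struct list_head	dr_link;
};

static void demo_drain(struct list_head *pending, spinlock_t *lock)
{
	struct demo_rec *rec, *next;
	LIST_HEAD(queue);		/* local, stack-allocated list head */

	spin_lock(lock);
	if (!list_empty(pending))
		list_splice_init(pending, &queue);	/* steal the whole list */
	spin_unlock(lock);

	list_for_each_entry_safe(rec, next, &queue, dr_link) {
		list_del_init(&rec->dr_link);
		/* ... process rec ... */
		kfree(rec);
	}
}

Stealing the whole list under the lock keeps the critical section constant-time no matter how many updates are queued.
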
index 7b6906a..4c5edb0 100644 (file)
@@ -61,7 +61,7 @@
 
 static struct it_node {
         struct interval_node node;
-        cfs_list_t list;
+       struct list_head list;
         int hit, valid;
 } *it_array;
 static int it_count;
@@ -377,7 +377,7 @@ static int it_test_performance(struct interval_node *root, unsigned long len)
         /* list */
         contended_count = 0;
         gettimeofday(&start, NULL);
-        cfs_list_for_each_entry(n, &header, list) {
+       list_for_each_entry(n, &header, list) {
                 if (extent_overlapped(&ext, &n->node.in_extent)) {
                         count = LOOP_COUNT;
                         while (count--);
@@ -422,7 +422,7 @@ static struct interval_node *it_test_helper(struct interval_node *root)
                                 __F(&n->node.in_extent));
                         interval_erase(&n->node, &root);
                         n->valid = 0;
-                        cfs_list_del_init(&n->list);
+                       list_del_init(&n->list);
                 } else {
                         __u64 low, high;
                         low = (random() % max_count) & ALIGN_MASK;
@@ -435,7 +435,7 @@ static struct interval_node *it_test_helper(struct interval_node *root)
                         dprintf("Adding a node "__S"\n",
                                 __F(&n->node.in_extent));
                         n->valid = 1;
-                        cfs_list_add(&n->list, &header);
+                       list_add(&n->list, &header);
                 }
         }
 
@@ -472,9 +472,9 @@ static struct interval_node *it_test_init(int count)
                 n->hit = 0;
                 n->valid = 1;
                 if (i == 0)
-                        cfs_list_add_tail(&n->list, &header);
+                       list_add_tail(&n->list, &header);
                 else
-                        cfs_list_add_tail(&n->list, &it_array[rand()%i].list);
+                       list_add_tail(&n->list, &it_array[rand()%i].list);
         }
 
         return root;
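
it_test.c is a user-space test, so after this change it relies on whichever libcfs user-space header supplies the plain list helpers (an assumption about the build, not something visible in this hunk); the usage itself is the ordinary embedded-node pattern. A sketch of that pattern, assuming the same list API is in scope via the test's existing includes:

/* placeholder node type; assumes struct list_head and the list_* helpers are
 * already available from the test's existing includes */
struct test_node {
	struct list_head	tn_list;	/* linkage on the test's header list */
	int			tn_hit;
};

static int count_hits(struct list_head *header)
{
	struct test_node *n;
	int hits = 0;

	list_for_each_entry(n, header, tn_list)
		hits += n->tn_hit;
	return hits;
}
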
index 3cafd7c..a92984a 100644 (file)
@@ -66,19 +66,19 @@ int verbose;
 bool dry_run;
 
 struct obd_group_info {
-       __u64           grp_last_id;
-       __u64           grp_seq;
-       cfs_list_t      grp_list;
+       __u64                   grp_last_id;
+       __u64                   grp_seq;
+       struct list_head        grp_list;
 };
 
-cfs_list_t grp_info_list;
+struct list_head grp_info_list;
 
-static void grp_info_list_destroy(cfs_list_t *list)
+static void grp_info_list_destroy(struct list_head *list)
 {
        struct obd_group_info *grp, *tmp;
 
-       cfs_list_for_each_entry_safe(grp, tmp, list, grp_list) {
-               cfs_list_del_init(&grp->grp_list);
+       list_for_each_entry_safe(grp, tmp, list, grp_list) {
+               list_del_init(&grp->grp_list);
                free(grp);
        }
 }
@@ -189,18 +189,18 @@ static __u64 read_last_id(char *file_path)
        return le64_to_cpu(last_id);
 }
 
-struct obd_group_info *find_or_create_grp(cfs_list_t *list, __u64 seq,
+struct obd_group_info *find_or_create_grp(struct list_head *list, __u64 seq,
                                          const char *mount)
 {
        struct obd_group_info   *grp;
-       cfs_list_t              *entry;
+       struct list_head        *entry;
        char                    tmp_path[PATH_MAX];
        char                    seq_name[32];
        int                     retval;
        __u64                   tmp_last_id;
 
-       cfs_list_for_each(entry, list) {
-               grp = (struct obd_group_info *)cfs_list_entry(entry,
+       list_for_each(entry, list) {
+               grp = (struct obd_group_info *)list_entry(entry,
                                                struct obd_group_info,
                                                grp_list);
                if (grp->grp_seq == seq)
@@ -247,7 +247,7 @@ struct obd_group_info *find_or_create_grp(cfs_list_t *list, __u64 seq,
        grp->grp_last_id = tmp_last_id;
        grp->grp_seq = seq;
 
-       cfs_list_add(&grp->grp_list, list);
+       list_add(&grp->grp_list, list);
        return grp;
 }