LU-3963 client: move llite,lov,target,obdecho to linux list api 44/9944/3
author    James Simmons <uja.ornl@gmail.com>
          Tue, 22 Apr 2014 18:16:34 +0000 (14:16 -0400)
committer Oleg Drokin <oleg.drokin@intel.com>
          Wed, 7 May 2014 02:11:23 +0000 (02:11 +0000)
Move several of the client components (llite, lov, obdecho and target)
over to the Linux list API.

Change-Id: I8fff62e7c8c3c2f9bf48b0074e574b83770442c3
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Reviewed-on: http://review.whamcloud.com/9944
Tested-by: Jenkins
Reviewed-by: Bob Glossman <bob.glossman@intel.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Nathaniel Clark <nathaniel.l.clark@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
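
The conversion itself is mechanical: each libcfs list wrapper is replaced by the
corresponding primitive from <linux/list.h> (cfs_list_t becomes struct list_head,
CFS_INIT_LIST_HEAD() becomes INIT_LIST_HEAD(), cfs_list_add_tail() becomes
list_add_tail(), and likewise for the cfs_hlist_* hash-list variants), together
with the indentation cleanup that comes from re-touching the lines. As a rough
illustrative sketch only -- the demo_item/demo_list names below are hypothetical
and not taken from the patch:

#include <linux/list.h>

struct demo_item {
        struct list_head  di_link;      /* was: cfs_list_t di_link; */
        int               di_val;
};

/* was: static CFS_LIST_HEAD(demo_list); */
static struct list_head demo_list = LIST_HEAD_INIT(demo_list);

static void demo_add(struct demo_item *item)
{
        /* was: CFS_INIT_LIST_HEAD() / cfs_list_add_tail() */
        INIT_LIST_HEAD(&item->di_link);
        list_add_tail(&item->di_link, &demo_list);
}

static void demo_drain(void)
{
        struct demo_item *item, *next;

        /* was: cfs_list_for_each_entry_safe() / cfs_list_del_init() */
        list_for_each_entry_safe(item, next, &demo_list, di_link) {
                list_del_init(&item->di_link);
                /* dispose of item here */
        }
}

The hlist-based remote-permission and pool hashes follow the same pattern with
struct hlist_head/struct hlist_node and the hlist_* helpers, so the hunks below
consist almost entirely of type renames and whitespace changes rather than
behavioural changes.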
26 files changed:
lustre/llite/file.c
lustre/llite/llite_capa.c
lustre/llite/llite_close.c
lustre/llite/llite_internal.h
lustre/llite/llite_lib.c
lustre/llite/llite_rmtacl.c
lustre/llite/remote_perm.c
lustre/llite/rw.c
lustre/llite/statahead.c
lustre/llite/super25.c
lustre/llite/vvp_dev.c
lustre/llite/vvp_page.c
lustre/llite/xattr_cache.c
lustre/lov/lov_cl_internal.h
lustre/lov/lov_dev.c
lustre/lov/lov_internal.h
lustre/lov/lov_io.c
lustre/lov/lov_lock.c
lustre/lov/lov_obd.c
lustre/lov/lov_page.c
lustre/lov/lov_pool.c
lustre/lov/lov_request.c
lustre/lov/lovsub_lock.c
lustre/obdecho/echo_client.c
lustre/target/out_lib.c
lustre/target/tgt_lastrcvd.c

lustre/llite/file.c
index 9debc22..99dc465 100644
@@ -3608,15 +3608,15 @@ struct inode_operations ll_file_inode_operations = {
 /* dynamic ioctl number support routins */
 static struct llioc_ctl_data {
        struct rw_semaphore     ioc_sem;
-        cfs_list_t              ioc_head;
+       struct list_head        ioc_head;
 } llioc = {
-        __RWSEM_INITIALIZER(llioc.ioc_sem),
-        CFS_LIST_HEAD_INIT(llioc.ioc_head)
+       __RWSEM_INITIALIZER(llioc.ioc_sem),
+       LIST_HEAD_INIT(llioc.ioc_head)
 };
 
 
 struct llioc_data {
-        cfs_list_t              iocd_list;
+       struct list_head        iocd_list;
         unsigned int            iocd_size;
         llioc_callback_t        iocd_cb;
         unsigned int            iocd_count;
@@ -3645,7 +3645,7 @@ void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd)
         memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count);
 
        down_write(&llioc.ioc_sem);
-        cfs_list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
+       list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
        up_write(&llioc.ioc_sem);
 
         RETURN(in_data);
@@ -3659,11 +3659,11 @@ void ll_iocontrol_unregister(void *magic)
                 return;
 
        down_write(&llioc.ioc_sem);
-        cfs_list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
+       list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
                 if (tmp == magic) {
                         unsigned int size = tmp->iocd_size;
 
-                        cfs_list_del(&tmp->iocd_list);
+                       list_del(&tmp->iocd_list);
                        up_write(&llioc.ioc_sem);
 
                         OBD_FREE(tmp, size);
@@ -3687,7 +3687,7 @@ ll_iocontrol_call(struct inode *inode, struct file *file,
         int rc = -EINVAL, i;
 
        down_read(&llioc.ioc_sem);
-        cfs_list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
+       list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
                 for (i = 0; i < data->iocd_count; i++) {
                         if (cmd != data->iocd_cmd[i])
                                 continue;
lustre/llite/llite_capa.c
index e5effcf..7d6e8d9 100644
@@ -56,9 +56,9 @@
  */
 
 /* capas for oss writeback and those failed to renew */
-static CFS_LIST_HEAD(ll_idle_capas);
+static struct list_head ll_idle_capas = LIST_HEAD_INIT(ll_idle_capas);
 static struct ptlrpc_thread ll_capa_thread;
-static cfs_list_t *ll_capa_list = &capa_list[CAPA_SITE_CLIENT];
+static struct list_head *ll_capa_list = &capa_list[CAPA_SITE_CLIENT];
 
 /* llite capa renewal timer */
 struct timer_list ll_capa_timer;
@@ -101,14 +101,14 @@ static inline int have_expired_capa(void)
         * expired capa, return 1.
         */
        spin_lock(&capa_lock);
-        if (!cfs_list_empty(ll_capa_list)) {
-                ocapa = cfs_list_entry(ll_capa_list->next, struct obd_capa,
+       if (!list_empty(ll_capa_list)) {
+               ocapa = list_entry(ll_capa_list->next, struct obd_capa,
                                        c_list);
                 expired = capa_is_to_expire(ocapa);
                 if (!expired)
                         update_capa_timer(ocapa, capa_renewal_time(ocapa));
-        } else if (!cfs_list_empty(&ll_idle_capas)) {
-                ocapa = cfs_list_entry(ll_idle_capas.next, struct obd_capa,
+       } else if (!list_empty(&ll_idle_capas)) {
+               ocapa = list_entry(ll_idle_capas.next, struct obd_capa,
                                        c_list);
                 expired = capa_is_expired(ocapa);
                 if (!expired)
@@ -121,13 +121,13 @@ static inline int have_expired_capa(void)
        return expired;
 }
 
-static void sort_add_capa(struct obd_capa *ocapa, cfs_list_t *head)
+static void sort_add_capa(struct obd_capa *ocapa, struct list_head *head)
 {
         struct obd_capa *tmp;
-        cfs_list_t *before = NULL;
+       struct list_head *before = NULL;
 
         /* TODO: client capa is sorted by expiry, this could be optimized */
-        cfs_list_for_each_entry_reverse(tmp, head, c_list) {
+       list_for_each_entry_reverse(tmp, head, c_list) {
                 if (cfs_time_aftereq(ocapa->c_expiry, tmp->c_expiry)) {
                         before = &tmp->c_list;
                         break;
@@ -135,7 +135,7 @@ static void sort_add_capa(struct obd_capa *ocapa, cfs_list_t *head)
         }
 
         LASSERT(&ocapa->c_list != before);
-        cfs_list_add(&ocapa->c_list, before ?: head);
+       list_add(&ocapa->c_list, before ?: head);
 }
 
 static inline int obd_capa_open_count(struct obd_capa *oc)
@@ -152,11 +152,11 @@ static void ll_delete_capa(struct obd_capa *ocapa)
                 LASSERT(lli->lli_mds_capa == ocapa);
                 lli->lli_mds_capa = NULL;
         } else if (capa_for_oss(&ocapa->c_capa)) {
-                cfs_list_del_init(&ocapa->u.cli.lli_list);
+               list_del_init(&ocapa->u.cli.lli_list);
         }
 
         DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free client");
-        cfs_list_del_init(&ocapa->c_list);
+       list_del_init(&ocapa->c_list);
         capa_count[CAPA_SITE_CLIENT]--;
         /* release the ref when alloc */
         capa_put(ocapa);
@@ -190,7 +190,7 @@ static int capa_thread_main(void *unused)
                 next = NULL;
 
                spin_lock(&capa_lock);
-                cfs_list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
+               list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
                         __u64 ibits;
 
                         LASSERT(ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC);
@@ -200,7 +200,7 @@ static int capa_thread_main(void *unused)
                                 break;
                         }
 
-                        cfs_list_del_init(&ocapa->c_list);
+                       list_del_init(&ocapa->c_list);
 
                         /* for MDS capability, only renew those which belong to
                          * dir, or its inode is opened, or client holds LOOKUP
@@ -255,7 +255,7 @@ static int capa_thread_main(void *unused)
                 if (next)
                         update_capa_timer(next, capa_renewal_time(next));
 
-                cfs_list_for_each_entry_safe(ocapa, tmp, &ll_idle_capas,
+               list_for_each_entry_safe(ocapa, tmp, &ll_idle_capas,
                                              c_list) {
                         if (!capa_is_expired(ocapa)) {
                                 if (!next)
@@ -333,7 +333,7 @@ struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
                 opc == CAPA_OPC_OSS_TRUNC);
 
        spin_lock(&capa_lock);
-        cfs_list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
+       list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
                 if (capa_is_expired(ocapa))
                         continue;
                 if ((opc & CAPA_OPC_OSS_WRITE) &&
@@ -429,7 +429,7 @@ static struct obd_capa *do_lookup_oss_capa(struct inode *inode, int opc)
         struct obd_capa *ocapa;
 
         /* inside capa_lock */
-        cfs_list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
+       list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
                 if ((capa_opc(&ocapa->c_capa) & opc) != opc)
                         continue;
 
@@ -449,18 +449,18 @@ static inline void inode_add_oss_capa(struct inode *inode,
 {
         struct ll_inode_info *lli = ll_i2info(inode);
         struct obd_capa *tmp;
-        cfs_list_t *next = NULL;
+       struct list_head *next = NULL;
 
         /* capa is sorted in lli_oss_capas so lookup can always find the
          * latest one */
-        cfs_list_for_each_entry(tmp, &lli->lli_oss_capas, u.cli.lli_list) {
+       list_for_each_entry(tmp, &lli->lli_oss_capas, u.cli.lli_list) {
                 if (cfs_time_after(ocapa->c_expiry, tmp->c_expiry)) {
                         next = &tmp->u.cli.lli_list;
                         break;
                 }
         }
         LASSERT(&ocapa->u.cli.lli_list != next);
-        cfs_list_move_tail(&ocapa->u.cli.lli_list, next ?: &lli->lli_oss_capas);
+       list_move_tail(&ocapa->u.cli.lli_list, next ?: &lli->lli_oss_capas);
 }
 
 static struct obd_capa *do_add_oss_capa(struct inode *inode,
@@ -477,7 +477,7 @@ static struct obd_capa *do_add_oss_capa(struct inode *inode,
         old = do_lookup_oss_capa(inode, capa_opc(capa) & CAPA_OPC_OSS_ONLY);
         if (!old) {
                 ocapa->u.cli.inode = inode;
-                CFS_INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
+               INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
                 capa_count[CAPA_SITE_CLIENT]++;
 
                 DEBUG_CAPA(D_SEC, capa, "add OSS");
@@ -557,7 +557,7 @@ static int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
                         }
                 }
 
-                cfs_list_del_init(&ocapa->c_list);
+               list_del_init(&ocapa->c_list);
                 sort_add_capa(ocapa, &ll_idle_capas);
                spin_unlock(&capa_lock);
 
@@ -579,7 +579,7 @@ static int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
        DEBUG_CAPA(D_SEC, capa, "renew");
        EXIT;
 retry:
-       cfs_list_del_init(&ocapa->c_list);
+       list_del_init(&ocapa->c_list);
        sort_add_capa(ocapa, ll_capa_list);
        update_capa_timer(ocapa, capa_renewal_time(ocapa));
        spin_unlock(&capa_lock);
@@ -645,7 +645,7 @@ void ll_clear_inode_capas(struct inode *inode)
        if (ocapa)
                ll_delete_capa(ocapa);
 
-       cfs_list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
+       list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
                                     u.cli.lli_list)
                ll_delete_capa(ocapa);
        spin_unlock(&capa_lock);
lustre/llite/llite_close.c
index 8bff949..ec01572 100644
@@ -53,8 +53,8 @@ void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
        ENTRY;
        spin_lock(&lli->lli_lock);
        lli->lli_flags |= LLIF_SOM_DIRTY;
-       if (page != NULL && cfs_list_empty(&page->cpg_pending_linkage))
-               cfs_list_add(&page->cpg_pending_linkage,
+       if (page != NULL && list_empty(&page->cpg_pending_linkage))
+               list_add(&page->cpg_pending_linkage,
                             &club->cob_pending_list);
        spin_unlock(&lli->lli_lock);
        EXIT;
@@ -68,8 +68,8 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
 
        ENTRY;
        spin_lock(&lli->lli_lock);
-       if (page != NULL && !cfs_list_empty(&page->cpg_pending_linkage)) {
-               cfs_list_del_init(&page->cpg_pending_linkage);
+       if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
+               list_del_init(&page->cpg_pending_linkage);
                rc = 1;
        }
        spin_unlock(&lli->lli_lock);
@@ -91,7 +91,7 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
         lli->lli_flags |= flags;
 
         if ((lli->lli_flags & LLIF_DONE_WRITING) &&
-            cfs_list_empty(&club->cob_pending_list)) {
+           list_empty(&club->cob_pending_list)) {
                 struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
 
                 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
@@ -102,10 +102,10 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
                /* DONE_WRITING is allowed and inode has no dirty page. */
                spin_lock(&lcq->lcq_lock);
 
-               LASSERT(cfs_list_empty(&lli->lli_close_list));
+               LASSERT(list_empty(&lli->lli_close_list));
                CDEBUG(D_INODE, "adding inode "DFID" to close list\n",
                       PFID(ll_inode2fid(inode)));
-                cfs_list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
+               list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
 
                /* Avoid a concurrent insertion into the close thread queue:
                 * an inode is already in the close thread, open(), write(),
@@ -153,7 +153,7 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
        ENTRY;
 
        spin_lock(&lli->lli_lock);
-       if (!(cfs_list_empty(&club->cob_pending_list))) {
+       if (!(list_empty(&club->cob_pending_list))) {
                if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
                        LASSERT(*och != NULL);
                        LASSERT(lli->lli_pending_och == NULL);
@@ -205,7 +205,7 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
                }
        }
 
-       LASSERT(cfs_list_empty(&club->cob_pending_list));
+       LASSERT(list_empty(&club->cob_pending_list));
        lli->lli_flags &= ~LLIF_SOM_DIRTY;
        spin_unlock(&lli->lli_lock);
        ll_done_writing_attr(inode, op_data);
@@ -391,7 +391,7 @@ int ll_close_thread_start(struct ll_close_queue **lcq_ret)
                return -ENOMEM;
 
        spin_lock_init(&lcq->lcq_lock);
-       CFS_INIT_LIST_HEAD(&lcq->lcq_head);
+       INIT_LIST_HEAD(&lcq->lcq_head);
        init_waitqueue_head(&lcq->lcq_waitq);
        init_completion(&lcq->lcq_comp);
 
lustre/llite/llite_internal.h
index de06b82..71ebd55 100644
@@ -96,7 +96,7 @@ struct ll_getname_data {
 
 /* llite setxid/access permission for user on remote client */
 struct ll_remote_perm {
-        cfs_hlist_node_t        lrp_list;
+       struct hlist_node       lrp_list;
         uid_t                   lrp_uid;
         gid_t                   lrp_gid;
         uid_t                   lrp_fsuid;
@@ -132,8 +132,8 @@ struct ll_inode_info {
        spinlock_t                      lli_lock;
        struct posix_acl                *lli_posix_acl;
 
-       cfs_hlist_head_t                *lli_remote_perms;
-       struct mutex                            lli_rmtperm_mutex;
+       struct hlist_head               *lli_remote_perms;
+       struct mutex                    lli_rmtperm_mutex;
 
         /* identifying fields for both metadata and data stacks. */
         struct lu_fid                   lli_fid;
@@ -225,7 +225,7 @@ struct ll_inode_info {
 
                        struct rw_semaphore             f_glimpse_sem;
                        cfs_time_t                      f_glimpse_time;
-                       cfs_list_t                      f_agl_list;
+                       struct list_head                        f_agl_list;
                        __u64                           f_agl_index;
 
                        /* for writepage() only to communicate to fsync */
@@ -456,20 +456,20 @@ enum stats_track_type {
 #define RCE_HASHES      32
 
 struct rmtacl_ctl_entry {
-        cfs_list_t       rce_list;
+       struct list_head       rce_list;
         pid_t            rce_key; /* hash key */
         int              rce_ops; /* acl operation type */
 };
 
 struct rmtacl_ctl_table {
        spinlock_t      rct_lock;
-       cfs_list_t      rct_entries[RCE_HASHES];
+       struct list_head        rct_entries[RCE_HASHES];
 };
 
 #define EE_HASHES       32
 
 struct eacl_entry {
-        cfs_list_t            ee_list;
+       struct list_head            ee_list;
         pid_t                 ee_key; /* hash key */
         struct lu_fid         ee_fid;
         int                   ee_type; /* ACL type for ACCESS or DEFAULT */
@@ -478,11 +478,11 @@ struct eacl_entry {
 
 struct eacl_table {
        spinlock_t      et_lock;
-       cfs_list_t      et_entries[EE_HASHES];
+       struct list_head        et_entries[EE_HASHES];
 };
 
 struct ll_sb_info {
-       cfs_list_t                ll_list;
+       struct list_head                  ll_list;
        /* this protects pglist and ra_info.  It isn't safe to
         * grab from interrupt contexts */
        spinlock_t                ll_lock;
@@ -497,10 +497,12 @@ struct ll_sb_info {
         int                       ll_flags;
        unsigned int              ll_umounting:1,
                                  ll_xattr_cache_enabled:1;
-        cfs_list_t                ll_conn_chain; /* per-conn chain of SBs */
+       /* per-conn chain of SBs */
+       struct list_head                ll_conn_chain;
         struct lustre_client_ocd  ll_lco;
 
-        cfs_list_t                ll_orphan_dentry_list; /*please don't ask -p*/
+       /*please don't ask -p*/
+       struct list_head        ll_orphan_dentry_list;
         struct ll_close_queue    *ll_lcq;
 
         struct lprocfs_stats     *ll_stats; /* lprocfs stats counter */
@@ -555,7 +557,7 @@ struct ll_ra_read {
         pgoff_t             lrr_start;
         pgoff_t             lrr_count;
         struct task_struct *lrr_reader;
-        cfs_list_t          lrr_linkage;
+       struct list_head          lrr_linkage;
 };
 
 /*
@@ -620,7 +622,7 @@ struct ll_readahead_state {
          * progress against this file descriptor. Used by read-ahead code,
          * protected by ->ras_lock.
          */
-        cfs_list_t      ras_read_beads;
+       struct list_head      ras_read_beads;
         /*
          * The following 3 items are used for detecting the stride I/O
          * mode.
@@ -896,7 +898,7 @@ extern struct inode_operations ll_fast_symlink_inode_operations;
 /* llite/llite_close.c */
 struct ll_close_queue {
        spinlock_t              lcq_lock;
-       cfs_list_t              lcq_head;
+       struct list_head                lcq_head;
        wait_queue_head_t       lcq_waitq;
        struct completion       lcq_comp;
        atomic_t                lcq_stop;
@@ -1132,7 +1134,7 @@ int ll_removexattr(struct dentry *dentry, const char *name);
 extern struct kmem_cache *ll_remote_perm_cachep;
 extern struct kmem_cache *ll_rmtperm_hash_cachep;
 
-void free_rmtperm_hash(cfs_hlist_head_t *hash);
+void free_rmtperm_hash(struct hlist_head *hash);
 int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
 int lustre_check_remote_perm(struct inode *inode, int mask);
 
lustre/llite/llite_lib.c
index 59de21c..ee8829d 100644
@@ -59,7 +59,7 @@
 
 struct kmem_cache *ll_file_data_slab;
 
-static LIST_HEAD(ll_super_blocks);
+static struct list_head ll_super_blocks = LIST_HEAD_INIT(ll_super_blocks);
 static DEFINE_SPINLOCK(ll_sb_lock);
 
 #ifndef log2
@@ -95,7 +95,7 @@ static struct ll_sb_info *ll_init_sbi(void)
        sbi->ll_cache.ccc_lru_max = lru_page_max;
        atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
        spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
-       CFS_INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
+       INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
 
        atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
        init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
@@ -105,15 +105,15 @@ static struct ll_sb_info *ll_init_sbi(void)
        sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
        sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
                                           SBI_DEFAULT_READAHEAD_WHOLE_MAX;
-        CFS_INIT_LIST_HEAD(&sbi->ll_conn_chain);
-        CFS_INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
+       INIT_LIST_HEAD(&sbi->ll_conn_chain);
+       INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
 
         ll_generate_random_uuid(uuid);
         class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
         CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
 
        spin_lock(&ll_sb_lock);
-       cfs_list_add_tail(&sbi->ll_list, &ll_super_blocks);
+       list_add_tail(&sbi->ll_list, &ll_super_blocks);
        spin_unlock(&ll_sb_lock);
 
         sbi->ll_flags |= LL_SBI_VERBOSE;
@@ -149,7 +149,7 @@ static void ll_free_sbi(struct super_block *sb)
 
        if (sbi != NULL) {
                spin_lock(&ll_sb_lock);
-               cfs_list_del(&sbi->ll_list);
+               list_del(&sbi->ll_list);
                spin_unlock(&ll_sb_lock);
                OBD_FREE(sbi, sizeof(*sbi));
        }
@@ -739,7 +739,7 @@ static void client_common_put_super(struct super_block *sb)
 
         cl_sb_fini(sb);
 
-        cfs_list_del(&sbi->ll_conn_chain);
+       list_del(&sbi->ll_conn_chain);
 
        obd_fid_fini(sbi->ll_dt_exp->exp_obd);
         obd_disconnect(sbi->ll_dt_exp);
@@ -977,7 +977,7 @@ void ll_lli_init(struct ll_inode_info *lli)
                mutex_init(&lli->lli_write_mutex);
                init_rwsem(&lli->lli_glimpse_sem);
                lli->lli_glimpse_time = 0;
-               CFS_INIT_LIST_HEAD(&lli->lli_agl_list);
+               INIT_LIST_HEAD(&lli->lli_agl_list);
                lli->lli_agl_index = 0;
                lli->lli_async_rc = 0;
        }
lustre/llite/llite_rmtacl.c
index 8eca4a3..4f22a20 100644
@@ -82,7 +82,7 @@ static struct rmtacl_ctl_entry *rce_alloc(pid_t key, int ops)
         if (!rce)
                 return NULL;
 
-        CFS_INIT_LIST_HEAD(&rce->rce_list);
+       INIT_LIST_HEAD(&rce->rce_list);
         rce->rce_key = key;
         rce->rce_ops = ops;
 
@@ -91,8 +91,8 @@ static struct rmtacl_ctl_entry *rce_alloc(pid_t key, int ops)
 
 static void rce_free(struct rmtacl_ctl_entry *rce)
 {
-        if (!cfs_list_empty(&rce->rce_list))
-                cfs_list_del(&rce->rce_list);
+       if (!list_empty(&rce->rce_list))
+               list_del(&rce->rce_list);
 
         OBD_FREE_PTR(rce);
 }
@@ -101,9 +101,9 @@ static struct rmtacl_ctl_entry *__rct_search(struct rmtacl_ctl_table *rct,
                                            pid_t key)
 {
         struct rmtacl_ctl_entry *rce;
-        cfs_list_t *head = &rct->rct_entries[rce_hashfunc(key)];
+       struct list_head *head = &rct->rct_entries[rce_hashfunc(key)];
 
-        cfs_list_for_each_entry(rce, head, rce_list)
+       list_for_each_entry(rce, head, rce_list)
                 if (rce->rce_key == key)
                         return rce;
 
@@ -135,7 +135,7 @@ int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops)
                      "[key: %d] [ops: %d]\n", (int)key, ops);
                rce_free(e);
        }
-       cfs_list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
+       list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
        spin_unlock(&rct->rct_lock);
 
        return 0;
@@ -160,7 +160,7 @@ void rct_init(struct rmtacl_ctl_table *rct)
 
        spin_lock_init(&rct->rct_lock);
        for (i = 0; i < RCE_HASHES; i++)
-               CFS_INIT_LIST_HEAD(&rct->rct_entries[i]);
+               INIT_LIST_HEAD(&rct->rct_entries[i]);
 }
 
 void rct_fini(struct rmtacl_ctl_table *rct)
@@ -170,8 +170,8 @@ void rct_fini(struct rmtacl_ctl_table *rct)
 
        spin_lock(&rct->rct_lock);
        for (i = 0; i < RCE_HASHES; i++)
-               while (!cfs_list_empty(&rct->rct_entries[i])) {
-                       rce = cfs_list_entry(rct->rct_entries[i].next,
+               while (!list_empty(&rct->rct_entries[i])) {
+                       rce = list_entry(rct->rct_entries[i].next,
                                             struct rmtacl_ctl_entry, rce_list);
                        rce_free(rce);
                }
@@ -188,7 +188,7 @@ static struct eacl_entry *ee_alloc(pid_t key, struct lu_fid *fid, int type,
         if (!ee)
                 return NULL;
 
-        CFS_INIT_LIST_HEAD(&ee->ee_list);
+       INIT_LIST_HEAD(&ee->ee_list);
         ee->ee_key = key;
         ee->ee_fid = *fid;
         ee->ee_type = type;
@@ -199,8 +199,8 @@ static struct eacl_entry *ee_alloc(pid_t key, struct lu_fid *fid, int type,
 
 void ee_free(struct eacl_entry *ee)
 {
-        if (!cfs_list_empty(&ee->ee_list))
-                cfs_list_del(&ee->ee_list);
+       if (!list_empty(&ee->ee_list))
+               list_del(&ee->ee_list);
 
         if (ee->ee_acl)
                 lustre_ext_acl_xattr_free(ee->ee_acl);
@@ -212,14 +212,14 @@ static struct eacl_entry *__et_search_del(struct eacl_table *et, pid_t key,
                                         struct lu_fid *fid, int type)
 {
         struct eacl_entry *ee;
-        cfs_list_t *head = &et->et_entries[ee_hashfunc(key)];
+       struct list_head *head = &et->et_entries[ee_hashfunc(key)];
 
         LASSERT(fid != NULL);
-        cfs_list_for_each_entry(ee, head, ee_list)
+       list_for_each_entry(ee, head, ee_list)
                 if (ee->ee_key == key) {
                         if (lu_fid_eq(&ee->ee_fid, fid) &&
                             ee->ee_type == type) {
-                                cfs_list_del_init(&ee->ee_list);
+                               list_del_init(&ee->ee_list);
                                 return ee;
                         }
                 }
@@ -241,10 +241,10 @@ struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
 void et_search_free(struct eacl_table *et, pid_t key)
 {
        struct eacl_entry *ee, *next;
-       cfs_list_t *head = &et->et_entries[ee_hashfunc(key)];
+       struct list_head *head = &et->et_entries[ee_hashfunc(key)];
 
        spin_lock(&et->et_lock);
-       cfs_list_for_each_entry_safe(ee, next, head, ee_list)
+       list_for_each_entry_safe(ee, next, head, ee_list)
                if (ee->ee_key == key)
                        ee_free(ee);
 
@@ -268,7 +268,7 @@ int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
                      (int)key, PFID(fid), type);
                ee_free(e);
        }
-       cfs_list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
+       list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
        spin_unlock(&et->et_lock);
 
        return 0;
@@ -280,7 +280,7 @@ void et_init(struct eacl_table *et)
 
        spin_lock_init(&et->et_lock);
        for (i = 0; i < EE_HASHES; i++)
-               CFS_INIT_LIST_HEAD(&et->et_entries[i]);
+               INIT_LIST_HEAD(&et->et_entries[i]);
 }
 
 void et_fini(struct eacl_table *et)
@@ -290,9 +290,9 @@ void et_fini(struct eacl_table *et)
 
        spin_lock(&et->et_lock);
        for (i = 0; i < EE_HASHES; i++)
-               while (!cfs_list_empty(&et->et_entries[i])) {
-                       ee = cfs_list_entry(et->et_entries[i].next,
-                                           struct eacl_entry, ee_list);
+               while (!list_empty(&et->et_entries[i])) {
+                       ee = list_entry(et->et_entries[i].next,
+                                       struct eacl_entry, ee_list);
                        ee_free(ee);
                }
        spin_unlock(&et->et_lock);
lustre/llite/remote_perm.c
index d155960..52d09bb 100644
@@ -64,7 +64,7 @@ static inline struct ll_remote_perm *alloc_ll_remote_perm(void)
 
        OBD_SLAB_ALLOC_PTR_GFP(lrp, ll_remote_perm_cachep, GFP_KERNEL);
        if (lrp)
-               CFS_INIT_HLIST_NODE(&lrp->lrp_list);
+               INIT_HLIST_NODE(&lrp->lrp_list);
        return lrp;
 }
 
@@ -73,14 +73,14 @@ static inline void free_ll_remote_perm(struct ll_remote_perm *lrp)
         if (!lrp)
                 return;
 
-        if (!cfs_hlist_unhashed(&lrp->lrp_list))
-                cfs_hlist_del(&lrp->lrp_list);
+       if (!hlist_unhashed(&lrp->lrp_list))
+               hlist_del(&lrp->lrp_list);
         OBD_SLAB_FREE(lrp, ll_remote_perm_cachep, sizeof(*lrp));
 }
 
 static struct hlist_head *alloc_rmtperm_hash(void)
 {
-        cfs_hlist_head_t *hash;
+       struct hlist_head *hash;
         int i;
 
        OBD_SLAB_ALLOC_GFP(hash, ll_rmtperm_hash_cachep,
@@ -90,16 +90,16 @@ static struct hlist_head *alloc_rmtperm_hash(void)
                 return NULL;
 
         for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
-                CFS_INIT_HLIST_HEAD(hash + i);
+               INIT_HLIST_HEAD(hash + i);
 
         return hash;
 }
 
-void free_rmtperm_hash(cfs_hlist_head_t *hash)
+void free_rmtperm_hash(struct hlist_head *hash)
 {
         int i;
         struct ll_remote_perm *lrp;
-        cfs_hlist_node_t *node, *next;
+       struct hlist_node *node, *next;
 
         if(!hash)
                 return;
@@ -121,9 +121,9 @@ static inline int remote_perm_hashfunc(uid_t uid)
  * MDT when client get remote permission. */
 static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
 {
-        cfs_hlist_head_t *head;
+       struct hlist_head *head;
         struct ll_remote_perm *lrp;
-        cfs_hlist_node_t *node;
+       struct hlist_node *node;
         int found = 0, rc;
         ENTRY;
 
@@ -164,8 +164,8 @@ int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
 {
         struct ll_inode_info *lli = ll_i2info(inode);
         struct ll_remote_perm *lrp = NULL, *tmp = NULL;
-        cfs_hlist_head_t *head, *perm_hash = NULL;
-        cfs_hlist_node_t *node;
+       struct hlist_head *head, *perm_hash = NULL;
+       struct hlist_node *node;
         ENTRY;
 
         LASSERT(ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT);
@@ -235,7 +235,7 @@ again:
                 lrp->lrp_gid         = perm->rp_gid;
                 lrp->lrp_fsuid       = perm->rp_fsuid;
                 lrp->lrp_fsgid       = perm->rp_fsgid;
-                cfs_hlist_add_head(&lrp->lrp_list, head);
+               hlist_add_head(&lrp->lrp_list, head);
         }
         lli->lli_rmtperm_time = cfs_time_current();
        spin_unlock(&lli->lli_lock);
@@ -309,28 +309,3 @@ int lustre_check_remote_perm(struct inode *inode, int mask)
         ptlrpc_req_finished(req);
         RETURN(rc);
 }
-
-#if 0  /* NB: remote perms can't be freed in ll_mdc_blocking_ast of UPDATE lock,
-        * because it will fail sanity test 48.
-        */
-void ll_free_remote_perms(struct inode *inode)
-{
-        struct ll_inode_info *lli = ll_i2info(inode);
-        cfs_hlist_head_t *hash = lli->lli_remote_perms;
-        struct ll_remote_perm *lrp;
-        cfs_hlist_node_t *node, *next;
-        int i;
-
-        LASSERT(hash);
-
-       spin_lock(&lli->lli_lock);
-
-       for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
-               cfs_hlist_for_each_entry_safe(lrp, node, next, hash + i,
-                                             lrp_list)
-                       free_ll_remote_perm(lrp);
-       }
-
-       spin_unlock(&lli->lli_lock);
-}
-#endif
lustre/llite/rw.c
index a873e9e..ef26a13 100644
@@ -276,7 +276,7 @@ void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
        ras->ras_consecutive_requests++;
        rar->lrr_reader = current;
 
-       cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+       list_add(&rar->lrr_linkage, &ras->ras_read_beads);
        spin_unlock(&ras->ras_lock);
 }
 
@@ -287,7 +287,7 @@ void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
        ras = ll_ras_get(f);
 
        spin_lock(&ras->ras_lock);
-       cfs_list_del_init(&rar->lrr_linkage);
+       list_del_init(&rar->lrr_linkage);
        spin_unlock(&ras->ras_lock);
 }
 
@@ -295,7 +295,7 @@ static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
 {
         struct ll_ra_read *scan;
 
-        cfs_list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
+       list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
                 if (scan->lrr_reader == current)
                         return scan;
         }
@@ -754,7 +754,7 @@ void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
        spin_lock_init(&ras->ras_lock);
        ras_reset(inode, ras, 0);
        ras->ras_requests = 0;
-       CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
+       INIT_LIST_HEAD(&ras->ras_read_beads);
 }
 
 /*
lustre/llite/statahead.c
index bb75469..88a9bee 100644
@@ -59,13 +59,13 @@ typedef enum {
 
 struct ll_sa_entry {
        /* link into sai->sai_entries */
-       cfs_list_t              se_link;
+       struct list_head        se_link;
        /* link into sai->sai_entries_{received,stated} */
-       cfs_list_t              se_list;
+       struct list_head        se_list;
        /* link into sai hash table locally */
-       cfs_list_t              se_hash;
+       struct list_head        se_hash;
        /* entry reference count */
-       atomic_t            se_refcount;
+       atomic_t                se_refcount;
        /* entry index in the sai */
        __u64                   se_index;
        /* low layer ldlm lock handle */
@@ -89,7 +89,7 @@ static DEFINE_SPINLOCK(sai_generation_lock);
 
 static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)
 {
-        return cfs_list_empty(&entry->se_hash);
+       return list_empty(&entry->se_hash);
 }
 
 /*
@@ -115,7 +115,7 @@ ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
        int i = ll_sa_entry_hash(entry->se_qstr.hash);
 
        spin_lock(&sai->sai_cache_lock[i]);
-       cfs_list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
+       list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
        spin_unlock(&sai->sai_cache_lock[i]);
 }
 
@@ -128,7 +128,7 @@ ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
        int i = ll_sa_entry_hash(entry->se_qstr.hash);
 
        spin_lock(&sai->sai_cache_lock[i]);
-       cfs_list_del_init(&entry->se_hash);
+       list_del_init(&entry->se_hash);
        spin_unlock(&sai->sai_cache_lock[i]);
 }
 
@@ -141,15 +141,15 @@ static inline int agl_should_run(struct ll_statahead_info *sai,
 static inline struct ll_sa_entry *
 sa_first_received_entry(struct ll_statahead_info *sai)
 {
-        return cfs_list_entry(sai->sai_entries_received.next,
-                              struct ll_sa_entry, se_list);
+       return list_entry(sai->sai_entries_received.next,
+                         struct ll_sa_entry, se_list);
 }
 
 static inline struct ll_inode_info *
 agl_first_entry(struct ll_statahead_info *sai)
 {
-        return cfs_list_entry(sai->sai_entries_agl.next,
-                              struct ll_inode_info, lli_agl_list);
+       return list_entry(sai->sai_entries_agl.next,
+                         struct ll_inode_info, lli_agl_list);
 }
 
 static inline int sa_sent_full(struct ll_statahead_info *sai)
@@ -159,12 +159,12 @@ static inline int sa_sent_full(struct ll_statahead_info *sai)
 
 static inline int sa_received_empty(struct ll_statahead_info *sai)
 {
-        return cfs_list_empty(&sai->sai_entries_received);
+       return list_empty(&sai->sai_entries_received);
 }
 
 static inline int agl_list_empty(struct ll_statahead_info *sai)
 {
-        return cfs_list_empty(&sai->sai_entries_agl);
+       return list_empty(&sai->sai_entries_agl);
 }
 
 /**
@@ -247,8 +247,8 @@ ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
 
        lli = ll_i2info(sai->sai_inode);
        spin_lock(&lli->lli_sa_lock);
-       cfs_list_add_tail(&entry->se_link, &sai->sai_entries);
-       CFS_INIT_LIST_HEAD(&entry->se_list);
+       list_add_tail(&entry->se_link, &sai->sai_entries);
+       INIT_LIST_HEAD(&entry->se_list);
        ll_sa_entry_enhash(sai, entry);
        spin_unlock(&lli->lli_sa_lock);
 
@@ -270,7 +270,7 @@ ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
         struct ll_sa_entry *entry;
         int i = ll_sa_entry_hash(qstr->hash);
 
-        cfs_list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
+       list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
                 if (entry->se_qstr.hash == qstr->hash &&
                     entry->se_qstr.len == qstr->len &&
                     memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
@@ -291,7 +291,7 @@ ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
 {
        struct ll_sa_entry *entry;
 
-       cfs_list_for_each_entry(entry, &sai->sai_entries, se_link) {
+       list_for_each_entry(entry, &sai->sai_entries, se_link) {
                if (entry->se_index == index) {
                        LASSERT(atomic_read(&entry->se_refcount) > 0);
                        atomic_inc(&entry->se_refcount);
@@ -330,8 +330,8 @@ static void ll_sa_entry_put(struct ll_statahead_info *sai,
                       entry->se_qstr.len, entry->se_qstr.name, entry,
                       entry->se_index);
 
-               LASSERT(cfs_list_empty(&entry->se_link));
-               LASSERT(cfs_list_empty(&entry->se_list));
+               LASSERT(list_empty(&entry->se_link));
+               LASSERT(list_empty(&entry->se_list));
                LASSERT(ll_sa_entry_unhashed(entry));
 
                ll_sa_entry_cleanup(sai, entry);
@@ -349,15 +349,15 @@ do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
        struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
 
        LASSERT(!ll_sa_entry_unhashed(entry));
-       LASSERT(!cfs_list_empty(&entry->se_link));
+       LASSERT(!list_empty(&entry->se_link));
 
        ll_sa_entry_unhash(sai, entry);
 
        spin_lock(&lli->lli_sa_lock);
        entry->se_stat = SA_ENTRY_DEST;
-       cfs_list_del_init(&entry->se_link);
-       if (likely(!cfs_list_empty(&entry->se_list)))
-               cfs_list_del_init(&entry->se_list);
+       list_del_init(&entry->se_link);
+       if (likely(!list_empty(&entry->se_list)))
+               list_del_init(&entry->se_list);
        spin_unlock(&lli->lli_sa_lock);
 
        ll_sa_entry_put(sai, entry);
@@ -375,7 +375,7 @@ ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
                do_sa_entry_fini(sai, entry);
 
        /* drop old entry, only 'scanner' process does this, no need to lock */
-       cfs_list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
+       list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
                if (!is_omitted_entry(sai, pos->se_index))
                        break;
                do_sa_entry_fini(sai, pos);
@@ -390,19 +390,19 @@ do_sa_entry_to_stated(struct ll_statahead_info *sai,
                      struct ll_sa_entry *entry, se_stat_t stat)
 {
        struct ll_sa_entry *se;
-       cfs_list_t         *pos = &sai->sai_entries_stated;
+       struct list_head   *pos = &sai->sai_entries_stated;
 
-       if (!cfs_list_empty(&entry->se_list))
-               cfs_list_del_init(&entry->se_list);
+       if (!list_empty(&entry->se_list))
+               list_del_init(&entry->se_list);
 
-       cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
+       list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
                if (se->se_index < entry->se_index) {
                        pos = &se->se_list;
                        break;
                }
        }
 
-       cfs_list_add(&entry->se_list, pos);
+       list_add(&entry->se_list, pos);
        entry->se_stat = stat;
 }
 
@@ -445,13 +445,13 @@ static void ll_agl_add(struct ll_statahead_info *sai,
                child->lli_agl_index = index;
                spin_unlock(&child->lli_agl_lock);
 
-               LASSERT(cfs_list_empty(&child->lli_agl_list));
+               LASSERT(list_empty(&child->lli_agl_list));
 
                igrab(inode);
                spin_lock(&parent->lli_agl_lock);
                if (agl_list_empty(sai))
                        added = 1;
-               cfs_list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
+               list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
                spin_unlock(&parent->lli_agl_lock);
        } else {
                spin_unlock(&child->lli_agl_lock);
@@ -485,13 +485,13 @@ static struct ll_statahead_info *ll_sai_alloc(void)
        init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
        init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
 
-       CFS_INIT_LIST_HEAD(&sai->sai_entries);
-       CFS_INIT_LIST_HEAD(&sai->sai_entries_received);
-       CFS_INIT_LIST_HEAD(&sai->sai_entries_stated);
-       CFS_INIT_LIST_HEAD(&sai->sai_entries_agl);
+       INIT_LIST_HEAD(&sai->sai_entries);
+       INIT_LIST_HEAD(&sai->sai_entries_received);
+       INIT_LIST_HEAD(&sai->sai_entries_stated);
+       INIT_LIST_HEAD(&sai->sai_entries_agl);
 
        for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
-               CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
+               INIT_LIST_HEAD(&sai->sai_cache[i]);
                spin_lock_init(&sai->sai_cache_lock[i]);
        }
        atomic_set(&sai->sai_cache_count, 0);
@@ -536,8 +536,8 @@ static void ll_sai_put(struct ll_statahead_info *sai)
                               PFID(&lli->lli_fid),
                               sai->sai_sent, sai->sai_replied);
 
-               cfs_list_for_each_entry_safe(entry, next,
-                                            &sai->sai_entries, se_link)
+               list_for_each_entry_safe(entry, next,
+                                        &sai->sai_entries, se_link)
                        do_sa_entry_fini(sai, entry);
 
                LASSERT(list_empty(&sai->sai_entries));
@@ -562,7 +562,7 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
         int                   rc;
         ENTRY;
 
-        LASSERT(cfs_list_empty(&lli->lli_agl_list));
+       LASSERT(list_empty(&lli->lli_agl_list));
 
         /* AGL maybe fall behind statahead with one entry */
         if (is_omitted_entry(sai, index + 1)) {
@@ -637,7 +637,7 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
        }
        entry = sa_first_received_entry(sai);
        atomic_inc(&entry->se_refcount);
-       cfs_list_del_init(&entry->se_list);
+       list_del_init(&entry->se_list);
        spin_unlock(&lli->lli_sa_lock);
 
         LASSERT(entry->se_handle != 0);
@@ -753,7 +753,7 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
                        entry->se_handle = it->d.lustre.it_lock_handle;
                        ll_intent_drop_lock(it);
                        wakeup = sa_received_empty(sai);
-                       cfs_list_add_tail(&entry->se_list,
+                       list_add_tail(&entry->se_list,
                                          &sai->sai_entries_received);
                 }
                sai->sai_replied++;
@@ -1008,7 +1008,7 @@ static int ll_agl_thread(void *arg)
                 * so check whether list empty again. */
                if (!agl_list_empty(sai)) {
                        clli = agl_first_entry(sai);
-                       cfs_list_del_init(&clli->lli_agl_list);
+                       list_del_init(&clli->lli_agl_list);
                        spin_unlock(&plli->lli_agl_lock);
                        ll_agl_trigger(&clli->lli_vfs_inode, sai);
                } else {
@@ -1020,7 +1020,7 @@ static int ll_agl_thread(void *arg)
        sai->sai_agl_valid = 0;
        while (!agl_list_empty(sai)) {
                clli = agl_first_entry(sai);
-               cfs_list_del_init(&clli->lli_agl_list);
+               list_del_init(&clli->lli_agl_list);
                spin_unlock(&plli->lli_agl_lock);
                clli->lli_agl_index = 0;
                iput(&clli->lli_vfs_inode);
@@ -1177,7 +1177,7 @@ interpret_it:
                        spin_lock(&plli->lli_agl_lock);
                        while (!agl_list_empty(sai)) {
                                clli = agl_first_entry(sai);
-                               cfs_list_del_init(&clli->lli_agl_list);
+                               list_del_init(&clli->lli_agl_list);
                                spin_unlock(&plli->lli_agl_lock);
                                ll_agl_trigger(&clli->lli_vfs_inode,
                                               sai);
@@ -1233,7 +1233,7 @@ do_it:
        while (!agl_list_empty(sai) &&
               thread_is_running(thread)) {
                clli = agl_first_entry(sai);
-               cfs_list_del_init(&clli->lli_agl_list);
+               list_del_init(&clli->lli_agl_list);
                spin_unlock(&plli->lli_agl_lock);
                ll_agl_trigger(&clli->lli_vfs_inode, sai);
                spin_lock(&plli->lli_agl_lock);
@@ -1508,7 +1508,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
         if (sai) {
                 thread = &sai->sai_thread;
                 if (unlikely(thread_is_stopped(thread) &&
-                             cfs_list_empty(&sai->sai_entries_stated))) {
+                            list_empty(&sai->sai_entries_stated))) {
                         /* to release resource */
                         ll_stop_statahead(dir, lli->lli_opendir_key);
                         RETURN(-EAGAIN);
lustre/llite/super25.c
index 0a354c8..f63472b 100644
@@ -154,7 +154,7 @@ static int __init init_lustre_lite(void)
 
        ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache",
                                                   REMOTE_PERM_HASHSIZE *
-                                                  sizeof(cfs_list_t),
+                                                  sizeof(struct list_head),
                                                   0, 0, NULL);
        if (ll_rmtperm_hash_cachep == NULL) {
                kmem_cache_destroy(ll_remote_perm_cachep);
lustre/llite/vvp_dev.c
index 6a36808..ae98455 100644
@@ -298,7 +298,7 @@ static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
 }
 
 static int vvp_pgcache_obj_get(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                               cfs_hlist_node_t *hnode, void *data)
+                              struct hlist_node *hnode, void *data)
 {
         struct vvp_pgcache_id   *id  = data;
         struct lu_object_header *hdr = cfs_hash_object(hs, hnode);
lustre/llite/vvp_page.c
index 3b349f4..41d4975 100644
@@ -540,7 +540,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
        cpg->cpg_page = vmpage;
        page_cache_get(vmpage);
 
-       CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
+       INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
        if (page->cp_type == CPT_CACHEABLE) {
                /* in cache, decref in vvp_page_delete */
                atomic_inc(&page->cp_ref);
lustre/llite/xattr_cache.c
index 9ef62c5..c0bc334 100644
@@ -84,7 +84,7 @@ static void ll_xattr_cache_init(struct ll_inode_info *lli)
 
        LASSERT(lli != NULL);
 
-       CFS_INIT_LIST_HEAD(&lli->lli_xattrs);
+       INIT_LIST_HEAD(&lli->lli_xattrs);
        lli->lli_flags |= LLIF_XATTR_CACHE;
 }
 
lustre/lov/lov_cl_internal.h
index 55603ee..9c96ac0 100644
@@ -406,7 +406,7 @@ struct lov_lock_link {
          * A linkage into per sub-lock list of all corresponding top-locks,
          * hanging off lovsub_lock::lss_parents.
          */
-        cfs_list_t       lll_list;
+       struct list_head lll_list;
 };
 
 /**
@@ -418,7 +418,7 @@ struct lovsub_lock {
          * List of top-locks that have given sub-lock as their part. Protected
          * by cl_lock::cll_guard mutex.
          */
-        cfs_list_t            lss_parents;
+       struct list_head        lss_parents;
         /**
          * Top-lock that initiated current operation on this sub-lock. This is
          * only set during top-to-bottom lock operations like enqueue, and is
@@ -471,7 +471,7 @@ struct lov_io_sub {
          * Linkage into a list (hanging off lov_io::lis_active) of all
          * sub-io's active for the current IO iteration.
          */
-        cfs_list_t           sub_linkage;
+       struct list_head        sub_linkage;
         /**
          * true, iff cl_io_init() was successfully executed against
          * lov_io_sub::sub_io.
@@ -547,7 +547,7 @@ struct lov_io {
         /**
          * List of active sub-io's.
          */
-        cfs_list_t         lis_active;
+       struct list_head        lis_active;
 };
 
 struct lov_session {
lustre/lov/lov_dev.c
index a750609..9de5193 100644
@@ -145,7 +145,7 @@ static void *lov_key_init(const struct lu_context *ctx,
 
        OBD_SLAB_ALLOC_PTR_GFP(info, lov_thread_kmem, GFP_NOFS);
        if (info != NULL)
-               CFS_INIT_LIST_HEAD(&info->lti_closure.clc_list);
+               INIT_LIST_HEAD(&info->lti_closure.clc_list);
        else
                info = ERR_PTR(-ENOMEM);
        return info;
@@ -155,7 +155,7 @@ static void lov_key_fini(const struct lu_context *ctx,
                          struct lu_context_key *key, void *data)
 {
         struct lov_thread_info *info = data;
-        LINVRNT(cfs_list_empty(&info->lti_closure.clc_list));
+       LINVRNT(list_empty(&info->lti_closure.clc_list));
         OBD_SLAB_FREE_PTR(info, lov_thread_kmem);
 }
 
lustre/lov/lov_internal.h
index f24abbd..52d1afd 100644
@@ -80,7 +80,7 @@ struct pool_desc {
        char                     pool_name[LOV_MAXPOOLNAME + 1];
        struct ost_pool          pool_obds;
        atomic_t                 pool_refcount;
-       cfs_hlist_node_t         pool_hash;     /* access by poolname */
+       struct hlist_node        pool_hash;     /* access by poolname */
        struct list_head         pool_list;     /* serial access */
        struct proc_dir_entry   *pool_proc_entry;
        struct obd_device       *pool_lobd;     /* owner */
@@ -89,7 +89,7 @@ struct pool_desc {
 struct lov_request {
        struct obd_info          rq_oi;
        struct lov_request_set  *rq_rqset;
-       cfs_list_t               rq_link;
+       struct list_head         rq_link;
        int                      rq_idx;        /* index in lov->tgts array */
        int                      rq_stripe;     /* stripe number */
        int                      rq_complete;
@@ -108,7 +108,7 @@ struct lov_request_set {
        atomic_t                 set_success;
        atomic_t                 set_finish_checked;
        struct llog_cookie      *set_cookies;
-       cfs_list_t               set_list;
+       struct list_head         set_list;
        wait_queue_head_t        set_waitq;
 };
 
lustre/lov/lov_io.c
index bab17f7..871d37f 100644
@@ -421,7 +421,7 @@ static int lov_io_iter_init(const struct lu_env *env,
                         rc = PTR_ERR(sub);
 
                 if (!rc)
-                        cfs_list_add_tail(&sub->sub_linkage, &lio->lis_active);
+                       list_add_tail(&sub->sub_linkage, &lio->lis_active);
                 else
                         break;
         }
@@ -473,7 +473,7 @@ static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
         int rc = 0;
 
         ENTRY;
-        cfs_list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+       list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                 lov_sub_enter(sub);
                 rc = iofunc(sub->sub_env, sub->sub_io);
                 lov_sub_exit(sub);
@@ -542,8 +542,8 @@ static void lov_io_iter_fini(const struct lu_env *env,
         ENTRY;
         rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
         LASSERT(rc == 0);
-        while (!cfs_list_empty(&lio->lis_active))
-                cfs_list_del_init(lio->lis_active.next);
+       while (!list_empty(&lio->lis_active))
+               list_del_init(lio->lis_active.next);
         EXIT;
 }
 
@@ -742,7 +742,7 @@ static void lov_io_fsync_end(const struct lu_env *env,
        ENTRY;
 
        *written = 0;
-       cfs_list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+       list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
                struct cl_io *subio = sub->sub_io;
 
                lov_sub_enter(sub);
lustre/lov/lov_lock.c
index 916a415..4e7901b 100644
@@ -122,7 +122,7 @@ static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
         lck->lls_sub[idx].sub_lock = lsl;
         lck->lls_nr_filled++;
         LASSERT(lck->lls_nr_filled <= lck->lls_nr);
-        cfs_list_add_tail(&link->lll_list, &lsl->lss_parents);
+       list_add_tail(&link->lll_list, &lsl->lss_parents);
         link->lll_idx = idx;
         link->lll_super = lck;
         cl_lock_get(parent);
@@ -205,7 +205,7 @@ static int lov_sublock_lock(const struct lu_env *env,
         int                 result = 0;
         ENTRY;
 
-        LASSERT(cfs_list_empty(&closure->clc_list));
+       LASSERT(list_empty(&closure->clc_list));
 
         sublock = lls->sub_lock;
         child = sublock->lss_cl.cls_lock;
@@ -1047,7 +1047,7 @@ void lov_lock_unlink(const struct lu_env *env,
         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
         ENTRY;
 
-        cfs_list_del_init(&link->lll_list);
+       list_del_init(&link->lll_list);
         LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
         /* yank this sub-lock from parent's array */
         lck->lls_sub[link->lll_idx].sub_lock = NULL;
@@ -1068,7 +1068,7 @@ struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
         ENTRY;
 
-        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+       list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                 if (scan->lll_super == lck)
                         RETURN(scan);
         }
@@ -1223,7 +1223,7 @@ static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
         struct cl_lock_closure *closure;
 
         closure = &lov_env_info(env)->lti_closure;
-        LASSERT(cfs_list_empty(&closure->clc_list));
+       LASSERT(list_empty(&closure->clc_list));
         cl_lock_closure_init(env, closure, parent, 1);
         return closure;
 }
lustre/lov/lov_obd.c
index 1b31a17..099eb62 100644
@@ -98,7 +98,7 @@ static void lov_putref(struct obd_device *obd)
 
                         if (!tgt || !tgt->ltd_reap)
                                 continue;
-                        cfs_list_add(&tgt->ltd_kill, &kill);
+                       list_add(&tgt->ltd_kill, &kill);
                         /* XXX - right now there is a dependency on ld_tgt_count
                          * being the maximum tgt index for computing the
                          * mds_max_easize. So we can't shrink it. */
@@ -108,8 +108,8 @@ static void lov_putref(struct obd_device *obd)
                 }
                mutex_unlock(&lov->lov_lock);
 
-                cfs_list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
-                        cfs_list_del(&tgt->ltd_kill);
+               list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
+                       list_del(&tgt->ltd_kill);
                         /* Disconnect */
                         __lov_del_obd(obd, tgt);
                 }
@@ -836,7 +836,7 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
                                                    CFS_HASH_MAX_THETA,
                                                    &pool_hash_operations,
                                                    CFS_HASH_DEFAULT);
-        CFS_INIT_LIST_HEAD(&lov->lov_pool_list);
+       INIT_LIST_HEAD(&lov->lov_pool_list);
         lov->lov_pool_count = 0;
         rc = lov_ost_pool_init(&lov->lov_packed, 0);
         if (rc)
@@ -914,12 +914,12 @@ static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
 static int lov_cleanup(struct obd_device *obd)
 {
         struct lov_obd *lov = &obd->u.lov;
-        cfs_list_t *pos, *tmp;
+       struct list_head *pos, *tmp;
         struct pool_desc *pool;
         ENTRY;
 
-        cfs_list_for_each_safe(pos, tmp, &lov->lov_pool_list) {
-                pool = cfs_list_entry(pos, struct pool_desc, pool_list);
+       list_for_each_safe(pos, tmp, &lov->lov_pool_list) {
+               pool = list_entry(pos, struct pool_desc, pool_list);
                 /* free pool structs */
                 CDEBUG(D_INFO, "delete pool %p\n", pool);
                /* In the function below, .hs_keycmp resolves to
@@ -1112,7 +1112,7 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp,
         struct lov_request_set *set;
         struct obd_info oinfo;
         struct lov_request *req;
-        cfs_list_t *pos;
+       struct list_head *pos;
         struct lov_obd *lov;
         int rc = 0, err = 0;
         ENTRY;
@@ -1133,8 +1133,8 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp,
         if (rc)
                 GOTO(out, rc);
 
-        cfs_list_for_each (pos, &set->set_list) {
-                req = cfs_list_entry(pos, struct lov_request, rq_link);
+       list_for_each(pos, &set->set_list) {
+               req = list_entry(pos, struct lov_request, rq_link);
 
                 if (oa->o_valid & OBD_MD_FLCOOKIE)
                         oti->oti_logcookies = set->set_cookies + req->rq_stripe;
@@ -1182,7 +1182,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
 {
         struct lov_request_set *lovset;
         struct lov_obd *lov;
-        cfs_list_t *pos;
+       struct list_head *pos;
         struct lov_request *req;
         int rc = 0, err;
         ENTRY;
@@ -1203,8 +1203,8 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
               POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count,
               oinfo->oi_md->lsm_stripe_size);
 
-       cfs_list_for_each(pos, &lovset->set_list) {
-               req = cfs_list_entry(pos, struct lov_request, rq_link);
+       list_for_each(pos, &lovset->set_list) {
+               req = list_entry(pos, struct lov_request, rq_link);
 
                CDEBUG(D_INFO, "objid "DOSTID"[%d] has subobj "DOSTID" at idx"
                       "%u\n", POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
@@ -1222,7 +1222,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
                }
        }
 
-        if (!cfs_list_empty(&rqset->set_requests)) {
+       if (!list_empty(&rqset->set_requests)) {
                 LASSERT(rc == 0);
                 LASSERT (rqset->set_interpret == NULL);
                 rqset->set_interpret = lov_getattr_interpret;
@@ -1257,7 +1257,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
 {
         struct lov_request_set *set;
         struct lov_request *req;
-        cfs_list_t *pos;
+       struct list_head *pos;
         struct lov_obd *lov;
         int rc = 0;
         ENTRY;
@@ -1282,8 +1282,8 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
               oinfo->oi_md->lsm_stripe_count,
               oinfo->oi_md->lsm_stripe_size);
 
-       cfs_list_for_each(pos, &set->set_list) {
-               req = cfs_list_entry(pos, struct lov_request, rq_link);
+       list_for_each(pos, &set->set_list) {
+               req = list_entry(pos, struct lov_request, rq_link);
 
                if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
                        oti->oti_logcookies = set->set_cookies + req->rq_stripe;
@@ -1305,7 +1305,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
        }
 
        /* If we are not waiting for responses on async requests, return. */
-       if (rc || !rqset || cfs_list_empty(&rqset->set_requests)) {
+       if (rc || !rqset || list_empty(&rqset->set_requests)) {
                int err;
                if (rc)
                        atomic_set(&set->set_completes, 0);
@@ -1406,7 +1406,7 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
         struct obd_device      *obd = class_exp2obd(exp);
         struct lov_request_set *set;
         struct lov_request *req;
-        cfs_list_t *pos;
+       struct list_head *pos;
         struct lov_obd *lov;
         int rc = 0;
         ENTRY;
@@ -1419,15 +1419,15 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
         if (rc)
                 RETURN(rc);
 
-        cfs_list_for_each (pos, &set->set_list) {
-                req = cfs_list_entry(pos, struct lov_request, rq_link);
+       list_for_each(pos, &set->set_list) {
+               req = list_entry(pos, struct lov_request, rq_link);
                 rc = obd_statfs_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
                                       &req->rq_oi, max_age, rqset);
                 if (rc)
                         break;
         }
 
-       if (rc || cfs_list_empty(&rqset->set_requests)) {
+       if (rc || list_empty(&rqset->set_requests)) {
                int err;
                if (rc)
                        atomic_set(&set->set_completes, 0);
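
In the lov_obd.c hunks the patch keeps the open-coded cursor style, replacing cfs_list_for_each()/cfs_list_entry() with list_for_each()/list_entry() one for one rather than collapsing the pair into list_for_each_entry(); that further cleanup would change more than the API prefix. Where entries are unlinked during the walk, as with the kill list in lov_putref(), the _safe variant is used so the next pointer is cached before the current entry is deleted. A kernel-style sketch of both shapes, with illustrative type and field names and assuming <linux/list.h>:

#include <linux/list.h>

struct lov_req_example {		/* illustrative, not struct lov_request */
	struct list_head rq_link;
	int		 rq_idx;
};

static void walk_and_reap(struct list_head *set_list, struct list_head *kill)
{
	struct list_head *pos;
	struct lov_req_example *req, *n;

	/* one-for-one replacement for cfs_list_for_each()/cfs_list_entry() */
	list_for_each(pos, set_list) {
		req = list_entry(pos, struct lov_req_example, rq_link);
		/* ... issue the per-stripe request for 'req' ... */
	}

	/* deleting while iterating needs the _safe variant, as in lov_putref() */
	list_for_each_entry_safe(req, n, kill, rq_link) {
		list_del(&req->rq_link);
		/* ... disconnect / free the reaped target ... */
	}
}
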
index 3960438..95366c1 100644 (file)
@@ -144,8 +144,8 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
                RETURN(PTR_ERR(sub));
 
        subobj = lovsub2cl(r0->lo_sub[stripe]);
-       cfs_list_for_each_entry(o, &subobj->co_lu.lo_header->loh_layers,
-                               co_lu.lo_linkage) {
+       list_for_each_entry(o, &subobj->co_lu.lo_header->loh_layers,
+                           co_lu.lo_linkage) {
                if (o->co_ops->coo_page_init != NULL) {
                        rc = o->co_ops->coo_page_init(sub->sub_env, o, page,
                                                      cl_index(subobj, suboff));
index 14dd336..88cf76a 100644 (file)
@@ -106,43 +106,44 @@ static __u32 pool_hashfn(cfs_hash_t *hash_body, const void *key, unsigned mask)
         return (result % mask);
 }
 
-static void *pool_key(cfs_hlist_node_t *hnode)
+static void *pool_key(struct hlist_node *hnode)
 {
         struct pool_desc *pool;
 
-        pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
+       pool = hlist_entry(hnode, struct pool_desc, pool_hash);
         return (pool->pool_name);
 }
 
-static int pool_hashkey_keycmp(const void *key, cfs_hlist_node_t *compared_hnode)
+static int
+pool_hashkey_keycmp(const void *key, struct hlist_node *compared_hnode)
 {
         char *pool_name;
         struct pool_desc *pool;
 
         pool_name = (char *)key;
-        pool = cfs_hlist_entry(compared_hnode, struct pool_desc, pool_hash);
+       pool = hlist_entry(compared_hnode, struct pool_desc, pool_hash);
         return !strncmp(pool_name, pool->pool_name, LOV_MAXPOOLNAME);
 }
 
-static void *pool_hashobject(cfs_hlist_node_t *hnode)
+static void *pool_hashobject(struct hlist_node *hnode)
 {
-        return cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
+       return hlist_entry(hnode, struct pool_desc, pool_hash);
 }
 
-static void pool_hashrefcount_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode)
 {
         struct pool_desc *pool;
 
-        pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
+       pool = hlist_entry(hnode, struct pool_desc, pool_hash);
         lov_pool_getref(pool);
 }
 
 static void pool_hashrefcount_put_locked(cfs_hash_t *hs,
-                                         cfs_hlist_node_t *hnode)
+                                        struct hlist_node *hnode)
 {
         struct pool_desc *pool;
 
-        pool = cfs_hlist_entry(hnode, struct pool_desc, pool_hash);
+       pool = hlist_entry(hnode, struct pool_desc, pool_hash);
         lov_pool_putref_locked(pool);
 }
 
@@ -459,7 +460,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
        if (rc)
                GOTO(out_err, rc);
 
-        CFS_INIT_HLIST_NODE(&new_pool->pool_hash);
+       INIT_HLIST_NODE(&new_pool->pool_hash);
 
 #ifdef LPROCFS
         /* we need this assert because seq_file is not implemented for liblustre */
@@ -481,7 +482,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
 #endif
 
        spin_lock(&obd->obd_dev_lock);
-       cfs_list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
+       list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
        lov->lov_pool_count++;
        spin_unlock(&obd->obd_dev_lock);
 
@@ -498,7 +499,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
 
 out_err:
        spin_lock(&obd->obd_dev_lock);
-       cfs_list_del_init(&new_pool->pool_list);
+       list_del_init(&new_pool->pool_list);
        lov->lov_pool_count--;
        spin_unlock(&obd->obd_dev_lock);
         lprocfs_remove(&new_pool->pool_proc_entry);
@@ -528,7 +529,7 @@ int lov_pool_del(struct obd_device *obd, char *poolname)
         }
 
        spin_lock(&obd->obd_dev_lock);
-       cfs_list_del_init(&pool->pool_list);
+       list_del_init(&pool->pool_list);
        lov->lov_pool_count--;
        spin_unlock(&obd->obd_dev_lock);
 
index 548ed58..9acb68a 100644 (file)
@@ -54,7 +54,7 @@ static void lov_init_set(struct lov_request_set *set)
        atomic_set(&set->set_success, 0);
        atomic_set(&set->set_finish_checked, 0);
        set->set_cookies = 0;
-       CFS_INIT_LIST_HEAD(&set->set_list);
+       INIT_LIST_HEAD(&set->set_list);
        atomic_set(&set->set_refcount, 1);
        init_waitqueue_head(&set->set_waitq);
 }
@@ -130,7 +130,7 @@ int lov_update_common_set(struct lov_request_set *set,
 
 void lov_set_add_req(struct lov_request *req, struct lov_request_set *set)
 {
-        cfs_list_add_tail(&req->rq_link, &set->set_list);
+       list_add_tail(&req->rq_link, &set->set_list);
         set->set_count++;
         req->rq_rqset = set;
 }
@@ -193,7 +193,7 @@ out:
 
 static int common_attr_done(struct lov_request_set *set)
 {
-        cfs_list_t *pos;
+       struct list_head *pos;
         struct lov_request *req;
         struct obdo *tmp_oa;
         int rc = 0, attrset = 0;
@@ -211,8 +211,8 @@ static int common_attr_done(struct lov_request_set *set)
        if (tmp_oa == NULL)
                GOTO(out, rc = -ENOMEM);
 
-        cfs_list_for_each (pos, &set->set_list) {
-                req = cfs_list_entry(pos, struct lov_request, rq_link);
+       list_for_each(pos, &set->set_list) {
+               req = list_entry(pos, struct lov_request, rq_link);
 
                 if (!req->rq_complete || req->rq_rc)
                         continue;
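
The lov_request.c hunks show the runtime initialiser side of the conversion: CFS_INIT_LIST_HEAD() on a freshly set-up request set becomes INIT_LIST_HEAD(), and each per-stripe request is appended with list_add_tail() so the set is later walked in submission order. A sketch of that build-up with illustrative names, assuming <linux/list.h>:

#include <linux/list.h>

struct req_set_example {		/* illustrative, not lov_request_set */
	struct list_head set_list;
	int		 set_count;
};

struct req_example {
	struct list_head rq_link;
	int		 rq_stripe;
};

static void set_init(struct req_set_example *set)
{
	INIT_LIST_HEAD(&set->set_list);	/* replaces CFS_INIT_LIST_HEAD() */
	set->set_count = 0;
}

static void set_add(struct req_set_example *set, struct req_example *req)
{
	/* append so the set is processed in the order requests were built */
	list_add_tail(&req->rq_link, &set->set_list);
	set->set_count++;
}
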
index 18665e0..7916d20 100644 (file)
@@ -59,7 +59,7 @@ static void lovsub_lock_fini(const struct lu_env *env,
 
         ENTRY;
         lsl = cl2lovsub_lock(slice);
-        LASSERT(cfs_list_empty(&lsl->lss_parents));
+       LASSERT(list_empty(&lsl->lss_parents));
         OBD_SLAB_FREE_PTR(lsl, lovsub_lock_kmem);
         EXIT;
 }
@@ -103,7 +103,7 @@ static void lovsub_lock_state(const struct lu_env *env,
         LASSERT(cl_lock_is_mutexed(slice->cls_lock));
         ENTRY;
 
-        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+       list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                 struct lov_lock *lov    = scan->lll_super;
                 struct cl_lock  *parent = lov->lls_cl.cls_lock;
 
@@ -131,7 +131,7 @@ static unsigned long lovsub_lock_weigh(const struct lu_env *env,
 
         LASSERT(cl_lock_is_mutexed(slice->cls_lock));
 
-        if (!cfs_list_empty(&lock->lss_parents)) {
+       if (!list_empty(&lock->lss_parents)) {
                 /*
                  * It is not clear whether all parents have to be asked and
                  * their estimations summed, or it is enough to ask one. For
@@ -252,7 +252,7 @@ static int lovsub_lock_modify(const struct lu_env *env,
 
         LASSERT(cl_lock_mode_match(d->cld_mode,
                                    s->cls_lock->cll_descr.cld_mode));
-        cfs_list_for_each_entry(scan, &lock->lss_parents, lll_list) {
+       list_for_each_entry(scan, &lock->lss_parents, lll_list) {
                 int rc;
 
                 lov = scan->lll_super;
@@ -279,7 +279,7 @@ static int lovsub_lock_closure(const struct lu_env *env,
         sub    = cl2lovsub_lock(slice);
         result = 0;
 
-        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+       list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                 parent = scan->lll_super->lls_cl.cls_lock;
                 result = cl_lock_closure_build(env, parent, closure);
                 if (result != 0)
@@ -426,7 +426,7 @@ static void lovsub_lock_delete(const struct lu_env *env,
                 struct lov_lock_link *temp;
 
                 restart = 0;
-                cfs_list_for_each_entry_safe(scan, temp,
+               list_for_each_entry_safe(scan, temp,
                                              &sub->lss_parents, lll_list) {
                         lov     = scan->lll_super;
                         lovsub_parent_lock(env, lov);
@@ -450,7 +450,7 @@ static int lovsub_lock_print(const struct lu_env *env, void *cookie,
         struct lov_lock      *lov;
         struct lov_lock_link *scan;
 
-        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+       list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                 lov = scan->lll_super;
                 (*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
                 if (lov != NULL)
@@ -480,7 +480,7 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
        ENTRY;
        OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, GFP_NOFS);
        if (lsk != NULL) {
-               CFS_INIT_LIST_HEAD(&lsk->lss_parents);
+               INIT_LIST_HEAD(&lsk->lss_parents);
                cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
                result = 0;
        } else
index f89383e..6cf3c00 100644 (file)
@@ -76,9 +76,9 @@ struct echo_object {
         struct cl_object_header eo_hdr;
 
         struct echo_device     *eo_dev;
-        cfs_list_t              eo_obj_chain;
+       struct list_head        eo_obj_chain;
         struct lov_stripe_md   *eo_lsm;
-       atomic_t            eo_npages;
+       atomic_t                eo_npages;
         int                     eo_deleted;
 };
 
@@ -93,11 +93,11 @@ struct echo_page {
 };
 
 struct echo_lock {
-        struct cl_lock_slice   el_cl;
-        cfs_list_t             el_chain;
-        struct echo_object    *el_object;
-        __u64                  el_cookie;
-       atomic_t           el_refcount;
+       struct cl_lock_slice    el_cl;
+       struct list_head        el_chain;
+       struct echo_object     *el_object;
+       __u64                   el_cookie;
+       atomic_t                el_refcount;
 };
 
 static int echo_client_setup(const struct lu_env *env,
@@ -345,7 +345,7 @@ static void echo_lock_fini(const struct lu_env *env,
 {
         struct echo_lock *ecl = cl2echo_lock(slice);
 
-        LASSERT(cfs_list_empty(&ecl->el_chain));
+       LASSERT(list_empty(&ecl->el_chain));
         OBD_SLAB_FREE_PTR(ecl, echo_lock_kmem);
 }
 
@@ -354,7 +354,7 @@ static void echo_lock_delete(const struct lu_env *env,
 {
         struct echo_lock *ecl      = cl2echo_lock(slice);
 
-        LASSERT(cfs_list_empty(&ecl->el_chain));
+       LASSERT(list_empty(&ecl->el_chain));
 }
 
 static int echo_lock_fits_into(const struct lu_env *env,
@@ -410,7 +410,7 @@ static int echo_lock_init(const struct lu_env *env,
        if (el != NULL) {
                cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
                el->el_object = cl2echo_obj(obj);
-               CFS_INIT_LIST_HEAD(&el->el_chain);
+               INIT_LIST_HEAD(&el->el_chain);
                atomic_set(&el->el_refcount, 0);
        }
        RETURN(el == NULL ? -ENOMEM : 0);
@@ -473,7 +473,7 @@ static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
        cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
 
        spin_lock(&ec->ec_lock);
-       cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
+       list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
        spin_unlock(&ec->ec_lock);
 
        RETURN(0);
@@ -539,7 +539,7 @@ static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
        LASSERT(atomic_read(&eco->eo_npages) == 0);
 
        spin_lock(&ec->ec_lock);
-        cfs_list_del_init(&eco->eo_obj_chain);
+       list_del_init(&eco->eo_obj_chain);
        spin_unlock(&ec->ec_lock);
 
         lu_object_fini(obj);
@@ -840,7 +840,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
                 ls = next->ld_site;
 
                spin_lock(&ls->ls_ld_lock);
-               cfs_list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
+               list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
                        if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
                                found = 1;
                                break;
@@ -981,7 +981,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
          * parallelly accessed.
          */
        spin_lock(&ec->ec_lock);
-       cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
+       list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
                eco->eo_deleted = 1;
        spin_unlock(&ec->ec_lock);
 
@@ -993,7 +993,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
 
        /* Wait for the last reference to be dropped. */
        spin_lock(&ec->ec_lock);
-       while (!cfs_list_empty(&ec->ec_objects)) {
+       while (!list_empty(&ec->ec_objects)) {
                spin_unlock(&ec->ec_lock);
                CERROR("echo_client still has objects at cleanup time, "
                       "wait for 1 second\n");
@@ -1004,7 +1004,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
        }
        spin_unlock(&ec->ec_lock);
 
-        LASSERT(cfs_list_empty(&ec->ec_locks));
+       LASSERT(list_empty(&ec->ec_locks));
 
        CDEBUG(D_INFO, "No object exists, exiting...\n");
 
@@ -1178,8 +1178,8 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
                 if (rc == 0) {
                         el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
                        spin_lock(&ec->ec_lock);
-                       if (cfs_list_empty(&el->el_chain)) {
-                               cfs_list_add(&el->el_chain, &ec->ec_locks);
+                       if (list_empty(&el->el_chain)) {
+                               list_add(&el->el_chain, &ec->ec_locks);
                                el->el_cookie = ++ec->ec_unique;
                        }
                        atomic_inc(&el->el_refcount);
@@ -1197,19 +1197,19 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
 {
         struct echo_client_obd *ec = ed->ed_ec;
         struct echo_lock       *ecl = NULL;
-        cfs_list_t             *el;
+       struct list_head        *el;
         int found = 0, still_used = 0;
         ENTRY;
 
         LASSERT(ec != NULL);
        spin_lock(&ec->ec_lock);
-        cfs_list_for_each (el, &ec->ec_locks) {
-                ecl = cfs_list_entry (el, struct echo_lock, el_chain);
+       list_for_each(el, &ec->ec_locks) {
+               ecl = list_entry(el, struct echo_lock, el_chain);
                 CDEBUG(D_INFO, "ecl: %p, cookie: "LPX64"\n", ecl, ecl->el_cookie);
                 found = (ecl->el_cookie == cookie);
                 if (found) {
                        if (atomic_dec_and_test(&ecl->el_refcount))
-                                cfs_list_del_init(&ecl->el_chain);
+                               list_del_init(&ecl->el_chain);
                         else
                                 still_used = 1;
                         break;
@@ -2882,8 +2882,8 @@ static int echo_client_setup(const struct lu_env *env,
         }
 
        spin_lock_init(&ec->ec_lock);
-        CFS_INIT_LIST_HEAD (&ec->ec_objects);
-        CFS_INIT_LIST_HEAD (&ec->ec_locks);
+       INIT_LIST_HEAD(&ec->ec_objects);
+       INIT_LIST_HEAD(&ec->ec_locks);
         ec->ec_unique = 0;
         ec->ec_nstripes = 0;
 
@@ -2919,7 +2919,7 @@ static int echo_client_setup(const struct lu_env *env,
         if (rc == 0) {
                 /* Turn off pinger because it connects to tgt obd directly. */
                spin_lock(&tgt->obd_dev_lock);
-               cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
+               list_del_init(&ec->ec_exp->exp_obd_chain_timed);
                spin_unlock(&tgt->obd_dev_lock);
         }
 
@@ -2956,7 +2956,7 @@ static int echo_client_cleanup(struct obd_device *obddev)
                 RETURN(0);
         }
 
-        if (!cfs_list_empty(&obddev->obd_exports)) {
+       if (!list_empty(&obddev->obd_exports)) {
                 CERROR("still has clients!\n");
                 RETURN(-EBUSY);
         }
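
The echo_client.c conversion leans on the difference between list_del() and list_del_init(): the lock is unlinked with list_del_init() when its reference count drops to zero, leaving the node self-linked, so the list_empty(&el->el_chain) check in cl_echo_enqueue0() and the LASSERTs in the lock fini paths remain valid afterwards. A sketch of that idiom with illustrative names, assuming <linux/list.h> (the real code uses atomic_t and a spinlock; a plain int keeps the sketch short):

#include <linux/list.h>

struct lock_example {			/* illustrative stand-in for echo_lock */
	struct list_head el_chain;
	int		 el_refcount;
};

static void lock_track(struct list_head *locks, struct lock_example *el)
{
	/* list_empty() is reliable here because the node was either
	 * INIT_LIST_HEAD()ed at creation or removed with list_del_init(),
	 * never with plain list_del() */
	if (list_empty(&el->el_chain))
		list_add(&el->el_chain, locks);
	el->el_refcount++;
}

static void lock_untrack(struct lock_example *el)
{
	if (--el->el_refcount == 0)
		list_del_init(&el->el_chain);	/* leaves the node self-linked */
}
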
index f915b6d..061dac5 100644 (file)
@@ -54,7 +54,7 @@ void out_destroy_update_req(struct dt_update_request *dt_update)
        if (dt_update == NULL)
                return;
 
-       cfs_list_del(&dt_update->dur_list);
+       list_del(&dt_update->dur_list);
        if (dt_update->dur_req != NULL)
                OBD_FREE_LARGE(dt_update->dur_req, dt_update->dur_req_len);
 
index 53826ca..76b1165 100644 (file)
@@ -349,7 +349,7 @@ void tgt_boot_epoch_update(struct lu_target *tgt)
        struct lu_env            env;
        struct ptlrpc_request   *req;
        __u32                    start_epoch;
-       cfs_list_t               client_list;
+       struct list_head         client_list;
        int                      rc;
 
        if (tgt->lut_obd->obd_stopping)
@@ -374,21 +374,21 @@ void tgt_boot_epoch_update(struct lu_target *tgt)
         * with resend requests. Move final list to separate one for processing
         */
        spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
-       cfs_list_splice_init(&tgt->lut_obd->obd_final_req_queue, &client_list);
+       list_splice_init(&tgt->lut_obd->obd_final_req_queue, &client_list);
        spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
 
        /**
         * go through list of exports participated in recovery and
         * set new epoch for them
         */
-       cfs_list_for_each_entry(req, &client_list, rq_list) {
+       list_for_each_entry(req, &client_list, rq_list) {
                LASSERT(!req->rq_export->exp_delayed);
                if (!req->rq_export->exp_vbr_failed)
                        tgt_client_epoch_update(&env, req->rq_export);
        }
        /** return list back at once */
        spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
-       cfs_list_splice_init(&client_list, &tgt->lut_obd->obd_final_req_queue);
+       list_splice_init(&client_list, &tgt->lut_obd->obd_final_req_queue);
        spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
        /** update server epoch */
        tgt_server_data_update(&env, tgt, 1);
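
tgt_boot_epoch_update() uses list_splice_init() to drain the final request queue onto a stack-local list under obd_recovery_task_lock, walk it without the lock held, and then splice everything back. For that to be safe the local head has to be initialised (a LIST_HEAD() declaration or INIT_LIST_HEAD()) before the first splice; that initialisation sits outside the hunks shown here. A sketch of the drain-process-restore shape with illustrative names, assuming <linux/list.h> and <linux/spinlock.h>:

#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * Drain a shared queue onto a local head under the lock, process the
 * entries without the lock held, then splice them back in one go.
 */
static void drain_process_restore(spinlock_t *lock, struct list_head *shared)
{
	LIST_HEAD(local);			/* declares and initialises the head */

	spin_lock(lock);
	list_splice_init(shared, &local);	/* 'shared' is left empty */
	spin_unlock(lock);

	/* ... walk 'local' with list_for_each_entry(), lock-free ... */

	spin_lock(lock);
	list_splice_init(&local, shared);	/* hand everything back */
	spin_unlock(lock);
}
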