LU-3963 libcfs: convert llite/lmv/lod/lov cfs_atomic primitive 73/7073/13
author Peng Tao <tao.peng@emc.com>
Fri, 4 Apr 2014 12:13:57 +0000 (08:13 -0400)
committer Oleg Drokin <oleg.drokin@intel.com>
Wed, 9 Apr 2014 17:35:42 +0000 (17:35 +0000)
This patch converts all cfs_atomic primitives in llite, lmv, lod and
lov to the standard Linux atomic_t API.
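
The change is mechanical: each libcfs wrapper on a touched line is replaced
by the matching native Linux primitive (cfs_atomic_t -> atomic_t,
CFS_ATOMIC_INIT -> ATOMIC_INIT, cfs_atomic_read/set/inc/dec/add/sub ->
atomic_read/set/inc/dec/add/sub, cfs_atomic_dec_and_test ->
atomic_dec_and_test, cfs_atomic_dec_and_lock -> atomic_dec_and_lock,
cfs_atomic_cmpxchg -> atomic_cmpxchg), and the touched lines are reflowed to
kernel tab indentation; several hunks also move touched lines from the
cfs_list wrappers to the native struct list_head API.  The sketch below is
illustrative only -- struct foo, foo_get() and foo_put() are hypothetical
names, not code from this patch; LASSERT() is Lustre's libcfs assertion
macro as used throughout the hunks:

    #include <linux/atomic.h>
    #include <linux/slab.h>
    #include <libcfs/libcfs.h>      /* LASSERT() */

    /* refcounted object using native kernel atomics */
    struct foo {
            atomic_t        foo_refcount;           /* was: cfs_atomic_t */
    };

    static atomic_t foo_debug = ATOMIC_INIT(0);     /* was: CFS_ATOMIC_INIT(0) */

    static void foo_get(struct foo *foo)
    {
            LASSERT(atomic_read(&foo->foo_refcount) > 0); /* was: cfs_atomic_read() */
            atomic_inc(&foo->foo_refcount);               /* was: cfs_atomic_inc()  */
    }

    static void foo_put(struct foo *foo)
    {
            /* was: cfs_atomic_dec_and_test() */
            if (atomic_dec_and_test(&foo->foo_refcount))
                    kfree(foo);
    }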

Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Change-Id: I24db8a4c9ee6e7e05ccb63bf3d278a2df9125510
Signed-off-by: Nathaniel Clark <nathaniel.l.clark@intel.com>
Reviewed-on: http://review.whamcloud.com/7073
Tested-by: Jenkins
Reviewed-by: Bob Glossman <bob.glossman@intel.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
25 files changed:
lustre/include/lustre_nrs_tbf.h
lustre/include/obd.h
lustre/llite/llite_capa.c
lustre/llite/llite_close.c
lustre/llite/llite_internal.h
lustre/llite/llite_lib.c
lustre/llite/llite_mmap.c
lustre/llite/lloop.c
lustre/llite/lproc_llite.c
lustre/llite/statahead.c
lustre/llite/vvp_lock.c
lustre/llite/vvp_object.c
lustre/lmv/lmv_obd.c
lustre/lod/lod_dev.c
lustre/lod/lod_pool.c
lustre/lov/lov_cl_internal.h
lustre/lov/lov_internal.h
lustre/lov/lov_io.c
lustre/lov/lov_obd.c
lustre/lov/lov_object.c
lustre/lov/lov_pack.c
lustre/lov/lov_pool.c
lustre/lov/lov_request.c
lustre/lov/lovsub_dev.c
lustre/nodemap/nodemap_handler.c

lustre/include/lustre_nrs_tbf.h
index 57c0a99..5d48f3e 100644
@@ -58,7 +58,7 @@ struct nrs_tbf_client {
        /** Jobid of the client. */
        char                             tc_jobid[JOBSTATS_JOBID_SIZE];
        /** Reference number of the client. */
-       cfs_atomic_t                     tc_ref;
+       atomic_t                         tc_ref;
        /** Likage to rule. */
        cfs_list_t                       tc_linkage;
        /** Pointer to rule. */
@@ -121,7 +121,7 @@ struct nrs_tbf_rule {
        /** Flags of the rule. */
        __u32                            tr_flags;
        /** Usage Reference count taken on the rule. */
-       cfs_atomic_t                     tr_ref;
+       atomic_t                         tr_ref;
        /** Generation of the rule. */
        __u64                            tr_generation;
 };
@@ -185,7 +185,7 @@ struct nrs_tbf_head {
        /**
         * Generation of rules.
         */
-       cfs_atomic_t                     th_rule_sequence;
+       atomic_t                         th_rule_sequence;
        /**
         * Default rule.
         */
lustre/include/obd.h
index bb67e43..3f1e960 100644
@@ -296,9 +296,9 @@ struct client_obd {
        int                  cl_chunk;
        int                  cl_extent_tax; /* extent overhead, by bytes */
 
-        /* keep track of objects that have lois that contain pages which
-         * have been queued for async brw.  this lock also protects the
-         * lists of osc_client_pages that hang off of the loi */
+       /* keep track of objects that have lois that contain pages which
+        * have been queued for async brw.  this lock also protects the
+        * lists of osc_client_pages that hang off of the loi */
         /*
          * ->cl_loi_list_lock protects consistency of
          * ->cl_loi_{ready,read,write}_list. ->ap_make_ready() and
lustre/llite/llite_capa.c
index 7b6302d..3b76920 100644
@@ -63,7 +63,7 @@ static cfs_list_t *ll_capa_list = &capa_list[CAPA_SITE_CLIENT];
 /* llite capa renewal timer */
 struct timer_list ll_capa_timer;
 /* for debug: indicate whether capa on llite is enabled or not */
-static cfs_atomic_t ll_capa_debug = CFS_ATOMIC_INIT(0);
+static atomic_t ll_capa_debug = ATOMIC_INIT(0);
 static unsigned long long ll_capa_renewed = 0;
 static unsigned long long ll_capa_renewal_noent = 0;
 static unsigned long long ll_capa_renewal_failed = 0;
@@ -138,8 +138,8 @@ static void sort_add_capa(struct obd_capa *ocapa, cfs_list_t *head)
 
 static inline int obd_capa_open_count(struct obd_capa *oc)
 {
-        struct ll_inode_info *lli = ll_i2info(oc->u.cli.inode);
-        return cfs_atomic_read(&lli->lli_open_count);
+       struct ll_inode_info *lli = ll_i2info(oc->u.cli.inode);
+       return atomic_read(&lli->lli_open_count);
 }
 
 static void ll_delete_capa(struct obd_capa *ocapa)
@@ -259,17 +259,17 @@ static int capa_thread_main(void *unused)
                                 if (!next)
                                         update_capa_timer(ocapa,
                                                           ocapa->c_expiry);
-                                break;
-                        }
-
-                        if (cfs_atomic_read(&ocapa->c_refc) > 1) {
-                                DEBUG_CAPA(D_SEC, &ocapa->c_capa,
-                                           "expired(c_refc %d), don't release",
-                                           cfs_atomic_read(&ocapa->c_refc));
-                                /* don't try to renew any more */
-                                cfs_list_del_init(&ocapa->c_list);
-                                continue;
-                        }
+                               break;
+                       }
+
+                       if (atomic_read(&ocapa->c_refc) > 1) {
+                               DEBUG_CAPA(D_SEC, &ocapa->c_capa,
+                                          "expired(c_refc %d), don't release",
+                                          atomic_read(&ocapa->c_refc));
+                               /* don't try to renew any more */
+                               list_del_init(&ocapa->c_list);
+                               continue;
+                       }
 
                         /* expired capa is released. */
                         DEBUG_CAPA(D_SEC, &ocapa->c_capa, "release expired");
@@ -358,15 +358,15 @@ struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
                 capa_get(ocapa);
 
                 DEBUG_CAPA(D_SEC, &ocapa->c_capa, "found client");
-        } else {
-                ocapa = NULL;
-
-                if (cfs_atomic_read(&ll_capa_debug)) {
-                        CERROR("no capability for "DFID" opc "LPX64"\n",
-                               PFID(&lli->lli_fid), opc);
-                        cfs_atomic_set(&ll_capa_debug, 0);
-                }
-        }
+       } else {
+               ocapa = NULL;
+
+               if (atomic_read(&ll_capa_debug)) {
+                       CERROR("no capability for "DFID" opc "LPX64"\n",
+                              PFID(&lli->lli_fid), opc);
+                       atomic_set(&ll_capa_debug, 0);
+               }
+       }
        spin_unlock(&capa_lock);
 
        RETURN(ocapa);
@@ -375,24 +375,24 @@ EXPORT_SYMBOL(ll_osscapa_get);
 
 struct obd_capa *ll_mdscapa_get(struct inode *inode)
 {
-        struct ll_inode_info *lli = ll_i2info(inode);
-        struct obd_capa *ocapa;
-        ENTRY;
+       struct ll_inode_info *lli = ll_i2info(inode);
+       struct obd_capa *ocapa;
+       ENTRY;
 
-        LASSERT(inode != NULL);
+       LASSERT(inode != NULL);
 
-        if ((ll_i2sbi(inode)->ll_flags & LL_SBI_MDS_CAPA) == 0)
-                RETURN(NULL);
+       if ((ll_i2sbi(inode)->ll_flags & LL_SBI_MDS_CAPA) == 0)
+               RETURN(NULL);
 
        spin_lock(&capa_lock);
        ocapa = capa_get(lli->lli_mds_capa);
        spin_unlock(&capa_lock);
-        if (!ocapa && cfs_atomic_read(&ll_capa_debug)) {
-                CERROR("no mds capability for "DFID"\n", PFID(&lli->lli_fid));
-                cfs_atomic_set(&ll_capa_debug, 0);
-        }
+       if (!ocapa && atomic_read(&ll_capa_debug)) {
+               CERROR("no mds capability for "DFID"\n", PFID(&lli->lli_fid));
+               atomic_set(&ll_capa_debug, 0);
+       }
 
-        RETURN(ocapa);
+       RETURN(ocapa);
 }
 
 static struct obd_capa *do_add_mds_capa(struct inode *inode,
@@ -497,21 +497,21 @@ static struct obd_capa *do_add_oss_capa(struct inode *inode,
 struct obd_capa *ll_add_capa(struct inode *inode, struct obd_capa *ocapa)
 {
        spin_lock(&capa_lock);
-        ocapa = capa_for_mds(&ocapa->c_capa) ? do_add_mds_capa(inode, ocapa) :
-                                               do_add_oss_capa(inode, ocapa);
+       ocapa = capa_for_mds(&ocapa->c_capa) ? do_add_mds_capa(inode, ocapa) :
+                                              do_add_oss_capa(inode, ocapa);
 
-        /* truncate capa won't renew */
-        if (ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC) {
-                set_capa_expiry(ocapa);
-                cfs_list_del_init(&ocapa->c_list);
-                sort_add_capa(ocapa, ll_capa_list);
+       /* truncate capa won't renew */
+       if (ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC) {
+               set_capa_expiry(ocapa);
+               list_del_init(&ocapa->c_list);
+               sort_add_capa(ocapa, ll_capa_list);
 
-                update_capa_timer(ocapa, capa_renewal_time(ocapa));
-        }
+               update_capa_timer(ocapa, capa_renewal_time(ocapa));
+       }
 
        spin_unlock(&capa_lock);
 
-       cfs_atomic_set(&ll_capa_debug, 1);
+       atomic_set(&ll_capa_debug, 1);
        return ocapa;
 }
 
@@ -589,30 +589,30 @@ retry:
 
 void ll_capa_open(struct inode *inode)
 {
-        struct ll_inode_info *lli = ll_i2info(inode);
+       struct ll_inode_info *lli = ll_i2info(inode);
 
-        if ((ll_i2sbi(inode)->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA))
-            == 0)
-                return;
+       if ((ll_i2sbi(inode)->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA))
+           == 0)
+               return;
 
-        if (!S_ISREG(inode->i_mode))
-                return;
+       if (!S_ISREG(inode->i_mode))
+               return;
 
-        cfs_atomic_inc(&lli->lli_open_count);
+       atomic_inc(&lli->lli_open_count);
 }
 
 void ll_capa_close(struct inode *inode)
 {
-        struct ll_inode_info *lli = ll_i2info(inode);
+       struct ll_inode_info *lli = ll_i2info(inode);
 
-        if ((ll_i2sbi(inode)->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA))
-            == 0)
-                return;
+       if ((ll_i2sbi(inode)->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA))
+           == 0)
+               return;
 
-        if (!S_ISREG(inode->i_mode))
-                return;
+       if (!S_ISREG(inode->i_mode))
+               return;
 
-        cfs_atomic_dec(&lli->lli_open_count);
+       atomic_dec(&lli->lli_open_count);
 }
 
 /* delete CAPA_OPC_OSS_TRUNC only */
lustre/llite/llite_close.c
index c4cb158..8bff949 100644
@@ -337,12 +337,12 @@ static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
 
        spin_lock(&lcq->lcq_lock);
 
-        if (!cfs_list_empty(&lcq->lcq_head)) {
-                lli = cfs_list_entry(lcq->lcq_head.next, struct ll_inode_info,
-                                     lli_close_list);
-                cfs_list_del_init(&lli->lli_close_list);
-        } else if (cfs_atomic_read(&lcq->lcq_stop))
-                lli = ERR_PTR(-EALREADY);
+       if (!list_empty(&lcq->lcq_head)) {
+               lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
+                                lli_close_list);
+               list_del_init(&lli->lli_close_list);
+       } else if (atomic_read(&lcq->lcq_stop))
+               lli = ERR_PTR(-EALREADY);
 
        spin_unlock(&lcq->lcq_lock);
        return lli;
@@ -409,7 +409,7 @@ int ll_close_thread_start(struct ll_close_queue **lcq_ret)
 void ll_close_thread_shutdown(struct ll_close_queue *lcq)
 {
        init_completion(&lcq->lcq_comp);
-       cfs_atomic_inc(&lcq->lcq_stop);
+       atomic_inc(&lcq->lcq_stop);
        wake_up(&lcq->lcq_waitq);
        wait_for_completion(&lcq->lcq_comp);
        OBD_FREE(lcq, sizeof(*lcq));
lustre/llite/llite_internal.h
index 0ac56ec..89ee6b0 100644
@@ -151,13 +151,13 @@ struct ll_inode_info {
          * for allocating OST objects after a mknod() and later open-by-FID. */
         struct lu_fid                   lli_pfid;
 
-        cfs_list_t                      lli_close_list;
-        cfs_list_t                      lli_oss_capas;
-        /* open count currently used by capability only, indicate whether
-         * capability needs renewal */
-        cfs_atomic_t                    lli_open_count;
-        struct obd_capa                *lli_mds_capa;
-        cfs_time_t                      lli_rmtperm_time;
+       struct list_head                lli_close_list;
+       struct list_head                lli_oss_capas;
+       /* open count currently used by capability only, indicate whether
+        * capability needs renewal */
+       atomic_t                    lli_open_count;
+       struct obd_capa                *lli_mds_capa;
+       cfs_time_t                      lli_rmtperm_time;
 
         /* handle is to be sent to MDS later on done_writing and setattr.
          * Open handle data are needed for the recovery to reconstruct
@@ -366,10 +366,10 @@ enum ra_stat {
 };
 
 struct ll_ra_info {
-        cfs_atomic_t              ra_cur_pages;
-        unsigned long             ra_max_pages;
-        unsigned long             ra_max_pages_per_file;
-        unsigned long             ra_max_read_ahead_whole_pages;
+       atomic_t        ra_cur_pages;
+       unsigned long   ra_max_pages;
+       unsigned long   ra_max_pages_per_file;
+       unsigned long   ra_max_read_ahead_whole_pages;
 };
 
 /* ra_io_arg will be filled in the beginning of ll_readahead with
@@ -557,18 +557,18 @@ struct ll_sb_info {
         enum stats_track_type     ll_stats_track_type;
         int                       ll_rw_stats_on;
 
-        /* metadata stat-ahead */
-        unsigned int              ll_sa_max;     /* max statahead RPCs */
-        atomic_t                  ll_sa_total;   /* statahead thread started
-                                                  * count */
-        atomic_t                  ll_sa_wrong;   /* statahead thread stopped for
-                                                  * low hit ratio */
-        atomic_t                  ll_agl_total;  /* AGL thread started count */
-
-        dev_t                     ll_sdev_orig; /* save s_dev before assign for
-                                                 * clustred nfs */
-        struct rmtacl_ctl_table   ll_rct;
-        struct eacl_table         ll_et;
+       /* metadata stat-ahead */
+       unsigned int              ll_sa_max;     /* max statahead RPCs */
+       atomic_t                  ll_sa_total;   /* statahead thread started
+                                                 * count */
+       atomic_t                  ll_sa_wrong;   /* statahead thread stopped for
+                                                 * low hit ratio */
+       atomic_t                  ll_agl_total;  /* AGL thread started count */
+
+       dev_t                     ll_sdev_orig; /* save s_dev before assign for
+                                                * clustred nfs */
+       struct rmtacl_ctl_table   ll_rct;
+       struct eacl_table         ll_et;
 };
 
 #define LL_DEFAULT_MAX_RW_CHUNK      (32 * 1024 * 1024)
@@ -978,7 +978,7 @@ struct ll_close_queue {
        cfs_list_t              lcq_head;
        wait_queue_head_t       lcq_waitq;
        struct completion       lcq_comp;
-       cfs_atomic_t            lcq_stop;
+       atomic_t                lcq_stop;
 };
 
 struct ccc_object *cl_inode2ccc(struct inode *inode);
@@ -1301,44 +1301,44 @@ void et_fini(struct eacl_table *et);
 
 /* per inode struct, for dir only */
 struct ll_statahead_info {
-        struct inode           *sai_inode;
-        cfs_atomic_t            sai_refcount;   /* when access this struct, hold
-                                                 * refcount */
-        unsigned int            sai_generation; /* generation for statahead */
-        unsigned int            sai_max;        /* max ahead of lookup */
-        __u64                   sai_sent;       /* stat requests sent count */
-        __u64                   sai_replied;    /* stat requests which received
-                                                 * reply */
-        __u64                   sai_index;      /* index of statahead entry */
-        __u64                   sai_index_wait; /* index of entry which is the
-                                                 * caller is waiting for */
-        __u64                   sai_hit;        /* hit count */
-        __u64                   sai_miss;       /* miss count:
-                                                 * for "ls -al" case, it includes
-                                                 * hidden dentry miss;
-                                                 * for "ls -l" case, it does not
-                                                 * include hidden dentry miss.
-                                                 * "sai_miss_hidden" is used for
-                                                 * the later case.
-                                                 */
+       struct inode            *sai_inode;
+       atomic_t                sai_refcount;   /* when access this struct, hold
+                                                * refcount */
+       unsigned int            sai_generation; /* generation for statahead */
+       unsigned int            sai_max;        /* max ahead of lookup */
+       __u64                   sai_sent;       /* stat requests sent count */
+       __u64                   sai_replied;    /* stat requests which received
+                                                * reply */
+       __u64                   sai_index;      /* index of statahead entry */
+       __u64                   sai_index_wait; /* index of entry which is the
+                                                * caller is waiting for */
+       __u64                   sai_hit;        /* hit count */
+       __u64                   sai_miss;       /* miss count:
+                                                * for "ls -al" case, it
+                                                * includes hidden dentry miss;
+                                                * for "ls -l" case, it does not
+                                                * include hidden dentry miss.
+                                                * "sai_miss_hidden" is used for
+                                                * the later case.
+                                                */
         unsigned int            sai_consecutive_miss; /* consecutive miss */
         unsigned int            sai_miss_hidden;/* "ls -al", but first dentry
                                                  * is not a hidden one */
         unsigned int            sai_skip_hidden;/* skipped hidden dentry count */
-       unsigned int            sai_ls_all:1,   /* "ls -al", do stat-ahead for
+       unsigned int            sai_ls_all:1,   /* "ls -al", do stat-ahead for
                                                 * hidden entries */
                                sai_in_readpage:1,/* statahead is in readdir()*/
                                sai_agl_valid:1;/* AGL is valid for the dir */
-       wait_queue_head_t       sai_waitq;      /* stat-ahead wait queue */
-       struct ptlrpc_thread    sai_thread;     /* stat-ahead thread */
-       struct ptlrpc_thread    sai_agl_thread; /* AGL thread */
-       cfs_list_t              sai_entries;    /* entry list */
-        cfs_list_t              sai_entries_received; /* entries returned */
-        cfs_list_t              sai_entries_stated;   /* entries stated */
-        cfs_list_t              sai_entries_agl; /* AGL entries to be sent */
-        cfs_list_t              sai_cache[LL_SA_CACHE_SIZE];
+       wait_queue_head_t       sai_waitq;      /* stat-ahead wait queue */
+       struct ptlrpc_thread    sai_thread;     /* stat-ahead thread */
+       struct ptlrpc_thread    sai_agl_thread; /* AGL thread */
+       struct list_head        sai_entries;    /* entry list */
+       struct list_head        sai_entries_received;   /* entries returned */
+       struct list_head        sai_entries_stated;     /* entries stated */
+       struct list_head        sai_entries_agl;  /* AGL entries to be sent */
+       struct list_head        sai_cache[LL_SA_CACHE_SIZE];
        spinlock_t              sai_cache_lock[LL_SA_CACHE_SIZE];
-       cfs_atomic_t            sai_cache_count; /* entry count in cache */
+       atomic_t                sai_cache_count; /* entry count in cache */
 };
 
 int do_statahead_enter(struct inode *dir, struct dentry **dentry,
lustre/llite/llite_lib.c
index 4da09da..b32d795 100644
@@ -96,20 +96,20 @@ static struct ll_sb_info *ll_init_sbi(void)
        lru_page_max = pages / 2;
 
        /* initialize ll_cache data */
-       cfs_atomic_set(&sbi->ll_cache.ccc_users, 0);
+       atomic_set(&sbi->ll_cache.ccc_users, 0);
        sbi->ll_cache.ccc_lru_max = lru_page_max;
-       cfs_atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
+       atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
        spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
        CFS_INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
 
-       cfs_atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
+       atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
        init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
 
-        sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
-                                           SBI_DEFAULT_READAHEAD_MAX);
-        sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
-        sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
-                                           SBI_DEFAULT_READAHEAD_WHOLE_MAX;
+       sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
+                                          SBI_DEFAULT_READAHEAD_MAX);
+       sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
+       sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
+                                          SBI_DEFAULT_READAHEAD_WHOLE_MAX;
         CFS_INIT_LIST_HEAD(&sbi->ll_conn_chain);
         CFS_INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
 
@@ -137,14 +137,14 @@ static struct ll_sb_info *ll_init_sbi(void)
                               pp_w_hist.oh_lock);
         }
 
-        /* metadata statahead is enabled by default */
-        sbi->ll_sa_max = LL_SA_RPC_DEF;
-        cfs_atomic_set(&sbi->ll_sa_total, 0);
-        cfs_atomic_set(&sbi->ll_sa_wrong, 0);
-        cfs_atomic_set(&sbi->ll_agl_total, 0);
-        sbi->ll_flags |= LL_SBI_AGL_ENABLED;
+       /* metadata statahead is enabled by default */
+       sbi->ll_sa_max = LL_SA_RPC_DEF;
+       atomic_set(&sbi->ll_sa_total, 0);
+       atomic_set(&sbi->ll_sa_wrong, 0);
+       atomic_set(&sbi->ll_agl_total, 0);
+       sbi->ll_flags |= LL_SBI_AGL_ENABLED;
 
-        RETURN(sbi);
+       RETURN(sbi);
 }
 
 void ll_free_sbi(struct super_block *sb)
@@ -968,15 +968,15 @@ void ll_lli_init(struct ll_inode_info *lli)
        lli->lli_posix_acl = NULL;
        lli->lli_remote_perms = NULL;
        mutex_init(&lli->lli_rmtperm_mutex);
-        /* Do not set lli_fid, it has been initialized already. */
-        fid_zero(&lli->lli_pfid);
-        CFS_INIT_LIST_HEAD(&lli->lli_close_list);
-        CFS_INIT_LIST_HEAD(&lli->lli_oss_capas);
-        cfs_atomic_set(&lli->lli_open_count, 0);
-        lli->lli_mds_capa = NULL;
-        lli->lli_rmtperm_time = 0;
-        lli->lli_pending_och = NULL;
-        lli->lli_mds_read_och = NULL;
+       /* Do not set lli_fid, it has been initialized already. */
+       fid_zero(&lli->lli_pfid);
+       INIT_LIST_HEAD(&lli->lli_close_list);
+       INIT_LIST_HEAD(&lli->lli_oss_capas);
+       atomic_set(&lli->lli_open_count, 0);
+       lli->lli_mds_capa = NULL;
+       lli->lli_rmtperm_time = 0;
+       lli->lli_pending_och = NULL;
+       lli->lli_mds_read_och = NULL;
         lli->lli_mds_write_och = NULL;
         lli->lli_mds_exec_och = NULL;
         lli->lli_open_fd_read_count = 0;
@@ -1017,11 +1017,11 @@ void ll_lli_init(struct ll_inode_info *lli)
 
 static inline int ll_bdi_register(struct backing_dev_info *bdi)
 {
-        static atomic_t ll_bdi_num = ATOMIC_INIT(0);
+       static atomic_t ll_bdi_num = ATOMIC_INIT(0);
 
-        bdi->name = "lustre";
-        return bdi_register(bdi, NULL, "lustre-%d",
-                            atomic_inc_return(&ll_bdi_num));
+       bdi->name = "lustre";
+       return bdi_register(bdi, NULL, "lustre-%d",
+                           atomic_inc_return(&ll_bdi_num));
 }
 
 int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
@@ -1154,11 +1154,11 @@ void ll_put_super(struct super_block *sb)
        if (force == 0) {
                struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
                rc = l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
-                       cfs_atomic_read(&sbi->ll_cache.ccc_unstable_nr) == 0,
+                       atomic_read(&sbi->ll_cache.ccc_unstable_nr) == 0,
                        &lwi);
        }
 
-       ccc_count = cfs_atomic_read(&sbi->ll_cache.ccc_unstable_nr);
+       ccc_count = atomic_read(&sbi->ll_cache.ccc_unstable_nr);
        if (force == 0 && rc != -EINTR)
                LASSERTF(ccc_count == 0, "count: %i\n", ccc_count);
 
@@ -1499,22 +1499,22 @@ void ll_clear_inode(struct inode *inode)
 
        ll_xattr_cache_destroy(inode);
 
-        if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
-                LASSERT(lli->lli_posix_acl == NULL);
-                if (lli->lli_remote_perms) {
-                        free_rmtperm_hash(lli->lli_remote_perms);
-                        lli->lli_remote_perms = NULL;
-                }
-        }
+       if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
+               LASSERT(lli->lli_posix_acl == NULL);
+               if (lli->lli_remote_perms) {
+                       free_rmtperm_hash(lli->lli_remote_perms);
+                       lli->lli_remote_perms = NULL;
+               }
+       }
 #ifdef CONFIG_FS_POSIX_ACL
-        else if (lli->lli_posix_acl) {
-                LASSERT(cfs_atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
-                LASSERT(lli->lli_remote_perms == NULL);
-                posix_acl_release(lli->lli_posix_acl);
-                lli->lli_posix_acl = NULL;
-        }
+       else if (lli->lli_posix_acl) {
+               LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
+               LASSERT(lli->lli_remote_perms == NULL);
+               posix_acl_release(lli->lli_posix_acl);
+               lli->lli_posix_acl = NULL;
+       }
 #endif
-        lli->lli_inode_magic = LLI_INODE_DEAD;
+       lli->lli_inode_magic = LLI_INODE_DEAD;
 
        ll_clear_inode_capas(inode);
        if (S_ISDIR(inode->i_mode))
@@ -2320,22 +2320,22 @@ int ll_flush_ctx(struct inode *inode)
 /* umount -f client means force down, don't save state */
 void ll_umount_begin(struct super_block *sb)
 {
-        struct ll_sb_info *sbi = ll_s2sbi(sb);
-        struct obd_device *obd;
-        struct obd_ioctl_data *ioc_data;
-        ENTRY;
+       struct ll_sb_info *sbi = ll_s2sbi(sb);
+       struct obd_device *obd;
+       struct obd_ioctl_data *ioc_data;
+       ENTRY;
 
-        CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
-               sb->s_count, atomic_read(&sb->s_active));
+       CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
+              sb->s_count, atomic_read(&sb->s_active));
 
-        obd = class_exp2obd(sbi->ll_md_exp);
-        if (obd == NULL) {
-                CERROR("Invalid MDC connection handle "LPX64"\n",
-                       sbi->ll_md_exp->exp_handle.h_cookie);
-                EXIT;
-                return;
-        }
-        obd->obd_force = 1;
+       obd = class_exp2obd(sbi->ll_md_exp);
+       if (obd == NULL) {
+               CERROR("Invalid MDC connection handle "LPX64"\n",
+                      sbi->ll_md_exp->exp_handle.h_cookie);
+               EXIT;
+               return;
+       }
+       obd->obd_force = 1;
 
         obd = class_exp2obd(sbi->ll_dt_exp);
         if (obd == NULL) {
lustre/llite/llite_mmap.c
index 77ec4d5..085dc7e 100644
@@ -432,14 +432,14 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
  */
 static void ll_vm_open(struct vm_area_struct * vma)
 {
-        struct inode *inode    = vma->vm_file->f_dentry->d_inode;
-        struct ccc_object *vob = cl_inode2ccc(inode);
+       struct inode *inode    = vma->vm_file->f_dentry->d_inode;
+       struct ccc_object *vob = cl_inode2ccc(inode);
 
-        ENTRY;
-        LASSERT(vma->vm_file);
-        LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
-        cfs_atomic_inc(&vob->cob_mmap_cnt);
-        EXIT;
+       ENTRY;
+       LASSERT(vma->vm_file);
+       LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
+       atomic_inc(&vob->cob_mmap_cnt);
+       EXIT;
 }
 
 /**
@@ -447,14 +447,14 @@ static void ll_vm_open(struct vm_area_struct * vma)
  */
 static void ll_vm_close(struct vm_area_struct *vma)
 {
-        struct inode      *inode = vma->vm_file->f_dentry->d_inode;
-        struct ccc_object *vob   = cl_inode2ccc(inode);
+       struct inode      *inode = vma->vm_file->f_dentry->d_inode;
+       struct ccc_object *vob   = cl_inode2ccc(inode);
 
-        ENTRY;
-        LASSERT(vma->vm_file);
-        cfs_atomic_dec(&vob->cob_mmap_cnt);
-        LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
-        EXIT;
+       ENTRY;
+       LASSERT(vma->vm_file);
+       atomic_dec(&vob->cob_mmap_cnt);
+       LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
+       EXIT;
 }
 
 /* return the user space pointer that maps to a file offset via a vma */
lustre/llite/lloop.c
index 90bf646..0e5c38f 100644
@@ -138,7 +138,7 @@ struct lloop_device {
        int                     lo_state;
        struct semaphore        lo_sem;
        struct mutex            lo_ctl_mutex;
-       cfs_atomic_t            lo_pending;
+       atomic_t                lo_pending;
        wait_queue_head_t       lo_bh_wait;
 
        struct request_queue *lo_queue;
@@ -284,7 +284,7 @@ static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
                lo->lo_bio = lo->lo_biotail = bio;
        spin_unlock_irqrestore(&lo->lo_lock, flags);
 
-       cfs_atomic_inc(&lo->lo_pending);
+       atomic_inc(&lo->lo_pending);
        if (waitqueue_active(&lo->lo_bh_wait))
                wake_up(&lo->lo_bh_wait);
 }
@@ -400,8 +400,8 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
 
 static inline int loop_active(struct lloop_device *lo)
 {
-        return cfs_atomic_read(&lo->lo_pending) ||
-                (lo->lo_state == LLOOP_RUNDOWN);
+       return atomic_read(&lo->lo_pending) ||
+              (lo->lo_state == LLOOP_RUNDOWN);
 }
 
 /*
@@ -440,7 +440,7 @@ static int loop_thread(void *data)
 
        for (;;) {
                wait_event(lo->lo_bh_wait, loop_active(lo));
-               if (!cfs_atomic_read(&lo->lo_pending)) {
+               if (!atomic_read(&lo->lo_pending)) {
                        int exiting = 0;
                        spin_lock_irq(&lo->lo_lock);
                        exiting = (lo->lo_state == LLOOP_RUNDOWN);
@@ -463,21 +463,21 @@ static int loop_thread(void *data)
                 } else {
                         times++;
                 }
-                if ((times & 127) == 0) {
-                        CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
-                               total_count, times, total_count / times);
-                }
+               if ((times & 127) == 0) {
+                       CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
+                              total_count, times, total_count / times);
+               }
 
-                LASSERT(bio != NULL);
-                LASSERT(count <= cfs_atomic_read(&lo->lo_pending));
-                loop_handle_bio(lo, bio);
-                cfs_atomic_sub(count, &lo->lo_pending);
-        }
-        cl_env_put(env, &refcheck);
+               LASSERT(bio != NULL);
+               LASSERT(count <= atomic_read(&lo->lo_pending));
+               loop_handle_bio(lo, bio);
+               atomic_sub(count, &lo->lo_pending);
+       }
+       cl_env_put(env, &refcheck);
 
 out:
        up(&lo->lo_sem);
-        return ret;
+       return ret;
 }
 
 static int loop_set_fd(struct lloop_device *lo, struct file *unused,
lustre/llite/lproc_llite.c
index 8c4a148..c716538 100644
@@ -385,14 +385,14 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
        int unused_mb;
 
        max_cached_mb = cache->ccc_lru_max >> shift;
-       unused_mb = cfs_atomic_read(&cache->ccc_lru_left) >> shift;
+       unused_mb = atomic_read(&cache->ccc_lru_left) >> shift;
        return seq_printf(m,
                        "users: %d\n"
                        "max_cached_mb: %d\n"
                        "used_mb: %d\n"
                        "unused_mb: %d\n"
                        "reclaim_count: %u\n",
-                       cfs_atomic_read(&cache->ccc_users),
+                       atomic_read(&cache->ccc_users),
                        max_cached_mb,
                        max_cached_mb - unused_mb,
                        unused_mb,
@@ -436,7 +436,7 @@ ll_max_cached_mb_seq_write(struct file *file, const char *buffer,
 
        /* easy - add more LRU slots. */
        if (diff >= 0) {
-               cfs_atomic_add(diff, &cache->ccc_lru_left);
+               atomic_add(diff, &cache->ccc_lru_left);
                GOTO(out, rc = 0);
        }
 
@@ -452,12 +452,12 @@ ll_max_cached_mb_seq_write(struct file *file, const char *buffer,
                do {
                        int ov, nv;
 
-                       ov = cfs_atomic_read(&cache->ccc_lru_left);
+                       ov = atomic_read(&cache->ccc_lru_left);
                        if (ov == 0)
                                break;
 
                        nv = ov > diff ? ov - diff : 0;
-                       rc = cfs_atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
+                       rc = atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
                        if (likely(ov == rc)) {
                                diff -= ov - nv;
                                nrpages += ov - nv;
@@ -486,7 +486,7 @@ out:
                spin_unlock(&sbi->ll_lock);
                rc = count;
        } else {
-               cfs_atomic_add(nrpages, &cache->ccc_lru_left);
+               atomic_add(nrpages, &cache->ccc_lru_left);
        }
        return rc;
 }
@@ -814,7 +814,7 @@ static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
        struct cl_client_cache  *cache = &sbi->ll_cache;
        int pages, mb;
 
-       pages = cfs_atomic_read(&cache->ccc_unstable_nr);
+       pages = atomic_read(&cache->ccc_unstable_nr);
        mb    = (pages * PAGE_CACHE_SIZE) >> 20;
 
        return seq_printf(m, "unstable_pages: %8d\n"
lustre/llite/statahead.c
index da5734f..9668c3c 100644
@@ -65,7 +65,7 @@ struct ll_sa_entry {
        /* link into sai hash table locally */
        cfs_list_t              se_hash;
        /* entry reference count */
-       cfs_atomic_t            se_refcount;
+       atomic_t            se_refcount;
        /* entry index in the sai */
        __u64                   se_index;
        /* low layer ldlm lock handle */
@@ -154,7 +154,7 @@ agl_first_entry(struct ll_statahead_info *sai)
 
 static inline int sa_sent_full(struct ll_statahead_info *sai)
 {
-        return cfs_atomic_read(&sai->sai_cache_count) >= sai->sai_max;
+       return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
 }
 
 static inline int sa_received_empty(struct ll_statahead_info *sai)
@@ -212,47 +212,47 @@ ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
 
         entry->se_index = index;
 
-        /*
-         * Statahead entry reference rules:
-         *
-         * 1) When statahead entry is initialized, its reference is set as 2.
-         *    One reference is used by the directory scanner. When the scanner
-         *    searches the statahead cache for the given name, it can perform
-         *    lockless hash lookup (only the scanner can remove entry from hash
-         *    list), and once found, it needn't to call "atomic_inc()" for the
-         *    entry reference. So the performance is improved. After using the
-         *    statahead entry, the scanner will call "atomic_dec()" to drop the
-         *    reference held when initialization. If it is the last reference,
-         *    the statahead entry will be freed.
-         *
-         * 2) All other threads, including statahead thread and ptlrpcd thread,
-         *    when they process the statahead entry, the reference for target
-         *    should be held to guarantee the entry will not be released by the
-         *    directory scanner. After processing the entry, these threads will
-         *    drop the entry reference. If it is the last reference, the entry
-         *    will be freed.
-         *
-         *    The second reference when initializes the statahead entry is used
-         *    by the statahead thread, following the rule 2).
-         */
-        cfs_atomic_set(&entry->se_refcount, 2);
-        entry->se_stat = SA_ENTRY_INIT;
-        entry->se_size = entry_size;
-        dname = (char *)entry + sizeof(struct ll_sa_entry);
-        memcpy(dname, name, len);
-        dname[len] = 0;
-        entry->se_qstr.hash = full_name_hash(name, len);
-        entry->se_qstr.len = len;
-        entry->se_qstr.name = dname;
-
-        lli = ll_i2info(sai->sai_inode);
+       /*
+        * Statahead entry reference rules:
+        *
+        * 1) When statahead entry is initialized, its reference is set as 2.
+        *    One reference is used by the directory scanner. When the scanner
+        *    searches the statahead cache for the given name, it can perform
+        *    lockless hash lookup (only the scanner can remove entry from hash
+        *    list), and once found, it needn't to call "atomic_inc()" for the
+        *    entry reference. So the performance is improved. After using the
+        *    statahead entry, the scanner will call "atomic_dec()" to drop the
+        *    reference held when initialization. If it is the last reference,
+        *    the statahead entry will be freed.
+        *
+        * 2) All other threads, including statahead thread and ptlrpcd thread,
+        *    when they process the statahead entry, the reference for target
+        *    should be held to guarantee the entry will not be released by the
+        *    directory scanner. After processing the entry, these threads will
+        *    drop the entry reference. If it is the last reference, the entry
+        *    will be freed.
+        *
+        *    The second reference when initializes the statahead entry is used
+        *    by the statahead thread, following the rule 2).
+        */
+       atomic_set(&entry->se_refcount, 2);
+       entry->se_stat = SA_ENTRY_INIT;
+       entry->se_size = entry_size;
+       dname = (char *)entry + sizeof(struct ll_sa_entry);
+       memcpy(dname, name, len);
+       dname[len] = 0;
+       entry->se_qstr.hash = full_name_hash(name, len);
+       entry->se_qstr.len = len;
+       entry->se_qstr.name = dname;
+
+       lli = ll_i2info(sai->sai_inode);
        spin_lock(&lli->lli_sa_lock);
        cfs_list_add_tail(&entry->se_link, &sai->sai_entries);
        CFS_INIT_LIST_HEAD(&entry->se_list);
        ll_sa_entry_enhash(sai, entry);
        spin_unlock(&lli->lli_sa_lock);
 
-       cfs_atomic_inc(&sai->sai_cache_count);
+       atomic_inc(&sai->sai_cache_count);
 
        RETURN(entry);
 }
@@ -294,7 +294,7 @@ ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
        cfs_list_for_each_entry(entry, &sai->sai_entries, se_link) {
                if (entry->se_index == index) {
                        LASSERT(atomic_read(&entry->se_refcount) > 0);
-                       cfs_atomic_inc(&entry->se_refcount);
+                       atomic_inc(&entry->se_refcount);
                        return entry;
                 }
                if (entry->se_index > index)
@@ -325,7 +325,7 @@ static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
 static void ll_sa_entry_put(struct ll_statahead_info *sai,
                              struct ll_sa_entry *entry)
 {
-       if (cfs_atomic_dec_and_test(&entry->se_refcount)) {
+       if (atomic_dec_and_test(&entry->se_refcount)) {
                CDEBUG(D_READA, "free sa entry %.*s(%p) index "LPU64"\n",
                       entry->se_qstr.len, entry->se_qstr.name, entry,
                       entry->se_index);
@@ -339,7 +339,7 @@ static void ll_sa_entry_put(struct ll_statahead_info *sai,
                        iput(entry->se_inode);
 
                OBD_FREE(entry, entry->se_size);
-               cfs_atomic_dec(&sai->sai_cache_count);
+               atomic_dec(&sai->sai_cache_count);
        }
 }
 
@@ -471,7 +471,7 @@ static struct ll_statahead_info *ll_sai_alloc(void)
        if (!sai)
                RETURN(NULL);
 
-       cfs_atomic_set(&sai->sai_refcount, 1);
+       atomic_set(&sai->sai_refcount, 1);
 
        spin_lock(&sai_generation_lock);
        sai->sai_generation = ++sai_generation;
@@ -494,7 +494,7 @@ static struct ll_statahead_info *ll_sai_alloc(void)
                CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
                spin_lock_init(&sai->sai_cache_lock[i]);
        }
-       cfs_atomic_set(&sai->sai_cache_count, 0);
+       atomic_set(&sai->sai_cache_count, 0);
 
        RETURN(sai);
 }
@@ -502,22 +502,22 @@ static struct ll_statahead_info *ll_sai_alloc(void)
 static inline struct ll_statahead_info *
 ll_sai_get(struct ll_statahead_info *sai)
 {
-        cfs_atomic_inc(&sai->sai_refcount);
-        return sai;
+       atomic_inc(&sai->sai_refcount);
+       return sai;
 }
 
 static void ll_sai_put(struct ll_statahead_info *sai)
 {
-        struct inode         *inode = sai->sai_inode;
-        struct ll_inode_info *lli   = ll_i2info(inode);
-        ENTRY;
+       struct inode         *inode = sai->sai_inode;
+       struct ll_inode_info *lli   = ll_i2info(inode);
+       ENTRY;
 
-        if (cfs_atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
-                struct ll_sa_entry *entry, *next;
+       if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
+               struct ll_sa_entry *entry, *next;
 
-                if (unlikely(cfs_atomic_read(&sai->sai_refcount) > 0)) {
-                        /* It is race case, the interpret callback just hold
-                         * a reference count */
+               if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
+                       /* It is race case, the interpret callback just hold
+                        * a reference count */
                        spin_unlock(&lli->lli_sa_lock);
                        RETURN_EXIT;
                }
@@ -544,14 +544,14 @@ static void ll_sai_put(struct ll_statahead_info *sai)
                LASSERT(sa_received_empty(sai));
                LASSERT(list_empty(&sai->sai_entries_stated));
 
-                LASSERT(cfs_atomic_read(&sai->sai_cache_count) == 0);
-                LASSERT(agl_list_empty(sai));
+               LASSERT(atomic_read(&sai->sai_cache_count) == 0);
+               LASSERT(agl_list_empty(sai));
 
-                iput(inode);
-                OBD_FREE_PTR(sai);
-        }
+               iput(inode);
+               OBD_FREE_PTR(sai);
+       }
 
-        EXIT;
+       EXIT;
 }
 
 /* Do NOT forget to drop inode refcount when into sai_entries_agl. */
@@ -636,7 +636,7 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
                RETURN_EXIT;
        }
        entry = sa_first_received_entry(sai);
-       cfs_atomic_inc(&entry->se_refcount);
+       atomic_inc(&entry->se_refcount);
        cfs_list_del_init(&entry->se_list);
        spin_unlock(&lli->lli_sa_lock);
 
@@ -1456,10 +1456,10 @@ ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
         } else {
                 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
 
-                sai->sai_miss++;
-                sai->sai_consecutive_miss++;
-                if (sa_low_hit(sai) && thread_is_running(thread)) {
-                        atomic_inc(&sbi->ll_sa_wrong);
+               sai->sai_miss++;
+               sai->sai_consecutive_miss++;
+               if (sa_low_hit(sai) && thread_is_running(thread)) {
+                       atomic_inc(&sbi->ll_sa_wrong);
                        CDEBUG(D_READA, "Statahead for dir "DFID" hit "
                               "ratio too low: hit/miss "LPU64"/"LPU64
                               ", sent/replied "LPU64"/"LPU64", stopping "
lustre/llite/vvp_lock.c
index 51096da..54d99f0 100644
  * ordered within themselves by weights assigned from other layers.
  */
 static unsigned long vvp_lock_weigh(const struct lu_env *env,
-                                    const struct cl_lock_slice *slice)
+                                   const struct cl_lock_slice *slice)
 {
-        struct ccc_object *cob = cl2ccc(slice->cls_obj);
+       struct ccc_object *cob = cl2ccc(slice->cls_obj);
 
-        ENTRY;
-        RETURN(cfs_atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0);
+       ENTRY;
+       RETURN(atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0);
 }
 
 static const struct cl_lock_operations vvp_lock_ops = {
lustre/llite/vvp_object.c
index 2efa270..9ee04b7 100644
  */
 
 static int vvp_object_print(const struct lu_env *env, void *cookie,
-                            lu_printer_t p, const struct lu_object *o)
+                           lu_printer_t p, const struct lu_object *o)
 {
-        struct ccc_object    *obj   = lu2ccc(o);
-        struct inode         *inode = obj->cob_inode;
-        struct ll_inode_info *lli;
-
-        (*p)(env, cookie, "(%s %d %d) inode: %p ",
-             cfs_list_empty(&obj->cob_pending_list) ? "-" : "+",
-             obj->cob_transient_pages, cfs_atomic_read(&obj->cob_mmap_cnt),
-             inode);
-        if (inode) {
-                lli = ll_i2info(inode);
-                (*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
-                     inode->i_ino, inode->i_generation, inode->i_mode,
-                     inode->i_nlink, atomic_read(&inode->i_count),
-                     lli->lli_clob, PFID(&lli->lli_fid));
-        }
-        return 0;
+       struct ccc_object    *obj   = lu2ccc(o);
+       struct inode         *inode = obj->cob_inode;
+       struct ll_inode_info *lli;
+
+       (*p)(env, cookie, "(%s %d %d) inode: %p ",
+            list_empty(&obj->cob_pending_list) ? "-" : "+",
+            obj->cob_transient_pages, atomic_read(&obj->cob_mmap_cnt),
+            inode);
+       if (inode) {
+               lli = ll_i2info(inode);
+               (*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
+                    inode->i_ino, inode->i_generation, inode->i_mode,
+                    inode->i_nlink, atomic_read(&inode->i_count),
+                    lli->lli_clob, PFID(&lli->lli_fid));
+       }
+       return 0;
 }
 
 static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
lustre/lmv/lmv_obd.c
index b7a9544..298d0db 100644
@@ -486,9 +486,9 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
                }
         }
 
-        tgt->ltd_active = 1;
-        tgt->ltd_exp = mdc_exp;
-        lmv->desc.ld_active_tgt_count++;
+       tgt->ltd_active = 1;
+       tgt->ltd_exp = mdc_exp;
+       lmv->desc.ld_active_tgt_count++;
 
        md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize,
                        lmv->max_cookiesize, lmv->max_def_cookiesize);
lustre/lod/lod_dev.c
index 1d4849f..8cccfd1 100644
@@ -683,7 +683,7 @@ static struct lu_device *lod_device_free(const struct lu_env *env,
        struct lu_device  *next = &lod->lod_child->dd_lu_dev;
        ENTRY;
 
-       LASSERT(cfs_atomic_read(&lu->ld_ref) == 0);
+       LASSERT(atomic_read(&lu->ld_ref) == 0);
        dt_device_fini(&lod->lod_dt_dev);
        OBD_FREE_PTR(lod);
        RETURN(next);
lustre/lod/lod_pool.c
index 98b2416..149507f 100644
@@ -74,9 +74,9 @@ void lod_pool_putref(struct pool_desc *pool)
 void lod_pool_putref_locked(struct pool_desc *pool)
 {
        CDEBUG(D_INFO, "pool %p\n", pool);
-       LASSERT(cfs_atomic_read(&pool->pool_refcount) > 1);
+       LASSERT(atomic_read(&pool->pool_refcount) > 1);
 
-       cfs_atomic_dec(&pool->pool_refcount);
+       atomic_dec(&pool->pool_refcount);
 }
 
 
lustre/lov/lov_cl_internal.h
index d933b8f..55603ee 100644
@@ -226,7 +226,7 @@ struct lov_object {
         * How many IOs are on going on this object. Layout can be changed
         * only if there is no active IO.
         */
-       cfs_atomic_t           lo_active_ios;
+       atomic_t               lo_active_ios;
        /**
         * Waitq - wait for no one else is using lo_lsm
         */
lustre/lov/lov_internal.h
index 72d6ee6..f24abbd 100644
@@ -98,15 +98,15 @@ struct lov_request {
 
 struct lov_request_set {
        struct obd_info         *set_oi;
-       cfs_atomic_t             set_refcount;
+       atomic_t                 set_refcount;
        struct obd_export       *set_exp;
        /* XXX: There is @set_exp already, however obd_statfs gets
           obd_device only. */
        struct obd_device       *set_obd;
        int                      set_count;
-       cfs_atomic_t             set_completes;
-       cfs_atomic_t             set_success;
-       cfs_atomic_t             set_finish_checked;
+       atomic_t                 set_completes;
+       atomic_t                 set_success;
+       atomic_t                 set_finish_checked;
        struct llog_cookie      *set_cookies;
        cfs_list_t               set_list;
        wait_queue_head_t        set_waitq;
@@ -118,8 +118,8 @@ void lov_finish_set(struct lov_request_set *set);
 
 static inline void lov_put_reqset(struct lov_request_set *set)
 {
-        if (cfs_atomic_dec_and_test(&set->set_refcount))
-                lov_finish_set(set);
+       if (atomic_dec_and_test(&set->set_refcount))
+               lov_finish_set(set);
 }
 
 #define lov_uuid2str(lv, index) \
@@ -242,8 +242,8 @@ void lov_pool_putref(struct pool_desc *pool);
 
 static inline struct lov_stripe_md *lsm_addref(struct lov_stripe_md *lsm)
 {
-       LASSERT(cfs_atomic_read(&lsm->lsm_refc) > 0);
-       cfs_atomic_inc(&lsm->lsm_refc);
+       LASSERT(atomic_read(&lsm->lsm_refc) > 0);
+       atomic_inc(&lsm->lsm_refc);
        return lsm;
 }
 
lustre/lov/lov_io.c
index 9d62e2c..bab17f7 100644
@@ -367,17 +367,17 @@ static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
        struct lov_object *lov = cl2lov(ios->cis_obj);
        int i;
 
-        ENTRY;
-        if (lio->lis_subs != NULL) {
-                for (i = 0; i < lio->lis_nr_subios; i++)
-                        lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
-                OBD_FREE_LARGE(lio->lis_subs,
-                         lio->lis_nr_subios * sizeof lio->lis_subs[0]);
-                lio->lis_nr_subios = 0;
-        }
+       ENTRY;
+       if (lio->lis_subs != NULL) {
+               for (i = 0; i < lio->lis_nr_subios; i++)
+                       lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
+               OBD_FREE_LARGE(lio->lis_subs,
+                        lio->lis_nr_subios * sizeof lio->lis_subs[0]);
+               lio->lis_nr_subios = 0;
+       }
 
-       LASSERT(cfs_atomic_read(&lov->lo_active_ios) > 0);
-       if (cfs_atomic_dec_and_test(&lov->lo_active_ios))
+       LASSERT(atomic_read(&lov->lo_active_ios) > 0);
+       if (atomic_dec_and_test(&lov->lo_active_ios))
                wake_up_all(&lov->lo_waitq);
        EXIT;
 }
@@ -822,7 +822,7 @@ static void lov_empty_io_fini(const struct lu_env *env,
        struct lov_object *lov = cl2lov(ios->cis_obj);
        ENTRY;
 
-       if (cfs_atomic_dec_and_test(&lov->lo_active_ios))
+       if (atomic_dec_and_test(&lov->lo_active_ios))
                wake_up_all(&lov->lo_waitq);
        EXIT;
 }
@@ -882,22 +882,22 @@ static const struct cl_io_operations lov_empty_io_ops = {
 };
 
 int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
-                      struct cl_io *io)
+                     struct cl_io *io)
 {
-        struct lov_io       *lio = lov_env_io(env);
-        struct lov_object   *lov = cl2lov(obj);
+       struct lov_io       *lio = lov_env_io(env);
+       struct lov_object   *lov = cl2lov(obj);
 
-        ENTRY;
-        CFS_INIT_LIST_HEAD(&lio->lis_active);
-        lov_io_slice_init(lio, lov, io);
-        if (io->ci_result == 0) {
-                io->ci_result = lov_io_subio_init(env, lio, io);
-                if (io->ci_result == 0) {
-                        cl_io_slice_add(io, &lio->lis_cl, obj, &lov_io_ops);
-                       cfs_atomic_inc(&lov->lo_active_ios);
+       ENTRY;
+       INIT_LIST_HEAD(&lio->lis_active);
+       lov_io_slice_init(lio, lov, io);
+       if (io->ci_result == 0) {
+               io->ci_result = lov_io_subio_init(env, lio, io);
+               if (io->ci_result == 0) {
+                       cl_io_slice_add(io, &lio->lis_cl, obj, &lov_io_ops);
+                       atomic_inc(&lov->lo_active_ios);
                }
-        }
-        RETURN(io->ci_result);
+       }
+       RETURN(io->ci_result);
 }
 
 int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
@@ -920,18 +920,18 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
        case CIT_SETATTR:
                result = +1;
                break;
-        case CIT_WRITE:
-                result = -EBADF;
-                break;
-        case CIT_FAULT:
-                result = -EFAULT;
-                CERROR("Page fault on a file without stripes: "DFID"\n",
-                       PFID(lu_object_fid(&obj->co_lu)));
-                break;
-        }
-        if (result == 0) {
-                cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
-               cfs_atomic_inc(&lov->lo_active_ios);
+       case CIT_WRITE:
+               result = -EBADF;
+               break;
+       case CIT_FAULT:
+               result = -EFAULT;
+               CERROR("Page fault on a file without stripes: "DFID"\n",
+                      PFID(lu_object_fid(&obj->co_lu)));
+               break;
+       }
+       if (result == 0) {
+               cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
+               atomic_inc(&lov->lo_active_ios);
        }
 
        io->ci_result = result < 0 ? result : 0;
@@ -975,7 +975,7 @@ int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
        }
        if (result == 0) {
                cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
-               cfs_atomic_inc(&lov->lo_active_ios);
+               atomic_inc(&lov->lo_active_ios);
        }
 
        io->ci_result = result < 0 ? result : 0;
lustre/lov/lov_obd.c
index 9d47ec5..d8b508e 100644
    Any function that expects lov_tgts to remain stationary must take a ref. */
 static void lov_getref(struct obd_device *obd)
 {
-        struct lov_obd *lov = &obd->u.lov;
+       struct lov_obd *lov = &obd->u.lov;
 
-        /* nobody gets through here until lov_putref is done */
+       /* nobody gets through here until lov_putref is done */
        mutex_lock(&lov->lov_lock);
-        cfs_atomic_inc(&lov->lov_refcount);
+       atomic_inc(&lov->lov_refcount);
        mutex_unlock(&lov->lov_lock);
-        return;
+       return;
 }
 
 static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt);
 
 static void lov_putref(struct obd_device *obd)
 {
-        struct lov_obd *lov = &obd->u.lov;
+       struct lov_obd *lov = &obd->u.lov;
 
        mutex_lock(&lov->lov_lock);
-        /* ok to dec to 0 more than once -- ltd_exp's will be null */
-        if (cfs_atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) {
-                CFS_LIST_HEAD(kill);
-                int i;
-                struct lov_tgt_desc *tgt, *n;
-                CDEBUG(D_CONFIG, "destroying %d lov targets\n",
-                       lov->lov_death_row);
-                for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+       /* ok to dec to 0 more than once -- ltd_exp's will be null */
+       if (atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) {
+               struct list_head kill = LIST_HEAD_INIT(kill);
+               struct lov_tgt_desc *tgt, *n;
+               int i;
+
+               CDEBUG(D_CONFIG, "destroying %d lov targets\n",
+                      lov->lov_death_row);
+               for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                         tgt = lov->lov_tgts[i];
 
                         if (!tgt || !tgt->ltd_reap)
@@ -809,13 +810,13 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 
         lov_fix_desc(desc);
 
-        desc->ld_active_tgt_count = 0;
-        lov->desc = *desc;
-        lov->lov_tgt_size = 0;
+       desc->ld_active_tgt_count = 0;
+       lov->desc = *desc;
+       lov->lov_tgt_size = 0;
 
        mutex_init(&lov->lov_lock);
-        cfs_atomic_set(&lov->lov_refcount, 0);
-        lov->lov_sp_me = LUSTRE_SP_CLI;
+       atomic_set(&lov->lov_refcount, 0);
+       lov->lov_sp_me = LUSTRE_SP_CLI;
 
        init_rwsem(&lov->lov_notify_lock);
 
@@ -899,21 +900,21 @@ static int lov_cleanup(struct obd_device *obd)
                 int i;
                 obd_getref(obd);
                 for (i = 0; i < lov->desc.ld_tgt_count; i++) {
-                        if (!lov->lov_tgts[i])
-                                continue;
-
-                        /* Inactive targets may never have connected */
-                        if (lov->lov_tgts[i]->ltd_active ||
-                            cfs_atomic_read(&lov->lov_refcount))
-                            /* We should never get here - these
-                               should have been removed in the
-                             disconnect. */
-                                CERROR("lov tgt %d not cleaned!"
-                                       " deathrow=%d, lovrc=%d\n",
-                                       i, lov->lov_death_row,
-                                       cfs_atomic_read(&lov->lov_refcount));
-                        lov_del_target(obd, i, 0, 0);
-                }
+                       if (!lov->lov_tgts[i])
+                               continue;
+
+                       /* Inactive targets may never have connected */
+                       if (lov->lov_tgts[i]->ltd_active ||
+                           atomic_read(&lov->lov_refcount))
+                               /* We should never get here - these
+                                * should have been removed in the
+                                * disconnect. */
+                               CERROR("%s: lov tgt %d not cleaned! "
+                                      "deathrow=%d, lovrc=%d\n",
+                                      obd->obd_name, i, lov->lov_death_row,
+                                      atomic_read(&lov->lov_refcount));
+                       lov_del_target(obd, i, 0, 0);
+               }
                 obd_putref(obd);
                 OBD_FREE(lov->lov_tgts, sizeof(*lov->lov_tgts) *
                          lov->lov_tgt_size);
@@ -1128,17 +1129,17 @@ out:
 }
 
 static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
-                                 void *data, int rc)
+                                void *data, int rc)
 {
-        struct lov_request_set *lovset = (struct lov_request_set *)data;
-        int err;
-        ENTRY;
+       struct lov_request_set *lovset = (struct lov_request_set *)data;
+       int err;
+       ENTRY;
 
-        /* don't do attribute merge if this aysnc op failed */
-        if (rc)
-                cfs_atomic_set(&lovset->set_completes, 0);
-        err = lov_fini_getattr_set(lovset);
-        RETURN(rc ? rc : err);
+       /* don't do attribute merge if this async op failed */
+       if (rc)
+               atomic_set(&lovset->set_completes, 0);
+       err = lov_fini_getattr_set(lovset);
+       RETURN(rc ? rc : err);
 }
 
 static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
@@ -1194,23 +1195,23 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
                 RETURN(rc);
         }
 out:
-        if (rc)
-                cfs_atomic_set(&lovset->set_completes, 0);
-        err = lov_fini_getattr_set(lovset);
-        RETURN(rc ? rc : err);
+       if (rc)
+               atomic_set(&lovset->set_completes, 0);
+       err = lov_fini_getattr_set(lovset);
+       RETURN(rc ? rc : err);
 }
 
 static int lov_setattr_interpret(struct ptlrpc_request_set *rqset,
-                                 void *data, int rc)
+                                void *data, int rc)
 {
-        struct lov_request_set *lovset = (struct lov_request_set *)data;
-        int err;
-        ENTRY;
+       struct lov_request_set *lovset = (struct lov_request_set *)data;
+       int err;
+       ENTRY;
 
-        if (rc)
-                cfs_atomic_set(&lovset->set_completes, 0);
-        err = lov_fini_setattr_set(lovset);
-        RETURN(rc ? rc : err);
+       if (rc)
+               atomic_set(&lovset->set_completes, 0);
+       err = lov_fini_setattr_set(lovset);
+       RETURN(rc ? rc : err);
 }
 
 /* If @oti is given, the request goes from MDS and responses from OSTs are not
@@ -1268,20 +1269,20 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
                }
        }
 
-        /* If we are not waiting for responses on async requests, return. */
-        if (rc || !rqset || cfs_list_empty(&rqset->set_requests)) {
-                int err;
-                if (rc)
-                        cfs_atomic_set(&set->set_completes, 0);
-                err = lov_fini_setattr_set(set);
-                RETURN(rc ? rc : err);
-        }
+       /* If we are not waiting for responses on async requests, return. */
+       if (rc || !rqset || cfs_list_empty(&rqset->set_requests)) {
+               int err;
+               if (rc)
+                       atomic_set(&set->set_completes, 0);
+               err = lov_fini_setattr_set(set);
+               RETURN(rc ? rc : err);
+       }
 
-        LASSERT(rqset->set_interpret == NULL);
-        rqset->set_interpret = lov_setattr_interpret;
-        rqset->set_arg = (void *)set;
+       LASSERT(rqset->set_interpret == NULL);
+       rqset->set_interpret = lov_setattr_interpret;
+       rqset->set_arg = (void *)set;
 
-        RETURN(0);
+       RETURN(0);
 }
 
 static int lov_change_cbdata(struct obd_export *exp,
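
lov_setattr_async() above ends with a common shape: if setup failed or nothing was queued, the request set is finalized inline; otherwise an interpret callback and its argument are parked on the ptlrpc set and the final accounting (clearing set_completes on error) happens in the callback. A compact sketch of that flow under hypothetical demo_* names; the real ptlrpc types are not reproduced here:

#include <linux/atomic.h>
#include <linux/list.h>

struct demo_set {
        atomic_t          ds_completes;         /* cleared if the async op failed */
};

struct demo_rqset {
        struct list_head  dr_requests;
        int             (*dr_interpret)(struct demo_rqset *, void *, int);
        void             *dr_arg;
};

static int demo_fini_set(struct demo_set *set)
{
        /* merge per-target results here; return 0 on success */
        return 0;
}

static int demo_interpret(struct demo_rqset *rqset, void *data, int rc)
{
        struct demo_set *set = data;

        if (rc)
                atomic_set(&set->ds_completes, 0);
        return rc ? rc : demo_fini_set(set);
}

static int demo_submit(struct demo_rqset *rqset, struct demo_set *set, int rc)
{
        if (rc || list_empty(&rqset->dr_requests)) {
                int err;

                if (rc)
                        atomic_set(&set->ds_completes, 0);
                err = demo_fini_set(set);
                return rc ? rc : err;           /* finished synchronously */
        }

        /* otherwise let the callback do the final accounting */
        rqset->dr_interpret = demo_interpret;
        rqset->dr_arg = set;
        return 0;
}
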
@@ -1353,15 +1354,15 @@ static int lov_find_cbdata(struct obd_export *exp,
 
 int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc)
 {
-        struct lov_request_set *lovset = (struct lov_request_set *)data;
-        int err;
-        ENTRY;
+       struct lov_request_set *lovset = (struct lov_request_set *)data;
+       int err;
+       ENTRY;
 
-        if (rc)
-                cfs_atomic_set(&lovset->set_completes, 0);
+       if (rc)
+               atomic_set(&lovset->set_completes, 0);
 
-        err = lov_fini_statfs_set(lovset);
-        RETURN(rc ? rc : err);
+       err = lov_fini_statfs_set(lovset);
+       RETURN(rc ? rc : err);
 }
 
 static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
@@ -1391,18 +1392,18 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
                         break;
         }
 
-        if (rc || cfs_list_empty(&rqset->set_requests)) {
-                int err;
-                if (rc)
-                        cfs_atomic_set(&set->set_completes, 0);
-                err = lov_fini_statfs_set(set);
-                RETURN(rc ? rc : err);
-        }
+       if (rc || cfs_list_empty(&rqset->set_requests)) {
+               int err;
+               if (rc)
+                       atomic_set(&set->set_completes, 0);
+               err = lov_fini_statfs_set(set);
+               RETURN(rc ? rc : err);
+       }
 
-        LASSERT(rqset->set_interpret == NULL);
-        rqset->set_interpret = lov_statfs_interpret;
-        rqset->set_arg = (void *)set;
-        RETURN(0);
+       LASSERT(rqset->set_interpret == NULL);
+       rqset->set_interpret = lov_statfs_interpret;
+       rqset->set_arg = (void *)set;
+       RETURN(0);
 }
 
 static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
index 13ed111..1bb86bc 100644 (file)
@@ -438,7 +438,7 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie,
 
        (*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n",
                r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm,
-               lsm->lsm_magic, cfs_atomic_read(&lsm->lsm_refc),
+               lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
                lsm->lsm_stripe_count, lsm->lsm_layout_gen);
        for (i = 0; i < r0->lo_nr; ++i) {
                struct lu_object *sub;
@@ -462,7 +462,7 @@ static int lov_print_released(const struct lu_env *env, void *cookie,
        (*p)(env, cookie,
                "released: %s, lsm{%p 0x%08X %d %u %u}:\n",
                lov->lo_layout_invalid ? "invalid" : "valid", lsm,
-               lsm->lsm_magic, cfs_atomic_read(&lsm->lsm_refc),
+               lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
                lsm->lsm_stripe_count, lsm->lsm_layout_gen);
        return 0;
 }
@@ -499,7 +499,7 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
         * hit this assertion.
         * Anyway, it's still okay to call attr_get w/o type guard as layout
         * can't go if locks exist. */
-       /* LASSERT(cfs_atomic_read(&lsm->lsm_refc) > 1); */
+       /* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */
 
        if (!r0->lo_attr_valid) {
                struct lov_stripe_md    *lsm = lov->lo_lsm;
@@ -672,13 +672,13 @@ static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
        struct l_wait_info lwi = { 0 };
        ENTRY;
 
-       while (cfs_atomic_read(&lov->lo_active_ios) > 0) {
+       while (atomic_read(&lov->lo_active_ios) > 0) {
                CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
                        PFID(lu_object_fid(lov2lu(lov))),
-                       cfs_atomic_read(&lov->lo_active_ios));
+                       atomic_read(&lov->lo_active_ios));
 
                l_wait_event(lov->lo_waitq,
-                            cfs_atomic_read(&lov->lo_active_ios) == 0, &lwi);
+                            atomic_read(&lov->lo_active_ios) == 0, &lwi);
        }
        RETURN(0);
 }
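
The loop above just blocks until lo_active_ios drains to zero. An equivalent, expressed with the generic wait_event() primitive and a hypothetical demo_obj standing in for lov_object:

#include <linux/atomic.h>
#include <linux/wait.h>

struct demo_obj {
        atomic_t                do_active_ios;
        wait_queue_head_t       do_waitq;
};

static void demo_layout_wait(struct demo_obj *obj)
{
        /* the IO teardown path does wake_up_all() when the count hits 0 */
        wait_event(obj->do_waitq, atomic_read(&obj->do_active_ios) == 0);
}
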
@@ -724,7 +724,7 @@ static int lov_layout_change(const struct lu_env *unused,
        if (result == 0) {
                old_ops->llo_fini(env, lov, &lov->u);
 
-               LASSERT(cfs_atomic_read(&lov->lo_active_ios) == 0);
+               LASSERT(atomic_read(&lov->lo_active_ios) == 0);
 
                lov->lo_type = LLT_EMPTY;
                result = new_ops->llo_init(env,
@@ -762,7 +762,7 @@ int lov_object_init(const struct lu_env *env, struct lu_object *obj,
 
         ENTRY;
        init_rwsem(&lov->lo_type_guard);
-       cfs_atomic_set(&lov->lo_active_ios, 0);
+       atomic_set(&lov->lo_active_ios, 0);
        init_waitqueue_head(&lov->lo_waitq);
 
        cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
@@ -792,7 +792,7 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
 
        if (conf->coc_opc == OBJECT_CONF_WAIT) {
                if (lov->lo_layout_invalid &&
-                   cfs_atomic_read(&lov->lo_active_ios) > 0) {
+                   atomic_read(&lov->lo_active_ios) > 0) {
                        lov_conf_unlock(lov);
                        result = lov_layout_wait(env, lov);
                        lov_conf_lock(lov);
@@ -814,7 +814,7 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
        }
 
        /* will change layout - check if there still exists active IO. */
-       if (cfs_atomic_read(&lov->lo_active_ios) > 0) {
+       if (atomic_read(&lov->lo_active_ios) > 0) {
                lov->lo_layout_invalid = true;
                GOTO(out, result = -EBUSY);
        }
@@ -955,7 +955,7 @@ struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
        if (lov->lo_lsm != NULL) {
                lsm = lsm_addref(lov->lo_lsm);
                CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
-                       lsm, cfs_atomic_read(&lsm->lsm_refc),
+                       lsm, atomic_read(&lsm->lsm_refc),
                        lov->lo_layout_invalid, current);
        }
        lov_conf_thaw(lov);
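
lov_lsm_addref() takes its reference while the configuration is "frozen", so the lsm pointer cannot be swapped out underneath it. A sketch of that idea, assuming a reader/writer semaphore guards the layout pointer; the demo_* names are hypothetical:

#include <linux/atomic.h>
#include <linux/rwsem.h>

struct demo_layout {
        atomic_t                 dl_refc;
};

struct demo_file {
        struct rw_semaphore      df_conf_lock;  /* layout cannot change while read-held */
        struct demo_layout      *df_layout;
};

static struct demo_layout *demo_layout_addref(struct demo_file *file)
{
        struct demo_layout *layout = NULL;

        down_read(&file->df_conf_lock);         /* "freeze" the configuration */
        if (file->df_layout != NULL) {
                layout = file->df_layout;
                atomic_inc(&layout->dl_refc);
        }
        up_read(&file->df_conf_lock);
        return layout;
}
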
index 3c511fe..3a6f130 100644 (file)
@@ -307,34 +307,34 @@ static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count)
 }
 
 int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
-                    int pattern, int magic)
+                   int pattern, int magic)
 {
-        int i, lsm_size;
-        ENTRY;
+       int i, lsm_size;
+       ENTRY;
 
-        CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);
+       CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);
 
-        *lsmp = lsm_alloc_plain(stripe_count, &lsm_size);
-        if (!*lsmp) {
-                CERROR("can't allocate lsmp stripe_count %d\n", stripe_count);
-                RETURN(-ENOMEM);
-        }
+       *lsmp = lsm_alloc_plain(stripe_count, &lsm_size);
+       if (!*lsmp) {
+               CERROR("can't allocate lsmp stripe_count %d\n", stripe_count);
+               RETURN(-ENOMEM);
+       }
 
-       cfs_atomic_set(&(*lsmp)->lsm_refc, 1);
+       atomic_set(&(*lsmp)->lsm_refc, 1);
        spin_lock_init(&(*lsmp)->lsm_lock);
-        (*lsmp)->lsm_magic = magic;
-        (*lsmp)->lsm_stripe_count = stripe_count;
-        (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
-        (*lsmp)->lsm_pattern = pattern;
-        (*lsmp)->lsm_pool_name[0] = '\0';
-        (*lsmp)->lsm_layout_gen = 0;
+       (*lsmp)->lsm_magic = magic;
+       (*lsmp)->lsm_stripe_count = stripe_count;
+       (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
+       (*lsmp)->lsm_pattern = pattern;
+       (*lsmp)->lsm_pool_name[0] = '\0';
+       (*lsmp)->lsm_layout_gen = 0;
        if (stripe_count > 0)
                (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;
 
-        for (i = 0; i < stripe_count; i++)
-                loi_init((*lsmp)->lsm_oinfo[i]);
+       for (i = 0; i < stripe_count; i++)
+               loi_init((*lsmp)->lsm_oinfo[i]);
 
-        RETURN(lsm_size);
+       RETURN(lsm_size);
 }
 
 int lov_free_memmd(struct lov_stripe_md **lsmp)
@@ -343,8 +343,9 @@ int lov_free_memmd(struct lov_stripe_md **lsmp)
        int refc;
 
        *lsmp = NULL;
-       LASSERT(cfs_atomic_read(&lsm->lsm_refc) > 0);
-       if ((refc = cfs_atomic_dec_return(&lsm->lsm_refc)) == 0) {
+       refc = atomic_dec_return(&lsm->lsm_refc);
+       LASSERT(refc >= 0);
+       if (refc == 0) {
                LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
                lsm_op_find(lsm->lsm_magic)->lsm_free(lsm);
        }
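
The alloc/free pair above is a create-with-one-reference scheme; the rewritten lov_free_memmd() now captures the value returned by atomic_dec_return() and asserts that it never went negative before freeing at zero. A minimal stand-alone version with a hypothetical demo_md:

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/slab.h>

struct demo_md {
        atomic_t        dm_refc;
};

static struct demo_md *demo_md_alloc(void)
{
        struct demo_md *md = kzalloc(sizeof(*md), GFP_KERNEL);

        if (md != NULL)
                atomic_set(&md->dm_refc, 1);    /* creator's reference */
        return md;
}

static void demo_md_put(struct demo_md *md)
{
        int refc = atomic_dec_return(&md->dm_refc);

        WARN_ON(refc < 0);                      /* over-put means a refcount bug */
        if (refc == 0)
                kfree(md);
}
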
index c017ab4..14dd336 100644 (file)
 
 static void lov_pool_getref(struct pool_desc *pool)
 {
-        CDEBUG(D_INFO, "pool %p\n", pool);
-        cfs_atomic_inc(&pool->pool_refcount);
+       CDEBUG(D_INFO, "pool %p\n", pool);
+       atomic_inc(&pool->pool_refcount);
 }
 
 void lov_pool_putref(struct pool_desc *pool) 
 {
-        CDEBUG(D_INFO, "pool %p\n", pool);
-        if (cfs_atomic_dec_and_test(&pool->pool_refcount)) {
-                LASSERT(cfs_hlist_unhashed(&pool->pool_hash));
-                LASSERT(cfs_list_empty(&pool->pool_list));
-                LASSERT(pool->pool_proc_entry == NULL);
-                lov_ost_pool_free(&(pool->pool_obds));
-                OBD_FREE_PTR(pool);
-                EXIT;
-        }
+       CDEBUG(D_INFO, "pool %p\n", pool);
+       if (atomic_dec_and_test(&pool->pool_refcount)) {
+               LASSERT(hlist_unhashed(&pool->pool_hash));
+               LASSERT(list_empty(&pool->pool_list));
+               LASSERT(pool->pool_proc_entry == NULL);
+               lov_ost_pool_free(&(pool->pool_obds));
+               OBD_FREE_PTR(pool);
+               EXIT;
+       }
 }
 
 void lov_pool_putref_locked(struct pool_desc *pool)
 {
-        CDEBUG(D_INFO, "pool %p\n", pool);
-        LASSERT(cfs_atomic_read(&pool->pool_refcount) > 1);
+       CDEBUG(D_INFO, "pool %p\n", pool);
+       LASSERT(atomic_read(&pool->pool_refcount) > 1);
 
-        cfs_atomic_dec(&pool->pool_refcount);
+       atomic_dec(&pool->pool_refcount);
 }
 
 /*
@@ -448,16 +448,16 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
         if (new_pool == NULL)
                 RETURN(-ENOMEM);
 
-        strncpy(new_pool->pool_name, poolname, LOV_MAXPOOLNAME);
-        new_pool->pool_name[LOV_MAXPOOLNAME] = '\0';
+       strncpy(new_pool->pool_name, poolname, LOV_MAXPOOLNAME);
+       new_pool->pool_name[LOV_MAXPOOLNAME] = '\0';
        new_pool->pool_lobd = obd;
-        /* ref count init to 1 because when created a pool is always used
-         * up to deletion
-         */
-        cfs_atomic_set(&new_pool->pool_refcount, 1);
-        rc = lov_ost_pool_init(&new_pool->pool_obds, 0);
-        if (rc)
-               GOTO(out_err, rc);
+       /* ref count init to 1 because when created a pool is always used
+        * up to deletion
+        */
+       atomic_set(&new_pool->pool_refcount, 1);
+       rc = lov_ost_pool_init(&new_pool->pool_obds, 0);
+       if (rc)
+               GOTO(out_err, rc);
 
         CFS_INIT_HLIST_NODE(&new_pool->pool_hash);
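
The pool code combines three operations on pool_refcount: creation pins the pool with an initial reference, the normal put frees it on the last drop, and the _locked variant only ever releases a non-final reference, so nothing can be freed under the caller's lock. A condensed sketch with a hypothetical demo_pool:

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/slab.h>

struct demo_pool {
        atomic_t        dp_refcount;
};

static struct demo_pool *demo_pool_new(void)
{
        struct demo_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

        if (pool != NULL) {
                /* a new pool stays referenced until it is deleted */
                atomic_set(&pool->dp_refcount, 1);
        }
        return pool;
}

static void demo_pool_getref(struct demo_pool *pool)
{
        atomic_inc(&pool->dp_refcount);
}

static void demo_pool_putref(struct demo_pool *pool)
{
        if (atomic_dec_and_test(&pool->dp_refcount))
                kfree(pool);                    /* last reference dropped */
}

static void demo_pool_putref_locked(struct demo_pool *pool)
{
        /* the caller guarantees another reference still exists, so the
         * pool cannot be freed while its lock is held */
        WARN_ON(atomic_read(&pool->dp_refcount) <= 1);
        atomic_dec(&pool->dp_refcount);
}
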
 
index bf5bfbc..548ed58 100644 (file)
 static void lov_init_set(struct lov_request_set *set)
 {
        set->set_count = 0;
-       cfs_atomic_set(&set->set_completes, 0);
-       cfs_atomic_set(&set->set_success, 0);
-       cfs_atomic_set(&set->set_finish_checked, 0);
+       atomic_set(&set->set_completes, 0);
+       atomic_set(&set->set_success, 0);
+       atomic_set(&set->set_finish_checked, 0);
        set->set_cookies = 0;
        CFS_INIT_LIST_HEAD(&set->set_list);
-       cfs_atomic_set(&set->set_refcount, 1);
+       atomic_set(&set->set_refcount, 1);
        init_waitqueue_head(&set->set_waitq);
 }
 
@@ -85,14 +85,14 @@ void lov_finish_set(struct lov_request_set *set)
 
 int lov_set_finished(struct lov_request_set *set, int idempotent)
 {
-       int completes = cfs_atomic_read(&set->set_completes);
+       int completes = atomic_read(&set->set_completes);
 
        CDEBUG(D_INFO, "check set %d/%d\n", completes, set->set_count);
 
        if (completes == set->set_count) {
                if (idempotent)
                        return 1;
-               if (cfs_atomic_inc_return(&set->set_finish_checked) == 1)
+               if (atomic_inc_return(&set->set_finish_checked) == 1)
                        return 1;
        }
        return 0;
@@ -104,9 +104,9 @@ void lov_update_set(struct lov_request_set *set,
        req->rq_complete = 1;
        req->rq_rc = rc;
 
-       cfs_atomic_inc(&set->set_completes);
+       atomic_inc(&set->set_completes);
        if (rc == 0)
-               cfs_atomic_inc(&set->set_success);
+               atomic_inc(&set->set_success);
 
        wake_up(&set->set_waitq);
 }
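
Taken together, lov_update_set() and lov_set_finished() implement per-reply accounting: every reply bumps set_completes (and set_success when rc == 0) and wakes waiters, and once everything has completed a non-idempotent finalizer is claimed exactly once via atomic_inc_return(). A stand-alone sketch with a hypothetical demo_reqset:

#include <linux/atomic.h>
#include <linux/wait.h>

struct demo_reqset {
        int                     ds_count;               /* sub-requests issued */
        atomic_t                ds_completes;           /* sub-requests completed */
        atomic_t                ds_success;             /* completed without error */
        atomic_t                ds_finish_checked;      /* finalization already claimed */
        wait_queue_head_t       ds_waitq;
};

/* per-reply accounting: bump the counters and wake any waiter */
static void demo_update_set(struct demo_reqset *set, int rc)
{
        atomic_inc(&set->ds_completes);
        if (rc == 0)
                atomic_inc(&set->ds_success);
        wake_up(&set->ds_waitq);
}

/* returns true when this caller may finalize the whole set */
static int demo_set_finished(struct demo_reqset *set, int idempotent)
{
        if (atomic_read(&set->ds_completes) != set->ds_count)
                return 0;                       /* replies still outstanding */
        if (idempotent)
                return 1;                       /* finishing twice is harmless */
        /* otherwise only the first caller to raise the flag finishes */
        return atomic_inc_return(&set->ds_finish_checked) == 1;
}
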
@@ -199,17 +199,17 @@ static int common_attr_done(struct lov_request_set *set)
         int rc = 0, attrset = 0;
         ENTRY;
 
-        LASSERT(set->set_oi != NULL);
+       LASSERT(set->set_oi != NULL);
 
-        if (set->set_oi->oi_oa == NULL)
-                RETURN(0);
+       if (set->set_oi->oi_oa == NULL)
+               RETURN(0);
 
-        if (!cfs_atomic_read(&set->set_success))
-                RETURN(-EIO);
+       if (!atomic_read(&set->set_success))
+               RETURN(-EIO);
 
-        OBDO_ALLOC(tmp_oa);
-        if (tmp_oa == NULL)
-                GOTO(out, rc = -ENOMEM);
+       OBDO_ALLOC(tmp_oa);
+       if (tmp_oa == NULL)
+               GOTO(out, rc = -ENOMEM);
 
         cfs_list_for_each (pos, &set->set_list) {
                 req = cfs_list_entry(pos, struct lov_request, rq_link);
@@ -245,18 +245,18 @@ out:
 
 int lov_fini_getattr_set(struct lov_request_set *set)
 {
-        int rc = 0;
-        ENTRY;
+       int rc = 0;
+       ENTRY;
 
-        if (set == NULL)
-                RETURN(0);
-        LASSERT(set->set_exp);
-        if (cfs_atomic_read(&set->set_completes))
-                rc = common_attr_done(set);
+       if (set == NULL)
+               RETURN(0);
+       LASSERT(set->set_exp);
+       if (atomic_read(&set->set_completes))
+               rc = common_attr_done(set);
 
-        lov_put_reqset(set);
+       lov_put_reqset(set);
 
-        RETURN(rc);
+       RETURN(rc);
 }
 
 /* The callback for osc_getattr_async that finalizes a request info when a
@@ -329,18 +329,18 @@ out_set:
 
 int lov_fini_destroy_set(struct lov_request_set *set)
 {
-        ENTRY;
+       ENTRY;
 
-        if (set == NULL)
-                RETURN(0);
-        LASSERT(set->set_exp);
-        if (cfs_atomic_read(&set->set_completes)) {
-                /* FIXME update qos data here */
-        }
+       if (set == NULL)
+               RETURN(0);
+       LASSERT(set->set_exp);
+       if (atomic_read(&set->set_completes)) {
+               /* FIXME update qos data here */
+       }
 
-        lov_put_reqset(set);
+       lov_put_reqset(set);
 
-        RETURN(0);
+       RETURN(0);
 }
 
 int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
@@ -402,19 +402,19 @@ out_set:
 
 int lov_fini_setattr_set(struct lov_request_set *set)
 {
-        int rc = 0;
-        ENTRY;
+       int rc = 0;
+       ENTRY;
 
-        if (set == NULL)
-                RETURN(0);
-        LASSERT(set->set_exp);
-        if (cfs_atomic_read(&set->set_completes)) {
-                rc = common_attr_done(set);
-                /* FIXME update qos data here */
-        }
+       if (set == NULL)
+               RETURN(0);
+       LASSERT(set->set_exp);
+       if (atomic_read(&set->set_completes)) {
+               rc = common_attr_done(set);
+               /* FIXME update qos data here */
+       }
 
-        lov_put_reqset(set);
-        RETURN(rc);
+       lov_put_reqset(set);
+       RETURN(rc);
 }
 
 int lov_update_setattr_set(struct lov_request_set *set,
@@ -558,18 +558,18 @@ int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,int success)
 
 int lov_fini_statfs_set(struct lov_request_set *set)
 {
-        int rc = 0;
-        ENTRY;
+       int rc = 0;
+       ENTRY;
 
-        if (set == NULL)
-                RETURN(0);
+       if (set == NULL)
+               RETURN(0);
 
-        if (cfs_atomic_read(&set->set_completes)) {
-                rc = lov_fini_statfs(set->set_obd, set->set_oi->oi_osfs,
-                                     cfs_atomic_read(&set->set_success));
-        }
-        lov_put_reqset(set);
-        RETURN(rc);
+       if (atomic_read(&set->set_completes)) {
+               rc = lov_fini_statfs(set->set_obd, set->set_oi->oi_osfs,
+                                    atomic_read(&set->set_success));
+       }
+       lov_put_reqset(set);
+       RETURN(rc);
 }
 
 void lov_update_statfs(struct obd_statfs *osfs, struct obd_statfs *lov_sfs,
@@ -654,18 +654,18 @@ static int cb_statfs_update(void *cookie, int rc)
         int success;
         ENTRY;
 
-        lovreq = container_of(oinfo, struct lov_request, rq_oi);
-        set = lovreq->rq_rqset;
-        lovobd = set->set_obd;
-        lov = &lovobd->u.lov;
-        osfs = set->set_oi->oi_osfs;
-        lov_sfs = oinfo->oi_osfs;
-        success = cfs_atomic_read(&set->set_success);
-        /* XXX: the same is done in lov_update_common_set, however
-           lovset->set_exp is not initialized. */
-        lov_update_set(set, lovreq, rc);
-        if (rc)
-                GOTO(out, rc);
+       lovreq = container_of(oinfo, struct lov_request, rq_oi);
+       set = lovreq->rq_rqset;
+       lovobd = set->set_obd;
+       lov = &lovobd->u.lov;
+       osfs = set->set_oi->oi_osfs;
+       lov_sfs = oinfo->oi_osfs;
+       success = atomic_read(&set->set_success);
+       /* XXX: the same is done in lov_update_common_set, however
+          lovset->set_exp is not initialized. */
+       lov_update_set(set, lovreq, rc);
+       if (rc)
+               GOTO(out, rc);
 
         obd_getref(lovobd);
         tgt = lov->lov_tgts[lovreq->rq_idx];
@@ -687,7 +687,7 @@ out:
        if (set->set_oi->oi_flags & OBD_STATFS_PTLRPCD &&
            lov_set_finished(set, 0)) {
                lov_statfs_interpret(NULL, set, set->set_count !=
-                                    cfs_atomic_read(&set->set_success));
+                                    atomic_read(&set->set_success));
        }
 
        RETURN(0);
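
cb_statfs_update() recovers its lov_request from the embedded obd_info with container_of() before doing the accounting. The same mapping in isolation, using hypothetical demo_* structures:

#include <linux/kernel.h>       /* container_of() */

struct demo_info {
        int                     di_rc;
};

struct demo_request {
        int                     dr_idx;
        struct demo_info        dr_oi;          /* embedded; handed to callbacks */
};

static struct demo_request *demo_req_from_info(struct demo_info *oinfo)
{
        /* map the embedded member back to its enclosing request */
        return container_of(oinfo, struct demo_request, dr_oi);
}
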
index 4090be0..e2fa73a 100644 (file)
@@ -132,12 +132,12 @@ static struct lu_device *lovsub_device_fini(const struct lu_env *env,
 }
 
 static struct lu_device *lovsub_device_free(const struct lu_env *env,
-                                            struct lu_device *d)
+                                           struct lu_device *d)
 {
        struct lovsub_device *lsd  = lu2lovsub_dev(d);
        struct lu_device     *next = cl2lu_dev(lsd->acid_next);
 
-       if (cfs_atomic_read(&d->ld_ref) && d->ld_site) {
+       if (atomic_read(&d->ld_ref) && d->ld_site) {
                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
                lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
        }
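
lovsub_device_free() checks ld_ref before tearing the device down and dumps the site when references are still held, i.e. a leaked reference. A minimal version of that sanity check with a hypothetical demo_device:

#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct demo_device {
        atomic_t        ddv_ref;
};

static void demo_device_free(struct demo_device *dev)
{
        /* a non-zero count here means someone leaked a reference */
        if (atomic_read(&dev->ddv_ref) != 0)
                pr_err("freeing device with %d references still held\n",
                       atomic_read(&dev->ddv_ref));
        kfree(dev);
}
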
index ae814d4..48b3b8e 100644 (file)
@@ -88,7 +88,7 @@ static void nodemap_getref(struct lu_nodemap *nodemap)
 void nodemap_putref(struct lu_nodemap *nodemap)
 {
        LASSERT(nodemap != NULL);
-       LASSERT(cfs_atomic_read(&nodemap->nm_refcount) > 0);
+       LASSERT(atomic_read(&nodemap->nm_refcount) > 0);
 
        if (atomic_dec_and_test(&nodemap->nm_refcount))
                nodemap_destroy(nodemap);