LU-3963 libcfs: convert include/lclient/ldlm/lfsck cfs_atomic 72/7072/9
author James Simmons <uja.ornl@gmail.com>
Fri, 10 Jan 2014 17:02:12 +0000 (12:02 -0500)
committer Oleg Drokin <oleg.drokin@intel.com>
Mon, 13 Jan 2014 19:19:00 +0000 (19:19 +0000)
This patch converts all cfs_atomic primitives to the standard Linux
kernel atomic_t API in the lustre/include, lclient, ldlm and lfsck
directories.
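
For illustration only (not part of the patch itself), the following is a
minimal sketch of the mechanical rename the hunks below apply, assuming the
libcfs wrappers are one-to-one aliases of the kernel primitives; the
structure and helper names here are hypothetical:

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical refcounted structure; before the conversion the counter
 * would have been declared as "cfs_atomic_t er_ref;". */
struct example_ref {
	atomic_t	er_ref;
};

static inline void example_ref_init(struct example_ref *er)
{
	atomic_set(&er->er_ref, 1);		/* was cfs_atomic_set() */
}

static inline void example_ref_get(struct example_ref *er)
{
	atomic_inc(&er->er_ref);		/* was cfs_atomic_inc() */
}

/* Returns true when the last reference is dropped. */
static inline bool example_ref_put(struct example_ref *er)
{
	return atomic_dec_and_test(&er->er_ref); /* was cfs_atomic_dec_and_test() */
}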

Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Change-Id: Ie746993b917bd6ea8c2403a47488ef0e5a06d6fb
Reviewed-on: http://review.whamcloud.com/7072
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Bob Glossman <bob.glossman@intel.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
27 files changed:
lustre/include/cl_object.h
lustre/include/dt_object.h
lustre/include/lclient.h
lustre/include/liblustre.h
lustre/include/lu_object.h
lustre/include/lustre_capa.h
lustre/include/lustre_disk.h
lustre/include/lustre_dlm.h
lustre/include/lustre_export.h
lustre/include/lustre_import.h
lustre/include/lustre_log.h
lustre/include/lustre_net.h
lustre/include/lustre_sec.h
lustre/include/obd.h
lustre/include/obd_class.h
lustre/include/obd_support.h
lustre/include/obd_target.h
lustre/lclient/glimpse.c
lustre/lclient/lcommon_cl.c
lustre/ldlm/ldlm_internal.h
lustre/ldlm/ldlm_lib.c
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_pool.c
lustre/ldlm/ldlm_request.c
lustre/ldlm/ldlm_resource.c
lustre/lfsck/lfsck_internal.h

index 0aedd62..8a0aaac 100644 (file)
@@ -714,13 +714,13 @@ enum cl_page_flags {
  * cl_page::cp_owner (when set).
  */
 struct cl_page {
-        /** Reference counter. */
-        cfs_atomic_t             cp_ref;
-        /** An object this page is a part of. Immutable after creation. */
-        struct cl_object        *cp_obj;
-        /** List of slices. Immutable after creation. */
-        cfs_list_t               cp_layers;
-       struct page             *cp_vmpage;
+       /** Reference counter. */
+       atomic_t                cp_ref;
+       /** An object this page is a part of. Immutable after creation. */
+       struct cl_object        *cp_obj;
+       /** List of slices. Immutable after creation. */
+       cfs_list_t              cp_layers;
+       struct page             *cp_vmpage;
        /**
         * Page state. This field is const to avoid accidental update, it is
         * modified only internally within cl_page.c. Protected by a VM lock.
@@ -1073,8 +1073,8 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
 {
        if (page->cp_type == CPT_CACHEABLE)
                ++refc;
-       LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
-       return (cfs_atomic_read(&page->cp_ref) > refc);
+       LASSERT(atomic_read(&page->cp_ref) > 0);
+       return (atomic_read(&page->cp_ref) > refc);
 }
 #define cl_page_in_use(pg)       __page_in_use(pg, 1)
 #define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
@@ -1505,10 +1505,10 @@ struct cl_lock_closure {
  * Layered client lock.
  */
 struct cl_lock {
-        /** Reference counter. */
-        cfs_atomic_t          cll_ref;
-        /** List of slices. Immutable after creation. */
-        cfs_list_t            cll_layers;
+       /** Reference counter. */
+       atomic_t              cll_ref;
+       /** List of slices. Immutable after creation. */
+       cfs_list_t            cll_layers;
         /**
          * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
          * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
@@ -2547,8 +2547,8 @@ enum cache_stats_item {
  * Stats for a generic cache (similar to inode, lu_object, etc. caches).
  */
 struct cache_stats {
-        const char    *cs_name;
-        cfs_atomic_t   cs_stats[CS_NR];
+       const char      *cs_name;
+       atomic_t        cs_stats[CS_NR];
 };
 
 /** These are not exported so far */
@@ -2562,24 +2562,24 @@ int  cache_stats_print(const struct cache_stats *cs,
  * clients to co-exist in the single address space.
  */
 struct cl_site {
-        struct lu_site        cs_lu;
-        /**
-         * Statistical counters. Atomics do not scale, something better like
-         * per-cpu counters is needed.
-         *
-         * These are exported as /proc/fs/lustre/llite/.../site
-         *
-         * When interpreting keep in mind that both sub-locks (and sub-pages)
-         * and top-locks (and top-pages) are accounted here.
-         */
-        struct cache_stats    cs_pages;
-        struct cache_stats    cs_locks;
-        cfs_atomic_t          cs_pages_state[CPS_NR];
-        cfs_atomic_t          cs_locks_state[CLS_NR];
+       struct lu_site          cs_lu;
+       /**
+        * Statistical counters. Atomics do not scale, something better like
+        * per-cpu counters is needed.
+        *
+        * These are exported as /proc/fs/lustre/llite/.../site
+        *
+        * When interpreting keep in mind that both sub-locks (and sub-pages)
+        * and top-locks (and top-pages) are accounted here.
+        */
+       struct cache_stats      cs_pages;
+       struct cache_stats      cs_locks;
+       atomic_t                cs_pages_state[CPS_NR];
+       atomic_t                cs_locks_state[CLS_NR];
 };
 
-int  cl_site_init (struct cl_site *s, struct cl_device *top);
-void cl_site_fini (struct cl_site *s);
+int  cl_site_init(struct cl_site *s, struct cl_device *top);
+void cl_site_fini(struct cl_site *s);
 void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
 
 /**
@@ -2729,7 +2729,7 @@ static inline void *cl_object_page_slice(struct cl_object *clob,
 static inline int cl_object_refc(struct cl_object *clob)
 {
        struct lu_object_header *header = clob->co_lu.lo_header;
-       return cfs_atomic_read(&header->loh_ref);
+       return atomic_read(&header->loh_ref);
 }
 
 /** @} cl_object */
@@ -3135,11 +3135,11 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
  */
 struct cl_sync_io {
        /** number of pages yet to be transferred. */
-       cfs_atomic_t            csi_sync_nr;
+       atomic_t                csi_sync_nr;
        /** error code. */
        int                     csi_sync_rc;
        /** barrier of destroy this structure */
-       cfs_atomic_t            csi_barrier;
+       atomic_t                csi_barrier;
        /** completion to be signaled when transfer is complete. */
        wait_queue_head_t       csi_waitq;
 };
index bdf559d..8faf5f8 100644 (file)
@@ -690,7 +690,7 @@ struct local_oid_storage {
        cfs_list_t        los_list;
 
        /* how many handle's reference this los has */
-       cfs_atomic_t      los_refcount;
+       atomic_t          los_refcount;
        struct dt_device *los_dev;
        struct dt_object *los_obj;
 
index f165d14..04a3744 100644 (file)
@@ -201,12 +201,12 @@ struct ccc_object {
          * we don't need to hold any lock..
          */
         int                     cob_transient_pages;
-        /**
-         * Number of outstanding mmaps on this file.
-         *
-         * \see ll_vm_open(), ll_vm_close().
-         */
-        cfs_atomic_t            cob_mmap_cnt;
+       /**
+        * Number of outstanding mmaps on this file.
+        *
+        * \see ll_vm_open(), ll_vm_close().
+        */
+       atomic_t                cob_mmap_cnt;
 
        /**
         * various flags
@@ -447,13 +447,13 @@ void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
  * layer for recovery purposes.
  */
 struct cl_client_cache {
-       cfs_atomic_t            ccc_users;    /* # of users (OSCs) */
+       atomic_t                ccc_users;    /* # of users (OSCs) */
        cfs_list_t              ccc_lru;      /* LRU of cached clean pages */
        spinlock_t              ccc_lru_lock; /* lock for list */
-       cfs_atomic_t            ccc_lru_left; /* # of LRU entries available */
+       atomic_t                ccc_lru_left; /* # of LRU entries available */
        unsigned long           ccc_lru_max;  /* Max # of LRU entries */
        unsigned int            ccc_lru_shrinkers;  /* # of threads shrinking */
-       cfs_atomic_t            ccc_unstable_nr;    /* # of pages pinned */
+       atomic_t                ccc_unstable_nr;    /* # of pages pinned */
        wait_queue_head_t       ccc_unstable_waitq; /* Signaled on BRW commit */
 };
 
index 4aa78f0..882fd18 100644 (file)
@@ -437,9 +437,9 @@ typedef struct {
 } posix_acl_xattr_entry;
 
 struct posix_acl {
-        cfs_atomic_t           a_refcount;
-        unsigned int           a_count;
-        posix_acl_xattr_entry  a_entries[0];
+       atomic_t                a_refcount;
+       unsigned int            a_count;
+       posix_acl_xattr_entry   a_entries[0];
 };
 
 typedef struct {
index e5a977f..b3e2a55 100644 (file)
@@ -258,17 +258,17 @@ struct lu_device_type;
  * Device: a layer in the server side abstraction stacking.
  */
 struct lu_device {
-        /**
-         * reference count. This is incremented, in particular, on each object
-         * created at this layer.
-         *
-         * \todo XXX which means that atomic_t is probably too small.
-         */
-        cfs_atomic_t                       ld_ref;
-        /**
-         * Pointer to device type. Never modified once set.
-         */
-        struct lu_device_type       *ld_type;
+       /**
+        * reference count. This is incremented, in particular, on each object
+        * created at this layer.
+        *
+        * \todo XXX which means that atomic_t is probably too small.
+        */
+       atomic_t                           ld_ref;
+       /**
+        * Pointer to device type. Never modified once set.
+        */
+       struct lu_device_type             *ld_type;
         /**
          * Operation vector for this device.
          */
@@ -515,41 +515,41 @@ enum lu_object_header_attr {
  * whether object is backed by persistent storage entity.
  */
 struct lu_object_header {
-        /**
-         * Object flags from enum lu_object_header_flags. Set and checked
-         * atomically.
-         */
-        unsigned long          loh_flags;
-        /**
-         * Object reference count. Protected by lu_site::ls_guard.
-         */
-        cfs_atomic_t           loh_ref;
-        /**
-         * Fid, uniquely identifying this object.
-         */
-        struct lu_fid          loh_fid;
-        /**
-         * Common object attributes, cached for efficiency. From enum
-         * lu_object_header_attr.
-         */
-        __u32                  loh_attr;
-        /**
-         * Linkage into per-site hash table. Protected by lu_site::ls_guard.
-         */
-        cfs_hlist_node_t       loh_hash;
-        /**
-         * Linkage into per-site LRU list. Protected by lu_site::ls_guard.
-         */
-        cfs_list_t             loh_lru;
-        /**
-         * Linkage into list of layers. Never modified once set (except lately
-         * during object destruction). No locking is necessary.
-         */
-        cfs_list_t             loh_layers;
-        /**
-         * A list of references to this object, for debugging.
-         */
-        struct lu_ref          loh_reference;
+       /**
+        * Object flags from enum lu_object_header_flags. Set and checked
+        * atomically.
+        */
+       unsigned long           loh_flags;
+       /**
+        * Object reference count. Protected by lu_site::ls_guard.
+        */
+       atomic_t                loh_ref;
+       /**
+        * Fid, uniquely identifying this object.
+        */
+       struct lu_fid           loh_fid;
+       /**
+        * Common object attributes, cached for efficiency. From enum
+        * lu_object_header_attr.
+        */
+       __u32                   loh_attr;
+       /**
+        * Linkage into per-site hash table. Protected by lu_site::ls_guard.
+        */
+       cfs_hlist_node_t        loh_hash;
+       /**
+        * Linkage into per-site LRU list. Protected by lu_site::ls_guard.
+        */
+       cfs_list_t              loh_lru;
+       /**
+        * Linkage into list of layers. Never modified once set (except lately
+        * during object destruction). No locking is necessary.
+        */
+       cfs_list_t              loh_layers;
+       /**
+        * A list of references to this object, for debugging.
+        */
+       struct lu_ref           loh_reference;
 };
 
 struct fld;
@@ -697,8 +697,8 @@ void lu_types_stop(void);
  */
 static inline void lu_object_get(struct lu_object *o)
 {
-        LASSERT(cfs_atomic_read(&o->lo_header->loh_ref) > 0);
-        cfs_atomic_inc(&o->lo_header->loh_ref);
+       LASSERT(atomic_read(&o->lo_header->loh_ref) > 0);
+       atomic_inc(&o->lo_header->loh_ref);
 }
 
 /**
@@ -1103,24 +1103,24 @@ struct lu_context_key {
          */
         void   (*lct_exit)(const struct lu_context *ctx,
                            struct lu_context_key *key, void *data);
-        /**
-         * Internal implementation detail: index within lu_context::lc_value[]
-         * reserved for this key.
-         */
-        int      lct_index;
-        /**
-         * Internal implementation detail: number of values created for this
-         * key.
-         */
-        cfs_atomic_t lct_used;
+       /**
+        * Internal implementation detail: index within lu_context::lc_value[]
+        * reserved for this key.
+        */
+       int             lct_index;
+       /**
+        * Internal implementation detail: number of values created for this
+        * key.
+        */
+       atomic_t        lct_used;
        /**
         * Internal implementation detail: module for this key.
         */
-       struct module *lct_owner;
+       struct module   *lct_owner;
        /**
         * References to this key. For debugging.
         */
-       struct lu_ref  lct_reference;
+       struct lu_ref   lct_reference;
 };
 
 #define LU_KEY_INIT(mod, type)                                    \
index b665168..b83cd6f 100644 (file)
@@ -80,11 +80,11 @@ struct target_capa {
 };
 
 struct obd_capa {
-        cfs_list_t                c_list;       /* link to capa_list */
+       cfs_list_t              c_list;         /* link to capa_list */
 
-        struct lustre_capa        c_capa;       /* capa */
-        cfs_atomic_t              c_refc;       /* ref count */
-        cfs_time_t                c_expiry;     /* jiffies */
+       struct lustre_capa      c_capa;         /* capa */
+       atomic_t                c_refc;         /* ref count */
+       cfs_time_t              c_expiry;       /* jiffies */
        spinlock_t              c_lock; /* protect capa content */
        int                     c_site;
 
@@ -205,10 +205,10 @@ static inline struct obd_capa *alloc_capa(int site)
         if (unlikely(!ocapa))
                 return ERR_PTR(-ENOMEM);
 
-        CFS_INIT_LIST_HEAD(&ocapa->c_list);
-        cfs_atomic_set(&ocapa->c_refc, 1);
+       CFS_INIT_LIST_HEAD(&ocapa->c_list);
+       atomic_set(&ocapa->c_refc, 1);
        spin_lock_init(&ocapa->c_lock);
-        ocapa->c_site = site;
+       ocapa->c_site = site;
         if (ocapa->c_site == CAPA_SITE_CLIENT)
                 CFS_INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
         else
@@ -222,29 +222,29 @@ static inline struct obd_capa *alloc_capa(int site)
 
 static inline struct obd_capa *capa_get(struct obd_capa *ocapa)
 {
-        if (!ocapa)
-                return NULL;
+       if (!ocapa)
+               return NULL;
 
-        cfs_atomic_inc(&ocapa->c_refc);
-        return ocapa;
+       atomic_inc(&ocapa->c_refc);
+       return ocapa;
 }
 
 static inline void capa_put(struct obd_capa *ocapa)
 {
-        if (!ocapa)
-                return;
-
-        if (cfs_atomic_read(&ocapa->c_refc) == 0) {
-                DEBUG_CAPA(D_ERROR, &ocapa->c_capa, "refc is 0 for");
-                LBUG();
-        }
-
-        if (cfs_atomic_dec_and_test(&ocapa->c_refc)) {
-                LASSERT(cfs_list_empty(&ocapa->c_list));
-                if (ocapa->c_site == CAPA_SITE_CLIENT) {
-                        LASSERT(cfs_list_empty(&ocapa->u.cli.lli_list));
-                } else {
-                        cfs_hlist_node_t *hnode;
+       if (!ocapa)
+               return;
+
+       if (atomic_read(&ocapa->c_refc) == 0) {
+               DEBUG_CAPA(D_ERROR, &ocapa->c_capa, "refc is 0 for");
+               LBUG();
+       }
+
+       if (atomic_dec_and_test(&ocapa->c_refc)) {
+               LASSERT(cfs_list_empty(&ocapa->c_list));
+               if (ocapa->c_site == CAPA_SITE_CLIENT) {
+                       LASSERT(cfs_list_empty(&ocapa->u.cli.lli_list));
+               } else {
+                       cfs_hlist_node_t *hnode;
 
                         hnode = &ocapa->u.tgt.c_hash;
                         LASSERT(!hnode->next && !hnode->pprev);
index ff69a04..09b72a3 100644 (file)
@@ -486,20 +486,20 @@ static inline __u64 lcd_last_xid(struct lsd_client_data *lcd)
 struct ll_sb_info;
 
 struct lustre_sb_info {
-        int                       lsi_flags;
-        struct obd_device        *lsi_mgc;     /* mgc obd */
-        struct lustre_mount_data *lsi_lmd;     /* mount command info */
-        struct ll_sb_info        *lsi_llsbi;   /* add'l client sbi info */
+       int                       lsi_flags;
+       struct obd_device        *lsi_mgc;     /* mgc obd */
+       struct lustre_mount_data *lsi_lmd;     /* mount command info */
+       struct ll_sb_info        *lsi_llsbi;   /* add'l client sbi info */
        struct dt_device         *lsi_dt_dev;  /* dt device to access disk fs*/
-        cfs_atomic_t              lsi_mounts;  /* references to the srv_mnt */
+       atomic_t                  lsi_mounts;  /* references to the srv_mnt */
        char                      lsi_svname[MTI_NAME_MAXLEN];
        char                      lsi_osd_obdname[64];
        char                      lsi_osd_uuid[64];
        struct obd_export        *lsi_osd_exp;
        char                      lsi_osd_type[16];
        char                      lsi_fstype[16];
-        struct backing_dev_info   lsi_bdi;     /* each client mountpoint needs
-                                                  own backing_dev_info */
+       struct backing_dev_info   lsi_bdi;     /* each client mountpoint needs
+                                                 own backing_dev_info */
 };
 
 #define LSI_UMOUNT_FAILOVER              0x00200000
index d01b7e9..98e03c9 100644 (file)
@@ -249,20 +249,20 @@ struct ldlm_pool {
        /** Lock for protecting SLV/CLV updates. */
        spinlock_t              pl_lock;
        /** Number of allowed locks in in pool, both, client and server side. */
-       cfs_atomic_t            pl_limit;
+       atomic_t                pl_limit;
        /** Number of granted locks in */
-       cfs_atomic_t            pl_granted;
+       atomic_t                pl_granted;
        /** Grant rate per T. */
-       cfs_atomic_t            pl_grant_rate;
+       atomic_t                pl_grant_rate;
        /** Cancel rate per T. */
-       cfs_atomic_t            pl_cancel_rate;
+       atomic_t                pl_cancel_rate;
        /** Server lock volume (SLV). Protected by pl_lock. */
        __u64                   pl_server_lock_volume;
        /** Current biggest client lock volume. Protected by pl_lock. */
        __u64                   pl_client_lock_volume;
        /** Lock volume factor. SLV on client is calculated as following:
         *  server_slv * lock_volume_factor. */
-       cfs_atomic_t            pl_lock_volume_factor;
+       atomic_t                pl_lock_volume_factor;
        /** Time when last SLV from server was obtained. */
        time_t                  pl_recalc_time;
        /** Recalculation period for pool. */
@@ -389,7 +389,7 @@ struct ldlm_namespace {
        spinlock_t              ns_lock;
 
        /** big refcount (by bucket) */
-       cfs_atomic_t            ns_bref;
+       atomic_t                ns_bref;
 
        /**
         * Namespace connect flags supported by server (may be changed via
@@ -678,7 +678,7 @@ struct ldlm_lock {
         * This is how many users have pointers to actual structure, so that
         * we do not accidentally free lock structure that is in use.
         */
-       cfs_atomic_t            l_refc;
+       atomic_t                l_refc;
        /**
         * Internal spinlock protects l_resource.  We should hold this lock
         * first before taking res_lock.
@@ -942,7 +942,7 @@ struct ldlm_resource {
        /** Resource name */
        struct ldlm_res_id      lr_name;
        /** Reference count for this resource */
-       cfs_atomic_t            lr_refcount;
+       atomic_t                lr_refcount;
 
        /**
         * Interval trees (only for extent locks) for all modes of this resource
index 27ea76f..a52eeaf 100644 (file)
@@ -112,27 +112,27 @@ struct mgs_export_data {
  * It tracks access patterns to this export on a per-client-NID basis
  */
 struct nid_stat {
-        lnet_nid_t               nid;
-        cfs_hlist_node_t         nid_hash;
-        cfs_list_t               nid_list;
-        struct obd_device       *nid_obd;
-        struct proc_dir_entry   *nid_proc;
-        struct lprocfs_stats    *nid_stats;
-        struct lprocfs_stats    *nid_ldlm_stats;
-        cfs_atomic_t             nid_exp_ref_count; /* for obd_nid_stats_hash
+       lnet_nid_t              nid;
+       cfs_hlist_node_t        nid_hash;
+       cfs_list_t              nid_list;
+       struct obd_device       *nid_obd;
+       struct proc_dir_entry   *nid_proc;
+       struct lprocfs_stats    *nid_stats;
+       struct lprocfs_stats    *nid_ldlm_stats;
+       atomic_t                nid_exp_ref_count; /* for obd_nid_stats_hash
                                                            exp_nid_stats */
 };
 
 #define nidstat_getref(nidstat)                                                \
 do {                                                                           \
-        cfs_atomic_inc(&(nidstat)->nid_exp_ref_count);                         \
+       atomic_inc(&(nidstat)->nid_exp_ref_count);                         \
 } while(0)
 
 #define nidstat_putref(nidstat)                                                \
 do {                                                                           \
-        cfs_atomic_dec(&(nidstat)->nid_exp_ref_count);                         \
-        LASSERTF(cfs_atomic_read(&(nidstat)->nid_exp_ref_count) >= 0,          \
-                 "stat %p nid_exp_ref_count < 0\n", nidstat);                  \
+       atomic_dec(&(nidstat)->nid_exp_ref_count);                         \
+       LASSERTF(atomic_read(&(nidstat)->nid_exp_ref_count) >= 0,          \
+                "stat %p nid_exp_ref_count < 0\n", nidstat);                  \
 } while(0)
 
 enum obd_option {
@@ -149,26 +149,26 @@ enum obd_option {
  * attached to the same obd device.
  */
 struct obd_export {
-        /**
-         * Export handle, it's id is provided to client on connect
-         * Subsequent client RPCs contain this handle id to identify
-         * what export they are talking to.
-         */
-        struct portals_handle     exp_handle;
-        cfs_atomic_t              exp_refcount;
-        /**
-         * Set of counters below is to track where export references are
-         * kept. The exp_rpc_count is used for reconnect handling also,
-         * the cb_count and locks_count are for debug purposes only for now.
-         * The sum of them should be less than exp_refcount by 3
-         */
-        cfs_atomic_t              exp_rpc_count; /* RPC references */
-        cfs_atomic_t              exp_cb_count; /* Commit callback references */
+       /**
+        * Export handle, it's id is provided to client on connect
+        * Subsequent client RPCs contain this handle id to identify
+        * what export they are talking to.
+        */
+       struct portals_handle     exp_handle;
+       atomic_t              exp_refcount;
+       /**
+        * Set of counters below is to track where export references are
+        * kept. The exp_rpc_count is used for reconnect handling also,
+        * the cb_count and locks_count are for debug purposes only for now.
+        * The sum of them should be less than exp_refcount by 3
+        */
+       atomic_t              exp_rpc_count; /* RPC references */
+       atomic_t              exp_cb_count; /* Commit callback references */
        /** Number of queued replay requests to be processes */
-       cfs_atomic_t              exp_replay_count;
-        cfs_atomic_t              exp_locks_count; /** Lock references */
+       atomic_t                  exp_replay_count;
+       atomic_t              exp_locks_count; /** Lock references */
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
-        cfs_list_t                exp_locks_list;
+       cfs_list_t                exp_locks_list;
        spinlock_t                exp_locks_list_guard;
 #endif
         /** UUID of client connected to this export */
index 3beccba..e30f688 100644 (file)
@@ -156,13 +156,13 @@ struct import_state_hist {
  * Imports are representing client-side view to remote target.
  */
 struct obd_import {
-        /** Local handle (== id) for this import. */
-        struct portals_handle     imp_handle;
-        /** Reference counter */
-        cfs_atomic_t              imp_refcount;
-        struct lustre_handle      imp_dlm_handle; /* client's ldlm export */
-        /** Currently active connection */
-        struct ptlrpc_connection *imp_connection;
+       /** Local handle (== id) for this import. */
+       struct portals_handle     imp_handle;
+       /** Reference counter */
+       atomic_t                  imp_refcount;
+       struct lustre_handle      imp_dlm_handle; /* client's ldlm export */
+       /** Currently active connection */
+       struct ptlrpc_connection *imp_connection;
         /** PortalRPC client structure for this import */
         struct ptlrpc_client     *imp_client;
         /** List element for linking into pinger chain */
@@ -206,17 +206,17 @@ struct obd_import {
        /** Wait queue for those who need to wait for recovery completion */
        wait_queue_head_t         imp_recovery_waitq;
 
-        /** Number of requests currently in-flight */
-        cfs_atomic_t              imp_inflight;
-        /** Number of requests currently unregistering */
-        cfs_atomic_t              imp_unregistering;
-        /** Number of replay requests inflight */
-        cfs_atomic_t              imp_replay_inflight;
-        /** Number of currently happening import invalidations */
-        cfs_atomic_t              imp_inval_count;
-        /** Numbner of request timeouts */
-        cfs_atomic_t              imp_timeouts;
-        /** Current import state */
+       /** Number of requests currently in-flight */
+       atomic_t                  imp_inflight;
+       /** Number of requests currently unregistering */
+       atomic_t                  imp_unregistering;
+       /** Number of replay requests inflight */
+       atomic_t                  imp_replay_inflight;
+       /** Number of currently happening import invalidations */
+       atomic_t                  imp_inval_count;
+       /** Numbner of request timeouts */
+       atomic_t                  imp_timeouts;
+       /** Current import state */
         enum lustre_imp_state     imp_state;
         /** History of import states */
         struct import_state_hist  imp_state_hist[IMP_STATE_HIST_LEN];
index 682596e..a780b7c 100644 (file)
@@ -331,7 +331,7 @@ struct llog_handle {
        char                    *lgh_name;
        void                    *private_data;
        struct llog_operations  *lgh_logops;
-       cfs_atomic_t             lgh_refcount;
+       atomic_t                 lgh_refcount;
 };
 
 /* llog_osd.c */
@@ -353,11 +353,11 @@ struct llog_ctxt {
         struct obd_export       *loc_exp; /* parent "disk" export (e.g. MDS) */
         struct obd_import       *loc_imp; /* to use in RPC's: can be backward
                                              pointing import */
-        struct llog_operations  *loc_logops;
-        struct llog_handle      *loc_handle;
+       struct llog_operations  *loc_logops;
+       struct llog_handle      *loc_handle;
        struct mutex             loc_mutex; /* protect loc_imp */
-        cfs_atomic_t             loc_refcount;
-        long                     loc_flags; /* flags, see above defines */
+       atomic_t                 loc_refcount;
+       long                     loc_flags; /* flags, see above defines */
        struct dt_object        *loc_dir;
 };
 
@@ -401,19 +401,19 @@ static inline int llog_get_size(struct llog_handle *loghandle)
 
 static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt)
 {
-        cfs_atomic_inc(&ctxt->loc_refcount);
-        CDEBUG(D_INFO, "GETting ctxt %p : new refcount %d\n", ctxt,
-               cfs_atomic_read(&ctxt->loc_refcount));
-        return ctxt;
+       atomic_inc(&ctxt->loc_refcount);
+       CDEBUG(D_INFO, "GETting ctxt %p : new refcount %d\n", ctxt,
+              atomic_read(&ctxt->loc_refcount));
+       return ctxt;
 }
 
 static inline void llog_ctxt_put(struct llog_ctxt *ctxt)
 {
-        if (ctxt == NULL)
-                return;
-        LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, LI_POISON);
-        CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt,
-               cfs_atomic_read(&ctxt->loc_refcount) - 1);
+       if (ctxt == NULL)
+               return;
+       LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, LI_POISON);
+       CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt,
+              atomic_read(&ctxt->loc_refcount) - 1);
        __llog_ctxt_put(NULL, ctxt);
 }
 
index e7a5d1c..83c40cb 100644 (file)
  * Structure to single define portal connection.
  */
 struct ptlrpc_connection {
-        /** linkage for connections hash table */
-        cfs_hlist_node_t        c_hash;
-        /** Our own lnet nid for this connection */
-        lnet_nid_t              c_self;
-        /** Remote side nid for this connection */
-        lnet_process_id_t       c_peer;
-        /** UUID of the other side */
-        struct obd_uuid         c_remote_uuid;
-        /** reference counter for this connection */
-        cfs_atomic_t            c_refcount;
+       /** linkage for connections hash table */
+       cfs_hlist_node_t        c_hash;
+       /** Our own lnet nid for this connection */
+       lnet_nid_t              c_self;
+       /** Remote side nid for this connection */
+       lnet_process_id_t       c_peer;
+       /** UUID of the other side */
+       struct obd_uuid         c_remote_uuid;
+       /** reference counter for this connection */
+       atomic_t            c_refcount;
 };
 
 /** Client definition for PortalRPC */
@@ -574,11 +574,11 @@ typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);
  * returned.
  */
 struct ptlrpc_request_set {
-       cfs_atomic_t          set_refcount;
+       atomic_t          set_refcount;
        /** number of in queue requests */
-       cfs_atomic_t          set_new_count;
+       atomic_t          set_new_count;
        /** number of uncompleted requests */
-       cfs_atomic_t          set_remaining;
+       atomic_t          set_remaining;
        /** wait queue to wait on for request events */
        wait_queue_head_t           set_waitq;
        wait_queue_head_t          *set_wakeup_ptr;
@@ -683,16 +683,16 @@ struct ptlrpc_reply_state {
         __u64                  rs_transno;
         /** xid */
         __u64                  rs_xid;
-        struct obd_export     *rs_export;
+       struct obd_export     *rs_export;
        struct ptlrpc_service_part *rs_svcpt;
-        /** Lnet metadata handle for the reply */
-        lnet_handle_md_t       rs_md_h;
-        cfs_atomic_t           rs_refcount;
-
-        /** Context for the sevice thread */
-        struct ptlrpc_svc_ctx *rs_svc_ctx;
-        /** Reply buffer (actually sent to the client), encoded if needed */
-        struct lustre_msg     *rs_repbuf;       /* wrapper */
+       /** Lnet metadata handle for the reply */
+       lnet_handle_md_t       rs_md_h;
+       atomic_t               rs_refcount;
+
+       /** Context for the sevice thread */
+       struct ptlrpc_svc_ctx *rs_svc_ctx;
+       /** Reply buffer (actually sent to the client), encoded if needed */
+       struct lustre_msg     *rs_repbuf;       /* wrapper */
         /** Size of the reply buffer */
         int                    rs_repbuf_len;   /* wrapper buf length */
         /** Size of the reply message */
@@ -1235,7 +1235,7 @@ struct ptlrpc_nrs_pol_desc {
        /**
         * # of references on this descriptor
         */
-       cfs_atomic_t                            pd_refs;
+       atomic_t                                pd_refs;
 };
 
 /**
@@ -1478,7 +1478,7 @@ struct nrs_crrn_client {
         * the current round number.
         */
        __u64                           cc_sequence;
-       cfs_atomic_t                    cc_ref;
+       atomic_t                        cc_ref;
        /**
         * Round Robin quantum; the maximum number of RPCs the client is allowed
         * to schedule in a single batch of each round.
@@ -1866,10 +1866,10 @@ struct ptlrpc_request {
 
        unsigned int rq_nr_resend;
 
-        enum rq_phase rq_phase; /* one of RQ_PHASE_* */
-        enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
-        cfs_atomic_t rq_refcount;/* client-side refcount for SENT race,
-                                    server-side refcounf for multiple replies */
+       enum rq_phase rq_phase; /* one of RQ_PHASE_* */
+       enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
+       atomic_t rq_refcount;/* client-side refcount for SENT race,
+                                   server-side refcounf for multiple replies */
 
        /** Portal to which this request would be sent */
        short rq_request_portal;  /* XXX FIXME bug 249 */
@@ -2681,7 +2681,7 @@ struct ptlrpc_service_part {
        /** waitq to run, when adding stuff to srv_free_rs_list */
        wait_queue_head_t               scp_rep_waitq;
        /** # 'difficult' replies */
-       cfs_atomic_t                    scp_nreps_difficult;
+       atomic_t                        scp_nreps_difficult;
 };
 
 #define ptlrpc_service_for_each_part(part, i, svc)                     \
@@ -3278,24 +3278,24 @@ static inline int ptlrpc_status_ntoh(int n)
 static inline void
 ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
 {
-        if (req->rq_phase == new_phase)
-                return;
+       if (req->rq_phase == new_phase)
+               return;
 
-        if (new_phase == RQ_PHASE_UNREGISTERING) {
-                req->rq_next_phase = req->rq_phase;
-                if (req->rq_import)
-                        cfs_atomic_inc(&req->rq_import->imp_unregistering);
-        }
+       if (new_phase == RQ_PHASE_UNREGISTERING) {
+               req->rq_next_phase = req->rq_phase;
+               if (req->rq_import)
+                       atomic_inc(&req->rq_import->imp_unregistering);
+       }
 
-        if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
-                if (req->rq_import)
-                        cfs_atomic_dec(&req->rq_import->imp_unregistering);
-        }
+       if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
+               if (req->rq_import)
+                       atomic_dec(&req->rq_import->imp_unregistering);
+       }
 
-        DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
-                  ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
+       DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
+                 ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
 
-        req->rq_phase = new_phase;
+       req->rq_phase = new_phase;
 }
 
 /**
@@ -3360,16 +3360,16 @@ ptlrpc_client_wake_req(struct ptlrpc_request *req)
 static inline void
 ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
 {
-        LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
-        cfs_atomic_inc(&rs->rs_refcount);
+       LASSERT(atomic_read(&rs->rs_refcount) > 0);
+       atomic_inc(&rs->rs_refcount);
 }
 
 static inline void
 ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
 {
-        LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
-        if (cfs_atomic_dec_and_test(&rs->rs_refcount))
-                lustre_free_reply_state(rs);
+       LASSERT(atomic_read(&rs->rs_refcount) > 0);
+       if (atomic_dec_and_test(&rs->rs_refcount))
+               lustre_free_reply_state(rs);
 }
 
 /* Should only be called once per req */
index cd28ed4..23652c2 100644 (file)
@@ -514,10 +514,10 @@ struct ptlrpc_ctx_ops {
                                         PTLRPC_CTX_ERROR)
 
 struct ptlrpc_cli_ctx {
-        cfs_hlist_node_t        cc_cache;      /* linked into ctx cache */
-        cfs_atomic_t            cc_refcount;
-        struct ptlrpc_sec      *cc_sec;
-        struct ptlrpc_ctx_ops  *cc_ops;
+       cfs_hlist_node_t        cc_cache;      /* linked into ctx cache */
+       atomic_t                cc_refcount;
+       struct ptlrpc_sec      *cc_sec;
+       struct ptlrpc_ctx_ops  *cc_ops;
         cfs_time_t              cc_expire;     /* in seconds */
         unsigned int            cc_early_expire:1;
         unsigned long           cc_flags;
@@ -825,12 +825,12 @@ struct ptlrpc_sec_policy {
  * \see sptlrpc_import_sec_adapt().
  */
 struct ptlrpc_sec {
-        struct ptlrpc_sec_policy       *ps_policy;
-        cfs_atomic_t                    ps_refcount;
-        /** statistic only */
-        cfs_atomic_t                    ps_nctx;
-        /** unique identifier */
-        int                             ps_id;
+       struct ptlrpc_sec_policy       *ps_policy;
+       atomic_t                        ps_refcount;
+       /** statistic only */
+       atomic_t                        ps_nctx;
+       /** unique identifier */
+       int                             ps_id;
         struct sptlrpc_flavor           ps_flvr;
         enum lustre_sec_part            ps_part;
         /** after set, no more new context will be created */
@@ -859,8 +859,8 @@ static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
 
 
 struct ptlrpc_svc_ctx {
-        cfs_atomic_t                    sc_refcount;
-        struct ptlrpc_sec_policy       *sc_policy;
+       atomic_t                        sc_refcount;
+       struct ptlrpc_sec_policy       *sc_policy;
 };
 
 /*
index d6622b4..0f83c2a 100644 (file)
@@ -100,13 +100,13 @@ static inline void loi_init(struct lov_oinfo *loi)
 }
 
 struct lov_stripe_md {
-       cfs_atomic_t     lsm_refc;
+       atomic_t        lsm_refc;
        spinlock_t      lsm_lock;
-        pid_t            lsm_lock_owner; /* debugging */
+       pid_t           lsm_lock_owner; /* debugging */
 
-        /* maximum possible file size, might change as OSTs status changes,
-         * e.g. disconnected, deactivated */
-        __u64            lsm_maxbytes;
+       /* maximum possible file size, might change as OSTs status changes,
+        * e.g. disconnected, deactivated */
+       __u64            lsm_maxbytes;
         struct {
                 /* Public members. */
                struct ost_id lw_object_oi; /* lov object id/seq */
@@ -314,33 +314,33 @@ struct client_obd {
         cfs_list_t               cl_loi_hp_ready_list;
         cfs_list_t               cl_loi_write_list;
         cfs_list_t               cl_loi_read_list;
-        int                      cl_r_in_flight;
-        int                      cl_w_in_flight;
-        /* just a sum of the loi/lop pending numbers to be exported by /proc */
-       cfs_atomic_t             cl_pending_w_pages;
-       cfs_atomic_t             cl_pending_r_pages;
+       int                      cl_r_in_flight;
+       int                      cl_w_in_flight;
+       /* just a sum of the loi/lop pending numbers to be exported by /proc */
+       atomic_t                 cl_pending_w_pages;
+       atomic_t                 cl_pending_r_pages;
        __u32                    cl_max_pages_per_rpc;
-        int                      cl_max_rpcs_in_flight;
-        struct obd_histogram     cl_read_rpc_hist;
-        struct obd_histogram     cl_write_rpc_hist;
-        struct obd_histogram     cl_read_page_hist;
-        struct obd_histogram     cl_write_page_hist;
-        struct obd_histogram     cl_read_offset_hist;
-        struct obd_histogram     cl_write_offset_hist;
+       int                      cl_max_rpcs_in_flight;
+       struct obd_histogram     cl_read_rpc_hist;
+       struct obd_histogram     cl_write_rpc_hist;
+       struct obd_histogram     cl_read_page_hist;
+       struct obd_histogram     cl_write_page_hist;
+       struct obd_histogram     cl_read_offset_hist;
+       struct obd_histogram     cl_write_offset_hist;
 
        /* lru for osc caching pages */
        struct cl_client_cache  *cl_cache;
        cfs_list_t               cl_lru_osc; /* member of cl_cache->ccc_lru */
-       cfs_atomic_t            *cl_lru_left;
-       cfs_atomic_t             cl_lru_busy;
-       cfs_atomic_t             cl_lru_shrinkers;
-       cfs_atomic_t             cl_lru_in_list;
+       atomic_t                *cl_lru_left;
+       atomic_t                 cl_lru_busy;
+       atomic_t                 cl_lru_shrinkers;
+       atomic_t                 cl_lru_in_list;
        cfs_list_t               cl_lru_list; /* lru page list */
        client_obd_lock_t        cl_lru_list_lock; /* page list protector */
-       cfs_atomic_t             cl_unstable_count;
+       atomic_t                 cl_unstable_count;
 
-        /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
-        cfs_atomic_t             cl_destroy_in_flight;
+       /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
+       atomic_t             cl_destroy_in_flight;
        wait_queue_head_t        cl_destroy_waitq;
 
         struct mdc_rpc_lock     *cl_rpc_lock;
@@ -350,8 +350,8 @@ struct client_obd {
        struct semaphore         cl_mgc_sem;
        struct local_oid_storage *cl_mgc_los;
        struct dt_object        *cl_mgc_configs_dir;
-        cfs_atomic_t             cl_mgc_refcount;
-        struct obd_export       *cl_mgc_mgsexp;
+       atomic_t             cl_mgc_refcount;
+       struct obd_export       *cl_mgc_mgsexp;
 
         /* checksumming for data sent over the network */
         unsigned int             cl_checksum:1; /* 0 = disabled, 1 = enabled */
@@ -370,10 +370,10 @@ struct client_obd {
 #warning "please consider removing quotacheck compatibility code"
 #endif
 
-        /* sequence manager */
-        struct lu_client_seq    *cl_seq;
+       /* sequence manager */
+       struct lu_client_seq    *cl_seq;
 
-        cfs_atomic_t             cl_resends; /* resend count */
+       atomic_t             cl_resends; /* resend count */
 
        /* ptlrpc work for writeback in ptlrpcd context */
        void                    *cl_writeback_work;
@@ -422,26 +422,26 @@ struct lov_tgt_desc {
 };
 
 struct lov_obd {
-        struct lov_desc         desc;
-        struct lov_tgt_desc   **lov_tgts;              /* sparse array */
-        struct ost_pool         lov_packed;            /* all OSTs in a packed
-                                                          array */
+       struct lov_desc         desc;
+       struct lov_tgt_desc   **lov_tgts;               /* sparse array */
+       struct ost_pool         lov_packed;             /* all OSTs in a packed
+                                                          array */
        struct mutex            lov_lock;
-        struct obd_connect_data lov_ocd;
-        cfs_atomic_t            lov_refcount;
-        __u32                   lov_death_row;/* tgts scheduled to be deleted */
-        __u32                   lov_tgt_size;   /* size of tgts array */
-        int                     lov_connects;
-        int                     lov_pool_count;
-        cfs_hash_t             *lov_pools_hash_body; /* used for key access */
-        cfs_list_t              lov_pool_list; /* used for sequential access */
-        cfs_proc_dir_entry_t   *lov_pool_proc_entry;
-        enum lustre_sec_part    lov_sp_me;
+       struct obd_connect_data lov_ocd;
+       atomic_t                lov_refcount;
+       __u32                   lov_death_row;  /* tgts scheduled to be deleted */
+       __u32                   lov_tgt_size;   /* size of tgts array */
+       int                     lov_connects;
+       int                     lov_pool_count;
+       cfs_hash_t             *lov_pools_hash_body; /* used for key access */
+       cfs_list_t              lov_pool_list;  /* used for sequential access */
+       cfs_proc_dir_entry_t   *lov_pool_proc_entry;
+       enum lustre_sec_part    lov_sp_me;
 
        /* Cached LRU and unstable data from upper layer */
        void                   *lov_cache;
 
-       struct rw_semaphore     lov_notify_lock;
+       struct rw_semaphore     lov_notify_lock;
 };
 
 struct lmv_tgt_desc {
@@ -745,14 +745,14 @@ struct obd_device {
         cfs_hash_t             *obd_uuid_hash;
         /* nid-export hash body */
         cfs_hash_t             *obd_nid_hash;
-        /* nid stats body */
-        cfs_hash_t             *obd_nid_stats_hash;
-        cfs_list_t              obd_nid_stats;
-        cfs_atomic_t            obd_refcount;
-        cfs_list_t              obd_exports;
-        cfs_list_t              obd_unlinked_exports;
-        cfs_list_t              obd_delayed_exports;
-        int                     obd_num_exports;
+       /* nid stats body */
+       cfs_hash_t             *obd_nid_stats_hash;
+       cfs_list_t              obd_nid_stats;
+       atomic_t            obd_refcount;
+       cfs_list_t              obd_exports;
+       cfs_list_t              obd_unlinked_exports;
+       cfs_list_t              obd_delayed_exports;
+       int                     obd_num_exports;
        spinlock_t              obd_nid_lock;
        struct ldlm_namespace  *obd_namespace;
        struct ptlrpc_client    obd_ldlm_client; /* XXX OST/MDS only */
@@ -773,10 +773,10 @@ struct obd_device {
         cfs_list_t              obd_exports_timed;
         time_t                  obd_eviction_timer; /* for ping evictor */
 
-        int                              obd_max_recoverable_clients;
-        cfs_atomic_t                     obd_connected_clients;
-        int                              obd_stale_clients;
-        int                              obd_delayed_clients;
+       int                     obd_max_recoverable_clients;
+       atomic_t                obd_connected_clients;
+       int                     obd_stale_clients;
+       int                     obd_delayed_clients;
         /* this lock protects all recovery list_heads, timer and
          * obd_next_recovery_transno value */
        spinlock_t                       obd_recovery_task_lock;
@@ -792,15 +792,15 @@ struct obd_device {
        int                              obd_recovery_timeout;
        int                              obd_recovery_ir_factor;
 
-        /* new recovery stuff from CMD2 */
-        struct target_recovery_data      obd_recovery_data;
-        int                              obd_replayed_locks;
-        cfs_atomic_t                     obd_req_replay_clients;
-        cfs_atomic_t                     obd_lock_replay_clients;
-        /* all lists are protected by obd_recovery_task_lock */
-        cfs_list_t                       obd_req_replay_queue;
-        cfs_list_t                       obd_lock_replay_queue;
-        cfs_list_t                       obd_final_req_queue;
+       /* new recovery stuff from CMD2 */
+       struct target_recovery_data      obd_recovery_data;
+       int                              obd_replayed_locks;
+       atomic_t                     obd_req_replay_clients;
+       atomic_t                     obd_lock_replay_clients;
+       /* all lists are protected by obd_recovery_task_lock */
+       cfs_list_t                       obd_req_replay_queue;
+       cfs_list_t                       obd_lock_replay_queue;
+       cfs_list_t                       obd_final_req_queue;
 
        union {
 #ifdef HAVE_SERVER_SUPPORT
@@ -827,9 +827,9 @@ struct obd_device {
        struct proc_dir_entry   *obd_svc_procroot;
        struct lprocfs_stats    *obd_svc_stats;
        struct lprocfs_seq_vars *obd_vars;
-       cfs_atomic_t           obd_evict_inprogress;
-       wait_queue_head_t      obd_evict_inprogress_waitq;
-       cfs_list_t             obd_evict_list; /* protected with pet_lock */
+       atomic_t                obd_evict_inprogress;
+       wait_queue_head_t       obd_evict_inprogress_waitq;
+       cfs_list_t              obd_evict_list; /* protected with pet_lock */
 
         /**
          * Ldlm pool part. Save last calculated SLV and Limit.
@@ -1224,7 +1224,7 @@ struct md_open_data {
        struct obd_client_handle        *mod_och;
        struct ptlrpc_request           *mod_open_req;
        struct ptlrpc_request           *mod_close_req;
-       cfs_atomic_t                     mod_refcount;
+       atomic_t                         mod_refcount;
        bool                             mod_is_create;
 };
 
@@ -1399,18 +1399,18 @@ static inline struct lustre_capa *oinfo_capa(struct obd_info *oinfo)
 
 static inline struct md_open_data *obd_mod_alloc(void)
 {
-        struct md_open_data *mod;
-        OBD_ALLOC_PTR(mod);
-        if (mod == NULL)
-                return NULL;
-        cfs_atomic_set(&mod->mod_refcount, 1);
-        return mod;
+       struct md_open_data *mod;
+       OBD_ALLOC_PTR(mod);
+       if (mod == NULL)
+               return NULL;
+       atomic_set(&mod->mod_refcount, 1);
+       return mod;
 }
 
-#define obd_mod_get(mod) cfs_atomic_inc(&(mod)->mod_refcount)
+#define obd_mod_get(mod) atomic_inc(&(mod)->mod_refcount)
 #define obd_mod_put(mod)                                          \
 ({                                                                \
-       if (cfs_atomic_dec_and_test(&(mod)->mod_refcount)) {      \
+       if (atomic_dec_and_test(&(mod)->mod_refcount)) {          \
                if ((mod)->mod_open_req)                          \
                        ptlrpc_req_finished((mod)->mod_open_req); \
                OBD_FREE_PTR(mod);                                \
@@ -1423,8 +1423,8 @@ void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent);
 /* return 1 if client should be resend request */
 static inline int client_should_resend(int resend, struct client_obd *cli)
 {
-        return cfs_atomic_read(&cli->cl_resends) ?
-               cfs_atomic_read(&cli->cl_resends) > resend : 1;
+       return atomic_read(&cli->cl_resends) ?
+              atomic_read(&cli->cl_resends) > resend : 1;
 }
 
 /**
index a540df9..2207359 100644 (file)
@@ -196,7 +196,7 @@ struct config_llog_data {
         struct ldlm_res_id          cld_resid;
         struct config_llog_instance cld_cfg;
         cfs_list_t                  cld_list_chain;
-        cfs_atomic_t                cld_refcount;
+       atomic_t                    cld_refcount;
        struct config_llog_data    *cld_sptlrpc;/* depended sptlrpc log */
        struct config_llog_data    *cld_params; /* common parameters log */
        struct config_llog_data    *cld_recover;/* imperative recover log */
@@ -235,53 +235,53 @@ extern void (*class_export_dump_hook)(struct obd_export *);
 
 #define class_export_rpc_inc(exp)                                       \
 ({                                                                      \
-        cfs_atomic_inc(&(exp)->exp_rpc_count);                          \
-        CDEBUG(D_INFO, "RPC GETting export %p : new rpc_count %d\n",    \
-               (exp), cfs_atomic_read(&(exp)->exp_rpc_count));          \
+       atomic_inc(&(exp)->exp_rpc_count);                              \
+       CDEBUG(D_INFO, "RPC GETting export %p : new rpc_count %d\n",    \
+              (exp), atomic_read(&(exp)->exp_rpc_count));              \
 })
 
 #define class_export_rpc_dec(exp)                                       \
 ({                                                                      \
-        LASSERT_ATOMIC_POS(&exp->exp_rpc_count);                        \
-        cfs_atomic_dec(&(exp)->exp_rpc_count);                          \
-        CDEBUG(D_INFO, "RPC PUTting export %p : new rpc_count %d\n",    \
-               (exp), cfs_atomic_read(&(exp)->exp_rpc_count));          \
+       LASSERT_ATOMIC_POS(&exp->exp_rpc_count);                        \
+       atomic_dec(&(exp)->exp_rpc_count);                              \
+       CDEBUG(D_INFO, "RPC PUTting export %p : new rpc_count %d\n",    \
+              (exp), atomic_read(&(exp)->exp_rpc_count));              \
 })
 
 #define class_export_lock_get(exp, lock)                                \
 ({                                                                      \
-        cfs_atomic_inc(&(exp)->exp_locks_count);                        \
-        __class_export_add_lock_ref(exp, lock);                         \
-        CDEBUG(D_INFO, "lock GETting export %p : new locks_count %d\n", \
-               (exp), cfs_atomic_read(&(exp)->exp_locks_count));        \
-        class_export_get(exp);                                          \
+       atomic_inc(&(exp)->exp_locks_count);                            \
+       __class_export_add_lock_ref(exp, lock);                         \
+       CDEBUG(D_INFO, "lock GETting export %p : new locks_count %d\n", \
+              (exp), atomic_read(&(exp)->exp_locks_count));            \
+       class_export_get(exp);                                          \
 })
 
 #define class_export_lock_put(exp, lock)                                \
 ({                                                                      \
-        LASSERT_ATOMIC_POS(&exp->exp_locks_count);                      \
-        cfs_atomic_dec(&(exp)->exp_locks_count);                        \
-        __class_export_del_lock_ref(exp, lock);                         \
-        CDEBUG(D_INFO, "lock PUTting export %p : new locks_count %d\n", \
-               (exp), cfs_atomic_read(&(exp)->exp_locks_count));        \
-        class_export_put(exp);                                          \
+       LASSERT_ATOMIC_POS(&exp->exp_locks_count);                      \
+       atomic_dec(&(exp)->exp_locks_count);                            \
+       __class_export_del_lock_ref(exp, lock);                         \
+       CDEBUG(D_INFO, "lock PUTting export %p : new locks_count %d\n", \
+              (exp), atomic_read(&(exp)->exp_locks_count));            \
+       class_export_put(exp);                                          \
 })
 
 #define class_export_cb_get(exp)                                        \
 ({                                                                      \
-        cfs_atomic_inc(&(exp)->exp_cb_count);                           \
-        CDEBUG(D_INFO, "callback GETting export %p : new cb_count %d\n",\
-               (exp), cfs_atomic_read(&(exp)->exp_cb_count));           \
-        class_export_get(exp);                                          \
+       atomic_inc(&(exp)->exp_cb_count);                               \
+       CDEBUG(D_INFO, "callback GETting export %p : new cb_count %d\n",\
+              (exp), atomic_read(&(exp)->exp_cb_count));               \
+       class_export_get(exp);                                          \
 })
 
 #define class_export_cb_put(exp)                                        \
 ({                                                                      \
-        LASSERT_ATOMIC_POS(&exp->exp_cb_count);                         \
-        cfs_atomic_dec(&(exp)->exp_cb_count);                           \
-        CDEBUG(D_INFO, "callback PUTting export %p : new cb_count %d\n",\
-               (exp), cfs_atomic_read(&(exp)->exp_cb_count));           \
-        class_export_put(exp);                                          \
+       LASSERT_ATOMIC_POS(&exp->exp_cb_count);                         \
+       atomic_dec(&(exp)->exp_cb_count);                               \
+       CDEBUG(D_INFO, "callback PUTting export %p : new cb_count %d\n",\
+              (exp), atomic_read(&(exp)->exp_cb_count));               \
+       class_export_put(exp);                                          \
 })
 
 /* genops.c */
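The class_export_lock_get()/class_export_lock_put() and class_export_cb_get()/class_export_cb_put() macros above pair a dedicated atomic counter with the main export reference count, so a leaked reference can be attributed to lock or callback users specifically. A minimal userspace analogue of that pattern, using C11 stdatomic in place of the kernel's atomic_t (illustrative names only, not the Lustre code):

#include <stdatomic.h>
#include <stdio.h>

struct export {
        atomic_int refcount;     /* plays the role of exp_refcount    */
        atomic_int locks_count;  /* plays the role of exp_locks_count */
};

static void export_lock_get(struct export *exp)
{
        int n = atomic_fetch_add(&exp->locks_count, 1) + 1;

        printf("lock GETting export %p : new locks_count %d\n", (void *)exp, n);
        atomic_fetch_add(&exp->refcount, 1);        /* like class_export_get() */
}

static void export_lock_put(struct export *exp)
{
        int n = atomic_fetch_sub(&exp->locks_count, 1) - 1;

        printf("lock PUTting export %p : new locks_count %d\n", (void *)exp, n);
        atomic_fetch_sub(&exp->refcount, 1);        /* like class_export_put() */
}

int main(void)
{
        struct export exp;

        atomic_init(&exp.refcount, 1);
        atomic_init(&exp.locks_count, 0);
        export_lock_get(&exp);
        export_lock_put(&exp);
        return 0;
}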
lustre/include/obd_support.h
index 210f273..db75316 100644 (file)
@@ -74,9 +74,9 @@ extern int at_early_margin;
 extern int at_extra;
 extern unsigned int obd_sync_filter;
 extern unsigned int obd_max_dirty_pages;
-extern cfs_atomic_t obd_unstable_pages;
-extern cfs_atomic_t obd_dirty_pages;
-extern cfs_atomic_t obd_dirty_transit_pages;
+extern atomic_t obd_unstable_pages;
+extern atomic_t obd_dirty_pages;
+extern atomic_t obd_dirty_transit_pages;
 extern unsigned int obd_alloc_fail_rate;
 extern char obd_jobid_var[];
 
@@ -517,7 +517,7 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
 #define OBD_FAIL_ONCE                           CFS_FAIL_ONCE
 #define OBD_FAILED                              CFS_FAILED
 
-extern cfs_atomic_t libcfs_kmemory;
+extern atomic_t libcfs_kmemory;
 
 #ifdef LPROCFS
 #define obd_memory_add(size)                                                  \
@@ -673,15 +673,15 @@ do {                                                                            \
        (ptr) = cptab == NULL ?                                               \
                vmalloc(size) :                                       \
                cfs_cpt_vmalloc(cptab, cpt, size);                            \
-        if (unlikely((ptr) == NULL)) {                                        \
-                CERROR("vmalloc of '" #ptr "' (%d bytes) failed\n",           \
-                       (int)(size));                                          \
-                CERROR(LPU64" total bytes allocated by Lustre, %d by LNET\n", \
-                       obd_memory_sum(), cfs_atomic_read(&libcfs_kmemory));   \
-        } else {                                                              \
+       if (unlikely((ptr) == NULL)) {                                        \
+               CERROR("vmalloc of '" #ptr "' (%d bytes) failed\n",           \
+                      (int)(size));                                          \
+               CERROR(LPU64" total bytes allocated by Lustre, %d by LNET\n", \
+                      obd_memory_sum(), atomic_read(&libcfs_kmemory));       \
+       } else {                                                              \
                memset(ptr, 0, size);                                         \
-                OBD_ALLOC_POST(ptr, size, "vmalloced");                       \
-        }                                                                     \
+               OBD_ALLOC_POST(ptr, size, "vmalloced");                       \
+       }                                                                     \
 } while(0)
 
 # define OBD_VMALLOC(ptr, size)                                                      \
@@ -859,11 +859,11 @@ do {                                                                            \
                        "("LPU64" bytes) allocated by Lustre, "                \
                        "%d total bytes by LNET\n",                            \
                        obd_memory_sum(),                                      \
-                      obd_pages_sum() << PAGE_CACHE_SHIFT,                     \
-                       obd_pages_sum(),                                       \
-                       cfs_atomic_read(&libcfs_kmemory));                     \
-        } else {                                                              \
-                obd_pages_add(0);                                             \
+                      obd_pages_sum() << PAGE_CACHE_SHIFT,                   \
+                      obd_pages_sum(),                                       \
+                      atomic_read(&libcfs_kmemory));                         \
+       } else {                                                              \
+               obd_pages_add(0);                                             \
                 CDEBUG(D_MALLOC, "alloc_pages '" #ptr "': %d page(s) / "      \
                        LPU64" bytes at %p.\n",                                \
                        (int)1,                                                \
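The OBD_VMALLOC/alloc-pages macros shown here account every allocation in a global counter and, when an allocation fails, report the Lustre total together with LNET's libcfs_kmemory counter. A rough userspace sketch of that "account as you allocate, dump the totals on failure" pattern (hypothetical names, stdatomic standing in for the kernel atomic_t):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static atomic_long mem_used;    /* stands in for obd_memory / libcfs_kmemory */

static void *tracked_zalloc(size_t size)
{
        void *ptr = malloc(size);

        if (ptr == NULL) {
                fprintf(stderr, "alloc of %zu bytes failed, "
                        "%ld bytes already allocated\n",
                        size, atomic_load(&mem_used));
                return NULL;
        }
        memset(ptr, 0, size);                     /* OBD_ALLOC zeroes memory   */
        atomic_fetch_add(&mem_used, (long)size);  /* like OBD_ALLOC_POST       */
        return ptr;
}

static void tracked_free(void *ptr, size_t size)
{
        atomic_fetch_sub(&mem_used, (long)size);
        free(ptr);
}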
lustre/include/obd_target.h
index d387aa1..cbe415c 100644 (file)
@@ -69,7 +69,7 @@ struct echo_obd {
        spinlock_t                      eo_lock;
        __u64                           eo_lastino;
        struct lustre_handle            eo_nl_lock;
-       cfs_atomic_t                    eo_prep;
+       atomic_t                        eo_prep;
 };
 
 struct ost_obd {
lustre/lclient/glimpse.c
index 0169cdf..3bf7261 100644 (file)
@@ -94,8 +94,8 @@ blkcnt_t dirty_cnt(struct inode *inode)
                 cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->page_tree,
                                                   results, 0, 1,
                                                   PAGECACHE_TAG_DIRTY);
-        if (cnt == 0 && cfs_atomic_read(&vob->cob_mmap_cnt) > 0)
-                cnt = 1;
+       if (cnt == 0 && atomic_read(&vob->cob_mmap_cnt) > 0)
+               cnt = 1;
 
 #endif
         return (cnt > 0) ? 1 : 0;
lustre/lclient/lcommon_cl.c
index 979d3de..f69dd87 100644 (file)
@@ -1220,7 +1220,7 @@ static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
        struct lu_object_header *header = obj->co_lu.lo_header;
        wait_queue_t           waiter;
 
-       if (unlikely(cfs_atomic_read(&header->loh_ref) != 1)) {
+       if (unlikely(atomic_read(&header->loh_ref) != 1)) {
                struct lu_site *site = obj->co_lu.lo_dev->ld_site;
                struct lu_site_bkt_data *bkt;
 
@@ -1231,7 +1231,7 @@ static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
 
                while (1) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
-                       if (cfs_atomic_read(&header->loh_ref) == 1)
+                       if (atomic_read(&header->loh_ref) == 1)
                                break;
                        waitq_wait(&waiter, TASK_UNINTERRUPTIBLE);
                }
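cl_object_put_last() above sleeps on the site waitqueue until loh_ref drops to 1, i.e. until the caller is left holding the only reference, before the object is torn down. A userspace analogue of that wait-for-last-reference pattern, with a condition variable standing in for the kernel waitqueue (illustrative only; in the kernel the task parks in TASK_UNINTERRUPTIBLE instead):

#include <pthread.h>
#include <stdatomic.h>

struct header {
        atomic_int      ref;    /* plays the role of loh_ref */
        pthread_mutex_t lock;
        pthread_cond_t  wq;
};

/* Drop a reference; whoever brings the count down to 1 wakes the waiter. */
static void header_put(struct header *h)
{
        if (atomic_fetch_sub(&h->ref, 1) - 1 == 1) {
                pthread_mutex_lock(&h->lock);
                pthread_cond_broadcast(&h->wq);
                pthread_mutex_unlock(&h->lock);
        }
}

/* Block until only the caller's reference remains. */
static void wait_for_last_ref(struct header *h)
{
        pthread_mutex_lock(&h->lock);
        while (atomic_load(&h->ref) != 1)
                pthread_cond_wait(&h->wq, &h->lock);
        pthread_mutex_unlock(&h->lock);
}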
lustre/ldlm/ldlm_internal.h
index ffc528b..aa2e34f 100644 (file)
@@ -87,7 +87,7 @@ static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client)
 /* ns_bref is the number of resources in this namespace */
 static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
 {
-       return cfs_atomic_read(&ns->ns_bref) == 0;
+       return atomic_read(&ns->ns_bref) == 0;
 }
 
 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, ldlm_side_t);
@@ -125,7 +125,7 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns);
 struct ldlm_cb_set_arg {
        struct ptlrpc_request_set       *set;
        int                              type; /* LDLM_{CP,BL,GL}_CALLBACK */
-       cfs_atomic_t                     restart;
+       atomic_t                         restart;
        cfs_list_t                      *list;
        union ldlm_gl_desc              *gl_desc; /* glimpse AST descriptor */
 };
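ldlm_cb_set_arg::restart is the per-batch counter that AST senders bump whenever a callback has to be retried; ldlm_run_ast_work() (in the ldlm_lock.c hunks below) turns a nonzero value into a single -ERESTART after the whole RPC set has completed. A stripped-down sketch of that "aggregate worker failures into one return code" pattern (hypothetical names):

#include <stdatomic.h>

struct batch_arg {
        atomic_int restart;     /* like ldlm_cb_set_arg::restart */
};

/* Called from each completion path; any worker may ask for a restart. */
static void worker_done(struct batch_arg *arg, int needs_restart)
{
        if (needs_restart)
                atomic_fetch_add(&arg->restart, 1);
}

/* Called once, after all workers have finished. */
static int batch_needs_restart(const struct batch_arg *arg)
{
        return atomic_load(&arg->restart) != 0;
}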
lustre/ldlm/ldlm_lib.c
index 83c19cf..b4c24fc 100644 (file)
@@ -352,14 +352,14 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
        cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
        if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
                cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
-        CFS_INIT_LIST_HEAD(&cli->cl_cache_waiters);
-        CFS_INIT_LIST_HEAD(&cli->cl_loi_ready_list);
-        CFS_INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
-        CFS_INIT_LIST_HEAD(&cli->cl_loi_write_list);
-        CFS_INIT_LIST_HEAD(&cli->cl_loi_read_list);
-        client_obd_list_lock_init(&cli->cl_loi_list_lock);
-       cfs_atomic_set(&cli->cl_pending_w_pages, 0);
-       cfs_atomic_set(&cli->cl_pending_r_pages, 0);
+       CFS_INIT_LIST_HEAD(&cli->cl_cache_waiters);
+       CFS_INIT_LIST_HEAD(&cli->cl_loi_ready_list);
+       CFS_INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
+       CFS_INIT_LIST_HEAD(&cli->cl_loi_write_list);
+       CFS_INIT_LIST_HEAD(&cli->cl_loi_read_list);
+       client_obd_list_lock_init(&cli->cl_loi_list_lock);
+       atomic_set(&cli->cl_pending_w_pages, 0);
+       atomic_set(&cli->cl_pending_r_pages, 0);
        cli->cl_r_in_flight = 0;
        cli->cl_w_in_flight = 0;
 
@@ -372,26 +372,26 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 
        /* lru for osc. */
        CFS_INIT_LIST_HEAD(&cli->cl_lru_osc);
-       cfs_atomic_set(&cli->cl_lru_shrinkers, 0);
-       cfs_atomic_set(&cli->cl_lru_busy, 0);
-       cfs_atomic_set(&cli->cl_lru_in_list, 0);
+       atomic_set(&cli->cl_lru_shrinkers, 0);
+       atomic_set(&cli->cl_lru_busy, 0);
+       atomic_set(&cli->cl_lru_in_list, 0);
        CFS_INIT_LIST_HEAD(&cli->cl_lru_list);
        client_obd_list_lock_init(&cli->cl_lru_list_lock);
-       cfs_atomic_set(&cli->cl_unstable_count, 0);
+       atomic_set(&cli->cl_unstable_count, 0);
 
        init_waitqueue_head(&cli->cl_destroy_waitq);
-       cfs_atomic_set(&cli->cl_destroy_in_flight, 0);
+       atomic_set(&cli->cl_destroy_in_flight, 0);
 #ifdef ENABLE_CHECKSUM
        /* Turn on checksumming by default. */
        cli->cl_checksum = 1;
-        /*
-         * The supported checksum types will be worked out at connect time
-         * Set cl_chksum* to CRC32 for now to avoid returning screwed info
-         * through procfs.
-         */
-        cli->cl_cksum_type = cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
+       /*
+        * The supported checksum types will be worked out at connect time
+        * Set cl_chksum* to CRC32 for now to avoid returning screwed info
+        * through procfs.
+        */
+       cli->cl_cksum_type = cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
 #endif
-        cfs_atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
+       atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
 
        /* This value may be reduced at connect time in
         * ptlrpc_connect_interpret() . We initialize it to only
@@ -980,12 +980,12 @@ int target_handle_connect(struct ptlrpc_request *req)
 no_export:
                 OBD_FAIL_TIMEOUT(OBD_FAIL_TGT_DELAY_CONNECT, 2 * obd_timeout);
         } else if (req->rq_export == NULL &&
-                   cfs_atomic_read(&export->exp_rpc_count) > 0) {
+                  atomic_read(&export->exp_rpc_count) > 0) {
                 LCONSOLE_WARN("%s: Client %s (at %s) refused connection, "
                               "still busy with %d references\n",
                               target->obd_name, cluuid.uuid,
                               libcfs_nid2str(req->rq_peer.nid),
-                              cfs_atomic_read(&export->exp_refcount));
+                             atomic_read(&export->exp_refcount));
                 GOTO(out, rc = -EBUSY);
         } else if (lustre_msg_get_conn_cnt(req->rq_reqmsg) == 1) {
                 if (!strstr(cluuid.uuid, "mdt"))
@@ -1037,8 +1037,8 @@ no_export:
                        int     k; /* known */
                        int     s; /* stale/evicted */
 
-                       c = cfs_atomic_read(&target->obd_connected_clients);
-                       i = cfs_atomic_read(&target->obd_lock_replay_clients);
+                       c = atomic_read(&target->obd_connected_clients);
+                       i = atomic_read(&target->obd_lock_replay_clients);
                        k = target->obd_max_recoverable_clients;
                        s = target->obd_stale_clients;
                        t = cfs_timer_deadline(&target->obd_recovery_timer);
@@ -1192,9 +1192,9 @@ dont_check_exports:
                        spin_unlock(&target->obd_recovery_task_lock);
                 }
 
-               cfs_atomic_inc(&target->obd_req_replay_clients);
-               cfs_atomic_inc(&target->obd_lock_replay_clients);
-               if (cfs_atomic_inc_return(&target->obd_connected_clients) ==
+               atomic_inc(&target->obd_req_replay_clients);
+               atomic_inc(&target->obd_lock_replay_clients);
+               if (atomic_inc_return(&target->obd_connected_clients) ==
                    target->obd_max_recoverable_clients)
                        wake_up(&target->obd_next_transno_waitq);
        }
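The recovery accounting above leans on atomic_inc_return(): the connecting client whose increment makes obd_connected_clients reach obd_max_recoverable_clients is the one that wakes obd_next_transno_waitq, with no extra locking around the check. A userspace sketch of that "last arrival signals the waiter" pattern (illustrative names):

#include <pthread.h>
#include <stdatomic.h>

struct recovery {
        atomic_int      connected;      /* like obd_connected_clients        */
        int             expected;       /* like obd_max_recoverable_clients  */
        pthread_mutex_t lock;
        pthread_cond_t  all_connected;
};

static void client_connected(struct recovery *r)
{
        /* fetch_add returns the old value, so +1 gives the new count */
        if (atomic_fetch_add(&r->connected, 1) + 1 == r->expected) {
                pthread_mutex_lock(&r->lock);
                pthread_cond_broadcast(&r->all_connected);
                pthread_mutex_unlock(&r->lock);
        }
}

static void wait_for_all_clients(struct recovery *r)
{
        pthread_mutex_lock(&r->lock);
        while (atomic_load(&r->connected) < r->expected)
                pthread_cond_wait(&r->all_connected, &r->lock);
        pthread_mutex_unlock(&r->lock);
}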
@@ -1319,23 +1319,23 @@ EXPORT_SYMBOL(target_destroy_export);
 static void target_request_copy_get(struct ptlrpc_request *req)
 {
        class_export_rpc_inc(req->rq_export);
-        LASSERT(cfs_list_empty(&req->rq_list));
-        CFS_INIT_LIST_HEAD(&req->rq_replay_list);
+       LASSERT(cfs_list_empty(&req->rq_list));
+       CFS_INIT_LIST_HEAD(&req->rq_replay_list);
 
        /* Increase refcount to keep request in queue. */
-       cfs_atomic_inc(&req->rq_refcount);
+       atomic_inc(&req->rq_refcount);
        /* Let export know it has replays to be handled. */
-        cfs_atomic_inc(&req->rq_export->exp_replay_count);
+       atomic_inc(&req->rq_export->exp_replay_count);
 }
 
 static void target_request_copy_put(struct ptlrpc_request *req)
 {
-        LASSERT(cfs_list_empty(&req->rq_replay_list));
-        LASSERT_ATOMIC_POS(&req->rq_export->exp_replay_count);
+       LASSERT(cfs_list_empty(&req->rq_replay_list));
+       LASSERT_ATOMIC_POS(&req->rq_export->exp_replay_count);
 
-        cfs_atomic_dec(&req->rq_export->exp_replay_count);
+       atomic_dec(&req->rq_export->exp_replay_count);
        class_export_rpc_dec(req->rq_export);
-        ptlrpc_server_drop_request(req);
+       ptlrpc_server_drop_request(req);
 }
 
 static int target_exp_enqueue_req_replay(struct ptlrpc_request *req)
@@ -1394,7 +1394,7 @@ static void target_finish_recovery(struct obd_device *obd)
                        "%d recovered and %d %s evicted.\n", obd->obd_name,
                        (int)elapsed_time / 60, (int)elapsed_time % 60,
                        obd->obd_max_recoverable_clients,
-                       cfs_atomic_read(&obd->obd_connected_clients),
+                       atomic_read(&obd->obd_connected_clients),
                        obd->obd_stale_clients,
                        obd->obd_stale_clients == 1 ? "was" : "were");
        }
@@ -1658,14 +1658,14 @@ static inline int exp_connect_healthy(struct obd_export *exp)
 /** if export done req_replay or has replay in queue */
 static inline int exp_req_replay_healthy(struct obd_export *exp)
 {
-        return (!exp->exp_req_replay_needed ||
-                cfs_atomic_read(&exp->exp_replay_count) > 0);
+       return (!exp->exp_req_replay_needed ||
+               atomic_read(&exp->exp_replay_count) > 0);
 }
 /** if export done lock_replay or has replay in queue */
 static inline int exp_lock_replay_healthy(struct obd_export *exp)
 {
-        return (!exp->exp_lock_replay_needed ||
-                cfs_atomic_read(&exp->exp_replay_count) > 0);
+       return (!exp->exp_lock_replay_needed ||
+               atomic_read(&exp->exp_replay_count) > 0);
 }
 
 static inline int exp_vbr_healthy(struct obd_export *exp)
@@ -1681,11 +1681,11 @@ static inline int exp_finished(struct obd_export *exp)
 /** Checking routines for recovery */
 static int check_for_clients(struct obd_device *obd)
 {
-        unsigned int clnts = cfs_atomic_read(&obd->obd_connected_clients);
+       unsigned int clnts = atomic_read(&obd->obd_connected_clients);
 
-        if (obd->obd_abort_recovery || obd->obd_recovery_expired)
-                return 1;
-        LASSERT(clnts <= obd->obd_max_recoverable_clients);
+       if (obd->obd_abort_recovery || obd->obd_recovery_expired)
+               return 1;
+       LASSERT(clnts <= obd->obd_max_recoverable_clients);
        return (clnts + obd->obd_stale_clients ==
                obd->obd_max_recoverable_clients);
 }
@@ -1698,61 +1698,61 @@ static int check_for_next_transno(struct obd_device *obd)
        ENTRY;
 
        spin_lock(&obd->obd_recovery_task_lock);
-        if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
-                req = cfs_list_entry(obd->obd_req_replay_queue.next,
-                                     struct ptlrpc_request, rq_list);
-                req_transno = lustre_msg_get_transno(req->rq_reqmsg);
-        } else {
-                req_transno = 0;
-        }
-
-        connected = cfs_atomic_read(&obd->obd_connected_clients);
-        completed = connected - cfs_atomic_read(&obd->obd_req_replay_clients);
-        queue_len = obd->obd_requests_queued_for_recovery;
-        next_transno = obd->obd_next_recovery_transno;
-
-        CDEBUG(D_HA, "max: %d, connected: %d, completed: %d, queue_len: %d, "
-               "req_transno: "LPU64", next_transno: "LPU64"\n",
-               obd->obd_max_recoverable_clients, connected, completed,
-               queue_len, req_transno, next_transno);
-
-        if (obd->obd_abort_recovery) {
-                CDEBUG(D_HA, "waking for aborted recovery\n");
-                wake_up = 1;
-        } else if (obd->obd_recovery_expired) {
-                CDEBUG(D_HA, "waking for expired recovery\n");
-                wake_up = 1;
-        } else if (req_transno == next_transno) {
-                CDEBUG(D_HA, "waking for next ("LPD64")\n", next_transno);
-                wake_up = 1;
+       if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
+               req = cfs_list_entry(obd->obd_req_replay_queue.next,
+                                    struct ptlrpc_request, rq_list);
+               req_transno = lustre_msg_get_transno(req->rq_reqmsg);
+       } else {
+               req_transno = 0;
+       }
+
+       connected = atomic_read(&obd->obd_connected_clients);
+       completed = connected - atomic_read(&obd->obd_req_replay_clients);
+       queue_len = obd->obd_requests_queued_for_recovery;
+       next_transno = obd->obd_next_recovery_transno;
+
+       CDEBUG(D_HA, "max: %d, connected: %d, completed: %d, queue_len: %d, "
+              "req_transno: "LPU64", next_transno: "LPU64"\n",
+              obd->obd_max_recoverable_clients, connected, completed,
+              queue_len, req_transno, next_transno);
+
+       if (obd->obd_abort_recovery) {
+               CDEBUG(D_HA, "waking for aborted recovery\n");
+               wake_up = 1;
+       } else if (obd->obd_recovery_expired) {
+               CDEBUG(D_HA, "waking for expired recovery\n");
+               wake_up = 1;
+       } else if (req_transno == next_transno) {
+               CDEBUG(D_HA, "waking for next ("LPD64")\n", next_transno);
+               wake_up = 1;
        } else if (queue_len > 0 &&
-                  queue_len == cfs_atomic_read(&obd->obd_req_replay_clients)) {
-                int d_lvl = D_HA;
-                /** handle gaps occured due to lost reply or VBR */
-                LASSERTF(req_transno >= next_transno,
-                         "req_transno: "LPU64", next_transno: "LPU64"\n",
-                         req_transno, next_transno);
-                if (req_transno > obd->obd_last_committed &&
-                    !obd->obd_version_recov)
-                        d_lvl = D_ERROR;
-                CDEBUG(d_lvl,
-                       "%s: waking for gap in transno, VBR is %s (skip: "
-                       LPD64", ql: %d, comp: %d, conn: %d, next: "LPD64
-                       ", last_committed: "LPD64")\n",
-                       obd->obd_name, obd->obd_version_recov ? "ON" : "OFF",
-                       next_transno, queue_len, completed, connected,
-                       req_transno, obd->obd_last_committed);
-                obd->obd_next_recovery_transno = req_transno;
-                wake_up = 1;
-       } else if (cfs_atomic_read(&obd->obd_req_replay_clients) == 0) {
+                  queue_len == atomic_read(&obd->obd_req_replay_clients)) {
+               int d_lvl = D_HA;
+               /** handle gaps occured due to lost reply or VBR */
+               LASSERTF(req_transno >= next_transno,
+                        "req_transno: "LPU64", next_transno: "LPU64"\n",
+                        req_transno, next_transno);
+               if (req_transno > obd->obd_last_committed &&
+                   !obd->obd_version_recov)
+                       d_lvl = D_ERROR;
+               CDEBUG(d_lvl,
+                      "%s: waking for gap in transno, VBR is %s (skip: "
+                      LPD64", ql: %d, comp: %d, conn: %d, next: "LPD64
+                      ", last_committed: "LPD64")\n",
+                      obd->obd_name, obd->obd_version_recov ? "ON" : "OFF",
+                      next_transno, queue_len, completed, connected,
+                      req_transno, obd->obd_last_committed);
+               obd->obd_next_recovery_transno = req_transno;
+               wake_up = 1;
+       } else if (atomic_read(&obd->obd_req_replay_clients) == 0) {
                CDEBUG(D_HA, "waking for completed recovery\n");
                wake_up = 1;
-        } else if (OBD_FAIL_CHECK(OBD_FAIL_MDS_RECOVERY_ACCEPTS_GAPS)) {
-                CDEBUG(D_HA, "accepting transno gaps is explicitly allowed"
-                       " by fail_lock, waking up ("LPD64")\n", next_transno);
-                obd->obd_next_recovery_transno = req_transno;
-                wake_up = 1;
-        }
+       } else if (OBD_FAIL_CHECK(OBD_FAIL_MDS_RECOVERY_ACCEPTS_GAPS)) {
+               CDEBUG(D_HA, "accepting transno gaps is explicitly allowed"
+                      " by fail_lock, waking up ("LPD64")\n", next_transno);
+               obd->obd_next_recovery_transno = req_transno;
+               wake_up = 1;
+       }
        spin_unlock(&obd->obd_recovery_task_lock);
        return wake_up;
 }
@@ -1762,19 +1762,19 @@ static int check_for_next_lock(struct obd_device *obd)
        int wake_up = 0;
 
        spin_lock(&obd->obd_recovery_task_lock);
-        if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
-                CDEBUG(D_HA, "waking for next lock\n");
-                wake_up = 1;
-        } else if (cfs_atomic_read(&obd->obd_lock_replay_clients) == 0) {
-                CDEBUG(D_HA, "waking for completed lock replay\n");
-                wake_up = 1;
-        } else if (obd->obd_abort_recovery) {
-                CDEBUG(D_HA, "waking for aborted recovery\n");
-                wake_up = 1;
-        } else if (obd->obd_recovery_expired) {
-                CDEBUG(D_HA, "waking for expired recovery\n");
-                wake_up = 1;
-        }
+       if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
+               CDEBUG(D_HA, "waking for next lock\n");
+               wake_up = 1;
+       } else if (atomic_read(&obd->obd_lock_replay_clients) == 0) {
+               CDEBUG(D_HA, "waking for completed lock replay\n");
+               wake_up = 1;
+       } else if (obd->obd_abort_recovery) {
+               CDEBUG(D_HA, "waking for aborted recovery\n");
+               wake_up = 1;
+       } else if (obd->obd_recovery_expired) {
+               CDEBUG(D_HA, "waking for expired recovery\n");
+               wake_up = 1;
+       }
        spin_unlock(&obd->obd_recovery_task_lock);
 
        return wake_up;
@@ -1842,7 +1842,7 @@ static struct ptlrpc_request *target_next_replay_req(struct obd_device *obd)
        } else {
                spin_unlock(&obd->obd_recovery_task_lock);
                LASSERT(cfs_list_empty(&obd->obd_req_replay_queue));
-               LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients) == 0);
+               LASSERT(atomic_read(&obd->obd_req_replay_clients) == 0);
                /** evict exports failed VBR */
                class_disconnect_stale_exports(obd, exp_vbr_healthy);
        }
@@ -1851,12 +1851,12 @@ static struct ptlrpc_request *target_next_replay_req(struct obd_device *obd)
 
 static struct ptlrpc_request *target_next_replay_lock(struct obd_device *obd)
 {
-        struct ptlrpc_request *req = NULL;
+       struct ptlrpc_request *req = NULL;
 
-        CDEBUG(D_HA, "Waiting for lock\n");
-        if (target_recovery_overseer(obd, check_for_next_lock,
-                                     exp_lock_replay_healthy))
-                abort_lock_replay_queue(obd);
+       CDEBUG(D_HA, "Waiting for lock\n");
+       if (target_recovery_overseer(obd, check_for_next_lock,
+                                    exp_lock_replay_healthy))
+               abort_lock_replay_queue(obd);
 
        spin_lock(&obd->obd_recovery_task_lock);
        if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
@@ -1866,12 +1866,12 @@ static struct ptlrpc_request *target_next_replay_lock(struct obd_device *obd)
                spin_unlock(&obd->obd_recovery_task_lock);
        } else {
                spin_unlock(&obd->obd_recovery_task_lock);
-                LASSERT(cfs_list_empty(&obd->obd_lock_replay_queue));
-                LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients) == 0);
-                /** evict exports failed VBR */
-                class_disconnect_stale_exports(obd, exp_vbr_healthy);
-        }
-        return req;
+               LASSERT(cfs_list_empty(&obd->obd_lock_replay_queue));
+               LASSERT(atomic_read(&obd->obd_lock_replay_clients) == 0);
+               /** evict exports failed VBR */
+               class_disconnect_stale_exports(obd, exp_vbr_healthy);
+       }
+       return req;
 }
 
 static struct ptlrpc_request *target_next_final_ping(struct obd_device *obd)
@@ -2003,11 +2003,11 @@ static int target_recovery_thread(void *arg)
                 abort_lock_replay_queue(obd);
         }
 
-        /* next stage: replay requests */
-        delta = jiffies;
-        CDEBUG(D_INFO, "1: request replay stage - %d clients from t"LPU64"\n",
-               cfs_atomic_read(&obd->obd_req_replay_clients),
-               obd->obd_next_recovery_transno);
+       /* next stage: replay requests */
+       delta = jiffies;
+       CDEBUG(D_INFO, "1: request replay stage - %d clients from t"LPU64"\n",
+              atomic_read(&obd->obd_req_replay_clients),
+              obd->obd_next_recovery_transno);
        while ((req = target_next_replay_req(obd))) {
                LASSERT(trd->trd_processing_task == current_pid());
                DEBUG_REQ(D_HA, req, "processing t"LPD64" from %s",
@@ -2027,20 +2027,20 @@ static int target_recovery_thread(void *arg)
                 obd->obd_replayed_requests++;
         }
 
-        /**
-         * The second stage: replay locks
-         */
-        CDEBUG(D_INFO, "2: lock replay stage - %d clients\n",
-               cfs_atomic_read(&obd->obd_lock_replay_clients));
+       /**
+        * The second stage: replay locks
+        */
+       CDEBUG(D_INFO, "2: lock replay stage - %d clients\n",
+              atomic_read(&obd->obd_lock_replay_clients));
        while ((req = target_next_replay_lock(obd))) {
                LASSERT(trd->trd_processing_task == current_pid());
                DEBUG_REQ(D_HA, req, "processing lock from %s: ",
                          libcfs_nid2str(req->rq_peer.nid));
-                handle_recovery_req(thread, req,
-                                    trd->trd_recovery_handler);
-                target_request_copy_put(req);
-                obd->obd_replayed_locks++;
-        }
+               handle_recovery_req(thread, req,
+                                   trd->trd_recovery_handler);
+               target_request_copy_put(req);
+               obd->obd_replayed_locks++;
+       }
 
         /**
          * The third stage: reply on final pings, at this moment all clients
@@ -2139,9 +2139,9 @@ static void target_recovery_expired(unsigned long castmeharder)
        struct obd_device *obd = (struct obd_device *)castmeharder;
        CDEBUG(D_HA, "%s: recovery timed out; %d clients are still in recovery"
               " after %lds (%d clients connected)\n",
-              obd->obd_name, cfs_atomic_read(&obd->obd_lock_replay_clients),
+              obd->obd_name, atomic_read(&obd->obd_lock_replay_clients),
               cfs_time_current_sec()- obd->obd_recovery_start,
-              cfs_atomic_read(&obd->obd_connected_clients));
+              atomic_read(&obd->obd_connected_clients));
 
        obd->obd_recovery_expired = 1;
        wake_up(&obd->obd_next_transno_waitq);
@@ -2184,7 +2184,7 @@ static int target_process_req_flags(struct obd_device *obd,
                        spin_unlock(&exp->exp_lock);
 
                        LASSERT_ATOMIC_POS(&obd->obd_req_replay_clients);
-                       cfs_atomic_dec(&obd->obd_req_replay_clients);
+                       atomic_dec(&obd->obd_req_replay_clients);
                } else {
                        spin_unlock(&exp->exp_lock);
                }
@@ -2198,7 +2198,7 @@ static int target_process_req_flags(struct obd_device *obd,
                        spin_unlock(&exp->exp_lock);
 
                        LASSERT_ATOMIC_POS(&obd->obd_lock_replay_clients);
-                       cfs_atomic_dec(&obd->obd_lock_replay_clients);
+                       atomic_dec(&obd->obd_lock_replay_clients);
                } else {
                        spin_unlock(&exp->exp_lock);
                }
@@ -2477,17 +2477,17 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
 
        spin_lock(&svcpt->scp_rep_lock);
 
-       cfs_atomic_inc(&svcpt->scp_nreps_difficult);
+       atomic_inc(&svcpt->scp_nreps_difficult);
 
-        if (netrc != 0) {
-                /* error sending: reply is off the net.  Also we need +1
-                 * reply ref until ptlrpc_handle_rs() is done
-                 * with the reply state (if the send was successful, there
-                 * would have been +1 ref for the net, which
-                 * reply_out_callback leaves alone) */
-                rs->rs_on_net = 0;
-                ptlrpc_rs_addref(rs);
-        }
+       if (netrc != 0) {
+               /* error sending: reply is off the net.  Also we need +1
+                * reply ref until ptlrpc_handle_rs() is done
+                * with the reply state (if the send was successful, there
+                * would have been +1 ref for the net, which
+                * reply_out_callback leaves alone) */
+               rs->rs_on_net = 0;
+               ptlrpc_rs_addref(rs);
+       }
 
        spin_lock(&rs->rs_lock);
        if (rs->rs_transno <= exp->exp_last_committed ||
@@ -2637,10 +2637,10 @@ int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc,
        ENTRY;
 
        /* If there is eviction in progress, wait for it to finish. */
-       if (unlikely(cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
+       if (unlikely(atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
                *lwi = LWI_INTR(NULL, NULL);
                rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
-                                 !cfs_atomic_read(&exp->exp_obd->
+                                 !atomic_read(&exp->exp_obd->
                                                   obd_evict_inprogress),
                                  lwi);
        }
lustre/ldlm/ldlm_lock.c
index 0c66b7d..214ee8c 100644 (file)
@@ -196,7 +196,7 @@ EXPORT_SYMBOL(ldlm_register_intent);
  */
 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
 {
-        cfs_atomic_inc(&lock->l_refc);
+       atomic_inc(&lock->l_refc);
         return lock;
 }
 EXPORT_SYMBOL(ldlm_lock_get);
@@ -211,8 +211,8 @@ void ldlm_lock_put(struct ldlm_lock *lock)
         ENTRY;
 
         LASSERT(lock->l_resource != LP_POISON);
-        LASSERT(cfs_atomic_read(&lock->l_refc) > 0);
-        if (cfs_atomic_dec_and_test(&lock->l_refc)) {
+       LASSERT(atomic_read(&lock->l_refc) > 0);
+       if (atomic_dec_and_test(&lock->l_refc)) {
                 struct ldlm_resource *res;
 
                 LDLM_DEBUG(lock,
@@ -482,7 +482,7 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
         lock->l_resource = resource;
         lu_ref_add(&resource->lr_reference, "lock", lock);
 
-       cfs_atomic_set(&lock->l_refc, 2);
+       atomic_set(&lock->l_refc, 2);
        CFS_INIT_LIST_HEAD(&lock->l_res_link);
        CFS_INIT_LIST_HEAD(&lock->l_lru);
        CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
@@ -1979,7 +1979,7 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, cfs_list_t *rpc_list,
        if (arg == NULL)
                RETURN(-ENOMEM);
 
-       cfs_atomic_set(&arg->restart, 0);
+       atomic_set(&arg->restart, 0);
        arg->list = rpc_list;
 
        switch (ast_type) {
@@ -2015,7 +2015,7 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, cfs_list_t *rpc_list,
        ptlrpc_set_wait(arg->set);
        ptlrpc_set_destroy(arg->set);
 
-       rc = cfs_atomic_read(&arg->restart) ? -ERESTART : 0;
+       rc = atomic_read(&arg->restart) ? -ERESTART : 0;
        GOTO(out, rc);
 out:
        OBD_FREE_PTR(arg);
@@ -2236,7 +2236,7 @@ int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                       "Cancel lock %p for export %p (loop %d), still have "
                       "%d locks left on hash table.\n",
                       lock, exp, ecl->ecl_loop,
-                      cfs_atomic_read(&hs->hs_count));
+                      atomic_read(&hs->hs_count));
        }
 
        return 0;
@@ -2471,12 +2471,12 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
                        "remote: "LPX64" expref: %d pid: %u timeout: %lu "
                       "lvb_type: %d\n",
                        lock,
-                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
+                      lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        lock->l_flags, nid, lock->l_remote_handle.cookie,
-                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                      exp ? atomic_read(&exp->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
                 va_end(args);
                 return;
@@ -2490,18 +2490,18 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
                        "(req "LPU64"->"LPU64") flags: "LPX64" nid: %s remote: "
                        LPX64" expref: %d pid: %u timeout: %lu lvb_type: %d\n",
                        ldlm_lock_to_ns_name(lock), lock,
-                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
+                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        PLDLMRES(resource),
-                       cfs_atomic_read(&resource->lr_refcount),
+                       atomic_read(&resource->lr_refcount),
                        ldlm_typename[resource->lr_type],
                        lock->l_policy_data.l_extent.start,
                        lock->l_policy_data.l_extent.end,
                        lock->l_req_extent.start, lock->l_req_extent.end,
                        lock->l_flags, nid, lock->l_remote_handle.cookie,
-                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                       exp ? atomic_read(&exp->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout,
                        lock->l_lvb_type);
                break;
@@ -2513,18 +2513,18 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
                        "["LPU64"->"LPU64"] flags: "LPX64" nid: %s "
                        "remote: "LPX64" expref: %d pid: %u timeout: %lu\n",
                        ldlm_lock_to_ns_name(lock), lock,
-                       lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
+                       lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        PLDLMRES(resource),
-                       cfs_atomic_read(&resource->lr_refcount),
+                       atomic_read(&resource->lr_refcount),
                        ldlm_typename[resource->lr_type],
                        lock->l_policy_data.l_flock.pid,
                        lock->l_policy_data.l_flock.start,
                        lock->l_policy_data.l_flock.end,
                        lock->l_flags, nid, lock->l_remote_handle.cookie,
-                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                       exp ? atomic_read(&exp->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout);
                break;
 
@@ -2536,16 +2536,16 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
                        "pid: %u timeout: %lu lvb_type: %d\n",
                        ldlm_lock_to_ns_name(lock),
                        lock, lock->l_handle.h_cookie,
-                       cfs_atomic_read(&lock->l_refc),
+                       atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        PLDLMRES(resource),
                        lock->l_policy_data.l_inodebits.bits,
-                       cfs_atomic_read(&resource->lr_refcount),
+                       atomic_read(&resource->lr_refcount),
                        ldlm_typename[resource->lr_type],
                        lock->l_flags, nid, lock->l_remote_handle.cookie,
-                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                       exp ? atomic_read(&exp->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout,
                        lock->l_lvb_type);
                break;
@@ -2558,15 +2558,15 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
                        "timeout: %lu lvb_type: %d\n",
                        ldlm_lock_to_ns_name(lock),
                        lock, lock->l_handle.h_cookie,
-                       cfs_atomic_read(&lock->l_refc),
+                       atomic_read(&lock->l_refc),
                        lock->l_readers, lock->l_writers,
                        ldlm_lockname[lock->l_granted_mode],
                        ldlm_lockname[lock->l_req_mode],
                        PLDLMRES(resource),
-                       cfs_atomic_read(&resource->lr_refcount),
+                       atomic_read(&resource->lr_refcount),
                        ldlm_typename[resource->lr_type],
                        lock->l_flags, nid, lock->l_remote_handle.cookie,
-                       exp ? cfs_atomic_read(&exp->exp_refcount) : -99,
+                       exp ? atomic_read(&exp->exp_refcount) : -99,
                        lock->l_pid, lock->l_callback_timeout,
                        lock->l_lvb_type);
                break;
lustre/ldlm/ldlm_lockd.c
index 5d55852..6580b8d 100644 (file)
@@ -110,8 +110,8 @@ struct ldlm_bl_pool {
 
        wait_queue_head_t       blp_waitq;
        struct completion       blp_comp;
-       cfs_atomic_t            blp_num_threads;
-       cfs_atomic_t            blp_busy_threads;
+       atomic_t            blp_num_threads;
+       atomic_t            blp_busy_threads;
        int                     blp_min_threads;
        int                     blp_max_threads;
 };
@@ -717,9 +717,9 @@ static int ldlm_cb_interpret(const struct lu_env *env,
         LDLM_LOCK_RELEASE(lock);
 
        if (rc == -ERESTART)
-               cfs_atomic_inc(&arg->restart);
+               atomic_inc(&arg->restart);
 
-        RETURN(0);
+       RETURN(0);
 }
 
 static inline int ldlm_ast_fini(struct ptlrpc_request *req,
@@ -734,7 +734,7 @@ static inline int ldlm_ast_fini(struct ptlrpc_request *req,
                rc = ptl_send_rpc(req, 1);
                ptlrpc_req_finished(req);
                if (rc == 0)
-                       cfs_atomic_inc(&arg->restart);
+                       atomic_inc(&arg->restart);
        } else {
                LDLM_LOCK_GET(lock);
                ptlrpc_set_add_req(arg->set, req);
@@ -2487,22 +2487,22 @@ static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
        static unsigned int num_bl = 0;
 
        spin_lock(&blp->blp_lock);
-        /* process a request from the blp_list at least every blp_num_threads */
-        if (!cfs_list_empty(&blp->blp_list) &&
-            (cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
-                blwi = cfs_list_entry(blp->blp_list.next,
-                                      struct ldlm_bl_work_item, blwi_entry);
-        else
-                if (!cfs_list_empty(&blp->blp_prio_list))
-                        blwi = cfs_list_entry(blp->blp_prio_list.next,
-                                              struct ldlm_bl_work_item,
-                                              blwi_entry);
-
-        if (blwi) {
-                if (++num_bl >= cfs_atomic_read(&blp->blp_num_threads))
-                        num_bl = 0;
-                cfs_list_del(&blwi->blwi_entry);
-        }
+       /* process a request from the blp_list at least every blp_num_threads */
+       if (!cfs_list_empty(&blp->blp_list) &&
+           (cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
+               blwi = cfs_list_entry(blp->blp_list.next,
+                                     struct ldlm_bl_work_item, blwi_entry);
+       else
+               if (!cfs_list_empty(&blp->blp_prio_list))
+                       blwi = cfs_list_entry(blp->blp_prio_list.next,
+                                             struct ldlm_bl_work_item,
+                                             blwi_entry);
+
+       if (blwi) {
+               if (++num_bl >= atomic_read(&blp->blp_num_threads))
+                       num_bl = 0;
+               cfs_list_del(&blwi->blwi_entry);
+       }
        spin_unlock(&blp->blp_lock);
 
        return blwi;
@@ -2524,13 +2524,13 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
        struct task_struct *task;
 
        init_completion(&bltd.bltd_comp);
-       bltd.bltd_num = cfs_atomic_read(&blp->blp_num_threads);
+       bltd.bltd_num = atomic_read(&blp->blp_num_threads);
        snprintf(bltd.bltd_name, sizeof(bltd.bltd_name) - 1,
                "ldlm_bl_%02d", bltd.bltd_num);
        task = kthread_run(ldlm_bl_thread_main, &bltd, bltd.bltd_name);
        if (IS_ERR(task)) {
                CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
-                      cfs_atomic_read(&blp->blp_num_threads), PTR_ERR(task));
+                      atomic_read(&blp->blp_num_threads), PTR_ERR(task));
                return PTR_ERR(task);
        }
        wait_for_completion(&bltd.bltd_comp);
@@ -2548,47 +2548,44 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
 static int ldlm_bl_thread_main(void *arg)
 {
         struct ldlm_bl_pool *blp;
+       struct ldlm_bl_thread_data *bltd = arg;
         ENTRY;
 
-        {
-                struct ldlm_bl_thread_data *bltd = arg;
+       blp = bltd->bltd_blp;
 
-                blp = bltd->bltd_blp;
+       atomic_inc(&blp->blp_num_threads);
+       atomic_inc(&blp->blp_busy_threads);
 
-               cfs_atomic_inc(&blp->blp_num_threads);
-                cfs_atomic_inc(&blp->blp_busy_threads);
+       complete(&bltd->bltd_comp);
+       /* cannot use bltd after this, it is only on caller's stack */
 
-               complete(&bltd->bltd_comp);
-                /* cannot use bltd after this, it is only on caller's stack */
-        }
-
-        while (1) {
-                struct l_wait_info lwi = { 0 };
-                struct ldlm_bl_work_item *blwi = NULL;
-                int busy;
-
-                blwi = ldlm_bl_get_work(blp);
-
-                if (blwi == NULL) {
-                        cfs_atomic_dec(&blp->blp_busy_threads);
-                        l_wait_event_exclusive(blp->blp_waitq,
-                                         (blwi = ldlm_bl_get_work(blp)) != NULL,
-                                         &lwi);
-                        busy = cfs_atomic_inc_return(&blp->blp_busy_threads);
-                } else {
-                        busy = cfs_atomic_read(&blp->blp_busy_threads);
-                }
+       while (1) {
+               struct l_wait_info lwi = { 0 };
+               struct ldlm_bl_work_item *blwi = NULL;
+               int busy;
+
+               blwi = ldlm_bl_get_work(blp);
+
+               if (blwi == NULL) {
+                       atomic_dec(&blp->blp_busy_threads);
+                       l_wait_event_exclusive(blp->blp_waitq,
+                                        (blwi = ldlm_bl_get_work(blp)) != NULL,
+                                        &lwi);
+                       busy = atomic_inc_return(&blp->blp_busy_threads);
+               } else {
+                       busy = atomic_read(&blp->blp_busy_threads);
+               }
 
-                if (blwi->blwi_ns == NULL)
-                        /* added by ldlm_cleanup() */
-                        break;
+               if (blwi->blwi_ns == NULL)
+                       /* added by ldlm_cleanup() */
+                       break;
 
-                /* Not fatal if racy and have a few too many threads */
-                if (unlikely(busy < blp->blp_max_threads &&
-                             busy >= cfs_atomic_read(&blp->blp_num_threads) &&
-                             !blwi->blwi_mem_pressure))
-                        /* discard the return value, we tried */
-                        ldlm_bl_thread_start(blp);
+               /* Not fatal if racy and have a few too many threads */
+               if (unlikely(busy < blp->blp_max_threads &&
+                            busy >= atomic_read(&blp->blp_num_threads) &&
+                            !blwi->blwi_mem_pressure))
+                       /* discard the return value, we tried */
+                       ldlm_bl_thread_start(blp);
 
                 if (blwi->blwi_mem_pressure)
                        memory_pressure_set();
@@ -2615,12 +2612,12 @@ static int ldlm_bl_thread_main(void *arg)
                        OBD_FREE(blwi, sizeof(*blwi));
                else
                        complete(&blwi->blwi_comp);
-        }
+       }
 
-        cfs_atomic_dec(&blp->blp_busy_threads);
-        cfs_atomic_dec(&blp->blp_num_threads);
+       atomic_dec(&blp->blp_busy_threads);
+       atomic_dec(&blp->blp_num_threads);
        complete(&blp->blp_comp);
-        RETURN(0);
+       RETURN(0);
 }
 
 #endif
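With blp_num_threads and blp_busy_threads converted to plain atomics, a blocking-callback thread that finds every thread in the pool busy just starts one more, accepting the race that the "Not fatal if racy" comment above describes. A compact userspace analogue of that self-growing worker pool (hypothetical names, pthreads plus stdatomic):

#include <pthread.h>
#include <stdatomic.h>

struct pool {
        atomic_int num_threads;         /* like blp_num_threads  */
        atomic_int busy_threads;        /* like blp_busy_threads */
        int        max_threads;
};

static void *worker(void *arg)
{
        struct pool *p = arg;

        atomic_fetch_add(&p->num_threads, 1);
        atomic_fetch_add(&p->busy_threads, 1);
        /* ... main loop: pick up work items, drop busy_threads while idle ... */
        atomic_fetch_sub(&p->busy_threads, 1);
        atomic_fetch_sub(&p->num_threads, 1);
        return NULL;
}

static void pool_start_thread(struct pool *p)
{
        pthread_t tid;

        if (pthread_create(&tid, NULL, worker, p) == 0)
                pthread_detach(tid);
}

/* Called by a worker that just took a work item: grow the pool if everyone
 * is busy and the hard cap has not been reached. Deliberately racy. */
static void maybe_grow(struct pool *p)
{
        int busy = atomic_load(&p->busy_threads);

        if (busy < p->max_threads && busy >= atomic_load(&p->num_threads))
                pool_start_thread(p);
}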
@@ -2885,8 +2882,8 @@ static int ldlm_setup(void)
        CFS_INIT_LIST_HEAD(&blp->blp_list);
        CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
        init_waitqueue_head(&blp->blp_waitq);
-       cfs_atomic_set(&blp->blp_num_threads, 0);
-       cfs_atomic_set(&blp->blp_busy_threads, 0);
+       atomic_set(&blp->blp_num_threads, 0);
+       atomic_set(&blp->blp_busy_threads, 0);
 
 #ifdef __KERNEL__
        if (ldlm_num_threads == 0) {
@@ -2954,7 +2951,7 @@ static int ldlm_cleanup(void)
        if (ldlm_state->ldlm_bl_pool != NULL) {
                struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
 
-               while (cfs_atomic_read(&blp->blp_num_threads) > 0) {
+               while (atomic_read(&blp->blp_num_threads) > 0) {
                        struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
 
                        init_completion(&blp->blp_comp);
lustre/ldlm/ldlm_pool.c
index c1b7ac6..6cf50f2 100644 (file)
@@ -230,17 +230,17 @@ static inline int ldlm_pool_t2gsp(unsigned int t)
  */
 static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
 {
-        int granted, grant_step, limit;
+       int granted, grant_step, limit;
 
-        limit = ldlm_pool_get_limit(pl);
-        granted = cfs_atomic_read(&pl->pl_granted);
+       limit = ldlm_pool_get_limit(pl);
+       granted = atomic_read(&pl->pl_granted);
 
-        grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
-        grant_step = ((limit - granted) * grant_step) / 100;
-        pl->pl_grant_plan = granted + grant_step;
-        limit = (limit * 5) >> 2;
-        if (pl->pl_grant_plan > limit)
-                pl->pl_grant_plan = limit;
+       grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
+       grant_step = ((limit - granted) * grant_step) / 100;
+       pl->pl_grant_plan = granted + grant_step;
+       limit = (limit * 5) >> 2;
+       if (pl->pl_grant_plan > limit)
+               pl->pl_grant_plan = limit;
 }
 
 /**
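To make the arithmetic in ldlm_pool_recalc_grant_plan() concrete: with, say, a limit of 10000 locks, 4000 currently granted and ldlm_pool_t2gsp() yielding 10, grant_step becomes (10000 - 4000) * 10 / 100 = 600, so pl_grant_plan is set to 4600; the plan is then clamped to (limit * 5) >> 2 = 12500, which does not bite in this case. (The numbers are illustrative, not taken from a real deployment.)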
@@ -258,11 +258,11 @@ static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
         __u64 grant_usage;
         __u32 limit;
 
-        slv = pl->pl_server_lock_volume;
-        grant_plan = pl->pl_grant_plan;
-        limit = ldlm_pool_get_limit(pl);
-        granted = cfs_atomic_read(&pl->pl_granted);
-        round_up = granted < limit;
+       slv = pl->pl_server_lock_volume;
+       grant_plan = pl->pl_grant_plan;
+       limit = ldlm_pool_get_limit(pl);
+       granted = atomic_read(&pl->pl_granted);
+       round_up = granted < limit;
 
         grant_usage = max_t(int, limit - (granted - grant_plan), 1);
 
@@ -295,22 +295,22 @@ static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
  */
 static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
 {
-        int grant_plan = pl->pl_grant_plan;
-        __u64 slv = pl->pl_server_lock_volume;
-        int granted = cfs_atomic_read(&pl->pl_granted);
-        int grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
-        int cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
-
-        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
-                            slv);
-        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
-                            granted);
-        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
-                            grant_rate);
-        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
-                            grant_plan);
-        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
-                            cancel_rate);
+       int grant_plan = pl->pl_grant_plan;
+       __u64 slv = pl->pl_server_lock_volume;
+       int granted = atomic_read(&pl->pl_granted);
+       int grant_rate = atomic_read(&pl->pl_grant_rate);
+       int cancel_rate = atomic_read(&pl->pl_cancel_rate);
+
+       lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
+                           slv);
+       lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
+                           granted);
+       lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
+                           grant_rate);
+       lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
+                           grant_plan);
+       lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
+                           cancel_rate);
 }
 
 /**
@@ -386,22 +386,22 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
  * locks smaller in next 10h.
  */
 static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
-                                int nr, unsigned int gfp_mask)
+                               int nr, unsigned int gfp_mask)
 {
-        __u32 limit;
+       __u32 limit;
 
-        /*
-         * VM is asking how many entries may be potentially freed.
-         */
-        if (nr == 0)
-                return cfs_atomic_read(&pl->pl_granted);
+       /*
+        * VM is asking how many entries may be potentially freed.
+        */
+       if (nr == 0)
+               return atomic_read(&pl->pl_granted);
 
-        /*
-         * Client already canceled locks but server is already in shrinker
-         * and can't cancel anything. Let's catch this race.
-         */
-        if (cfs_atomic_read(&pl->pl_granted) == 0)
-                RETURN(0);
+       /*
+        * Client already canceled locks but server is already in shrinker
+        * and can't cancel anything. Let's catch this race.
+        */
+       if (atomic_read(&pl->pl_granted) == 0)
+               RETURN(0);
 
        spin_lock(&pl->pl_lock);
 
@@ -589,18 +589,18 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
                 goto recalc;
 
        spin_lock(&pl->pl_lock);
-        if (recalc_interval_sec > 0) {
-                /*
-                 * Update pool statistics every 1s.
-                 */
-                ldlm_pool_recalc_stats(pl);
+       if (recalc_interval_sec > 0) {
+               /*
+                * Update pool statistics every 1s.
+                */
+               ldlm_pool_recalc_stats(pl);
 
-                /*
-                 * Zero out all rates and speed for the last period.
-                 */
-                cfs_atomic_set(&pl->pl_grant_rate, 0);
-                cfs_atomic_set(&pl->pl_cancel_rate, 0);
-        }
+               /*
+                * Zero out all rates and speed for the last period.
+                */
+               atomic_set(&pl->pl_grant_rate, 0);
+               atomic_set(&pl->pl_cancel_rate, 0);
+       }
        spin_unlock(&pl->pl_lock);
 
  recalc:
@@ -665,16 +665,16 @@ static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
        __u32 limit;
 
        spin_lock(&pl->pl_lock);
-        slv = pl->pl_server_lock_volume;
-        clv = pl->pl_client_lock_volume;
-        limit = ldlm_pool_get_limit(pl);
-        grant_plan = pl->pl_grant_plan;
-        granted = cfs_atomic_read(&pl->pl_granted);
-        grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
-        cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
-        grant_speed = grant_rate - cancel_rate;
-        lvf = cfs_atomic_read(&pl->pl_lock_volume_factor);
-        grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
+       slv = pl->pl_server_lock_volume;
+       clv = pl->pl_client_lock_volume;
+       limit = ldlm_pool_get_limit(pl);
+       grant_plan = pl->pl_grant_plan;
+       granted = atomic_read(&pl->pl_granted);
+       grant_rate = atomic_read(&pl->pl_grant_rate);
+       cancel_rate = atomic_read(&pl->pl_cancel_rate);
+       grant_speed = grant_rate - cancel_rate;
+       lvf = atomic_read(&pl->pl_lock_volume_factor);
+       grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
        spin_unlock(&pl->pl_lock);
 
        seq_printf(m, "LDLM pool state (%s):\n"
@@ -703,8 +703,8 @@ static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused)
 
        spin_lock(&pl->pl_lock);
        /* serialize with ldlm_pool_recalc */
-       grant_speed = cfs_atomic_read(&pl->pl_grant_rate) -
-                       cfs_atomic_read(&pl->pl_cancel_rate);
+       grant_speed = atomic_read(&pl->pl_grant_rate) -
+                       atomic_read(&pl->pl_cancel_rate);
        spin_unlock(&pl->pl_lock);
        return lprocfs_uint_seq_show(m, &grant_speed);
 }
@@ -846,22 +846,22 @@ static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
 #endif
 
 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
-                   int idx, ldlm_side_t client)
+                  int idx, ldlm_side_t client)
 {
        int rc;
        ENTRY;
 
        spin_lock_init(&pl->pl_lock);
-        cfs_atomic_set(&pl->pl_granted, 0);
-        pl->pl_recalc_time = cfs_time_current_sec();
-        cfs_atomic_set(&pl->pl_lock_volume_factor, 1);
+       atomic_set(&pl->pl_granted, 0);
+       pl->pl_recalc_time = cfs_time_current_sec();
+       atomic_set(&pl->pl_lock_volume_factor, 1);
 
-        cfs_atomic_set(&pl->pl_grant_rate, 0);
-        cfs_atomic_set(&pl->pl_cancel_rate, 0);
-        pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
+       atomic_set(&pl->pl_grant_rate, 0);
+       atomic_set(&pl->pl_cancel_rate, 0);
+       pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
 
-        snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
-                 ldlm_ns_name(ns), idx);
+       snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
+                ldlm_ns_name(ns), idx);
 
         if (client == LDLM_NAMESPACE_SERVER) {
                 pl->pl_ops = &ldlm_srv_pool_ops;
@@ -905,26 +905,26 @@ EXPORT_SYMBOL(ldlm_pool_fini);
  */
 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
 {
-        /*
-         * FLOCK locks are special in a sense that they are almost never
-         * cancelled, instead special kind of lock is used to drop them.
-         * also there is no LRU for flock locks, so no point in tracking
-         * them anyway.
-         */
-        if (lock->l_resource->lr_type == LDLM_FLOCK)
-                return;
+       /*
+        * FLOCK locks are special in a sense that they are almost never
+        * cancelled, instead special kind of lock is used to drop them.
+        * also there is no LRU for flock locks, so no point in tracking
+        * them anyway.
+        */
+       if (lock->l_resource->lr_type == LDLM_FLOCK)
+               return;
 
-        cfs_atomic_inc(&pl->pl_granted);
-        cfs_atomic_inc(&pl->pl_grant_rate);
-        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
-        /*
-         * Do not do pool recalc for client side as all locks which
-         * potentially may be canceled has already been packed into
-         * enqueue/cancel rpc. Also we do not want to run out of stack
-         * with too long call paths.
-         */
-        if (ns_is_server(ldlm_pl2ns(pl)))
-                ldlm_pool_recalc(pl);
+       atomic_inc(&pl->pl_granted);
+       atomic_inc(&pl->pl_grant_rate);
+       lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
+       /*
+        * Do not do pool recalc for client side as all locks which
+        * potentially may be canceled has already been packed into
+        * enqueue/cancel rpc. Also we do not want to run out of stack
+        * with too long call paths.
+        */
+       if (ns_is_server(ldlm_pl2ns(pl)))
+               ldlm_pool_recalc(pl);
 }
 EXPORT_SYMBOL(ldlm_pool_add);
 
@@ -933,20 +933,20 @@ EXPORT_SYMBOL(ldlm_pool_add);
  */
 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
 {
-        /*
-         * Filter out FLOCK locks. Read above comment in ldlm_pool_add().
-         */
-        if (lock->l_resource->lr_type == LDLM_FLOCK)
-                return;
+       /*
+        * Filter out FLOCK locks. Read above comment in ldlm_pool_add().
+        */
+       if (lock->l_resource->lr_type == LDLM_FLOCK)
+               return;
 
-        LASSERT(cfs_atomic_read(&pl->pl_granted) > 0);
-        cfs_atomic_dec(&pl->pl_granted);
-        cfs_atomic_inc(&pl->pl_cancel_rate);
+       LASSERT(atomic_read(&pl->pl_granted) > 0);
+       atomic_dec(&pl->pl_granted);
+       atomic_inc(&pl->pl_cancel_rate);
 
-        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
+       lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
 
-        if (ns_is_server(ldlm_pl2ns(pl)))
-                ldlm_pool_recalc(pl);
+       if (ns_is_server(ldlm_pl2ns(pl)))
+               ldlm_pool_recalc(pl);
 }
 EXPORT_SYMBOL(ldlm_pool_del);
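
ldlm_pool_add() and ldlm_pool_del() above keep the pool gauges symmetric: a grant bumps pl_granted and pl_grant_rate, a cancel drops pl_granted and bumps pl_cancel_rate, and FLOCK resources are skipped on both paths. A reduced sketch of that accounting with hypothetical names (WARN_ON stands in for the LASSERT in ldlm_pool_del()):

/*
 * Sketch only, not part of the patch: symmetric pool accounting on the
 * grant and cancel paths.  The proc handler above reports the difference
 * of the two rate counters as grant_speed.
 */
#include <linux/atomic.h>
#include <linux/bug.h>

struct pool_counters {
        atomic_t granted;       /* current number of granted locks */
        atomic_t grant_rate;    /* grant events counted for the pool */
        atomic_t cancel_rate;   /* cancel events counted for the pool */
};

static void counters_grant(struct pool_counters *pc)
{
        atomic_inc(&pc->granted);
        atomic_inc(&pc->grant_rate);
}

static void counters_cancel(struct pool_counters *pc)
{
        /* mirrors LASSERT(atomic_read(&pl->pl_granted) > 0) above */
        WARN_ON(atomic_read(&pc->granted) <= 0);
        atomic_dec(&pc->granted);
        atomic_inc(&pc->cancel_rate);
}
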
 
@@ -1011,7 +1011,7 @@ EXPORT_SYMBOL(ldlm_pool_set_clv);
  */
 __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
 {
-        return cfs_atomic_read(&pl->pl_limit);
+       return atomic_read(&pl->pl_limit);
 }
 EXPORT_SYMBOL(ldlm_pool_get_limit);
 
@@ -1020,7 +1020,7 @@ EXPORT_SYMBOL(ldlm_pool_get_limit);
  */
 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
 {
-        cfs_atomic_set(&pl->pl_limit, limit);
+       atomic_set(&pl->pl_limit, limit);
 }
 EXPORT_SYMBOL(ldlm_pool_set_limit);
 
@@ -1029,14 +1029,14 @@ EXPORT_SYMBOL(ldlm_pool_set_limit);
  */
 __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
 {
-        return cfs_atomic_read(&pl->pl_lock_volume_factor);
+       return atomic_read(&pl->pl_lock_volume_factor);
 }
 EXPORT_SYMBOL(ldlm_pool_get_lvf);
 
 #ifdef __KERNEL__
 static unsigned int ldlm_pool_granted(struct ldlm_pool *pl)
 {
-        return cfs_atomic_read(&pl->pl_granted);
+       return atomic_read(&pl->pl_granted);
 }
 
 static struct ptlrpc_thread *ldlm_pools_thread;
index 369b8df..e140e0f 100644 (file)
@@ -2133,18 +2133,17 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
 }
 
 static int replay_lock_interpret(const struct lu_env *env,
-                                 struct ptlrpc_request *req,
-                                 struct ldlm_async_args *aa, int rc)
+                                struct ptlrpc_request *req,
+                                struct ldlm_async_args *aa, int rc)
 {
-        struct ldlm_lock     *lock;
-        struct ldlm_reply    *reply;
-        struct obd_export    *exp;
-
-        ENTRY;
-        cfs_atomic_dec(&req->rq_import->imp_replay_inflight);
-        if (rc != ELDLM_OK)
-                GOTO(out, rc);
+       struct ldlm_lock     *lock;
+       struct ldlm_reply    *reply;
+       struct obd_export    *exp;
 
+       ENTRY;
+       atomic_dec(&req->rq_import->imp_replay_inflight);
+       if (rc != ELDLM_OK)
+               GOTO(out, rc);
 
         reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
         if (reply == NULL)
@@ -2253,18 +2252,18 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
          * also, we mark the request to be put on a dedicated
          * queue to be processed after all request replayes.
          * bug 6063 */
-        lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
+       lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
 
-        LDLM_DEBUG(lock, "replaying lock:");
+       LDLM_DEBUG(lock, "replaying lock:");
 
-        cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
-        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
-        aa = ptlrpc_req_async_args(req);
-        aa->lock_handle = body->lock_handle[0];
-        req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
-        ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+       atomic_inc(&req->rq_import->imp_replay_inflight);
+       CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+       aa = ptlrpc_req_async_args(req);
+       aa->lock_handle = body->lock_handle[0];
+       req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
+       ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
 
-        RETURN(0);
+       RETURN(0);
 }
 
 /**
@@ -2298,39 +2297,39 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
 
 int ldlm_replay_locks(struct obd_import *imp)
 {
-        struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
-        CFS_LIST_HEAD(list);
-        struct ldlm_lock *lock, *next;
-        int rc = 0;
+       struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
+       CFS_LIST_HEAD(list);
+       struct ldlm_lock *lock, *next;
+       int rc = 0;
 
-        ENTRY;
+       ENTRY;
 
-        LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0);
+       LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
 
-        /* don't replay locks if import failed recovery */
-        if (imp->imp_vbr_failed)
-                RETURN(0);
+       /* don't replay locks if import failed recovery */
+       if (imp->imp_vbr_failed)
+               RETURN(0);
 
-        /* ensure this doesn't fall to 0 before all have been queued */
-        cfs_atomic_inc(&imp->imp_replay_inflight);
+       /* ensure this doesn't fall to 0 before all have been queued */
+       atomic_inc(&imp->imp_replay_inflight);
 
-        if (ldlm_cancel_unused_locks_before_replay)
-                ldlm_cancel_unused_locks_for_replay(ns);
+       if (ldlm_cancel_unused_locks_before_replay)
+               ldlm_cancel_unused_locks_for_replay(ns);
 
-        ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
+       ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
 
-        cfs_list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
-                cfs_list_del_init(&lock->l_pending_chain);
-                if (rc) {
-                        LDLM_LOCK_RELEASE(lock);
-                        continue; /* or try to do the rest? */
-                }
-                rc = replay_one_lock(imp, lock);
-                LDLM_LOCK_RELEASE(lock);
-        }
+       cfs_list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
+               cfs_list_del_init(&lock->l_pending_chain);
+               if (rc) {
+                       LDLM_LOCK_RELEASE(lock);
+                       continue; /* or try to do the rest? */
+               }
+               rc = replay_one_lock(imp, lock);
+               LDLM_LOCK_RELEASE(lock);
+       }
 
-        cfs_atomic_dec(&imp->imp_replay_inflight);
+       atomic_dec(&imp->imp_replay_inflight);
 
-        RETURN(rc);
+       RETURN(rc);
 }
 EXPORT_SYMBOL(ldlm_replay_locks);
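
ldlm_replay_locks() above takes one extra reference on imp_replay_inflight before queueing, replay_one_lock() adds one per queued request, replay_lock_interpret() drops one per completion, and the final atomic_dec() releases the initial bias, so the counter cannot drain to zero while requests are still being queued. A minimal sketch of that bias pattern (names hypothetical; the completion callback is what the RPC layer would invoke later):

/*
 * Sketch only, not the Lustre code: the "hold a bias reference while
 * queueing" pattern used with imp_replay_inflight above.
 */
#include <linux/atomic.h>

static atomic_t inflight = ATOMIC_INIT(0);

/* cf. replay_lock_interpret(): invoked once per completed request */
static void completion_cb(void)
{
        atomic_dec(&inflight);
}

/* cf. ldlm_replay_locks() together with replay_one_lock() */
static void queue_all(int nr)
{
        int i;

        atomic_inc(&inflight);          /* bias: cannot hit zero early */
        for (i = 0; i < nr; i++)
                atomic_inc(&inflight);  /* one reference per request   */
        atomic_dec(&inflight);          /* drop the bias               */
}
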
index e504cff..dc870e9 100644 (file)
@@ -624,7 +624,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
        CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
        CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
        spin_lock_init(&ns->ns_lock);
-       cfs_atomic_set(&ns->ns_bref, 0);
+       atomic_set(&ns->ns_bref, 0);
        init_waitqueue_head(&ns->ns_waitq);
 
        ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
@@ -772,7 +772,7 @@ static int ldlm_resource_complain(cfs_hash_t *hs, cfs_hash_bd_t *bd,
        CERROR("%s: namespace resource "DLDLMRES" (%p) refcount nonzero "
               "(%d) after lock cleanup; forcing cleanup.\n",
               ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
-              cfs_atomic_read(&res->lr_refcount) - 1);
+              atomic_read(&res->lr_refcount) - 1);
 
        ldlm_resource_dump(D_ERROR, res);
        unlock_res(res);
@@ -806,46 +806,46 @@ EXPORT_SYMBOL(ldlm_namespace_cleanup);
  */
 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
 {
-        ENTRY;
+       ENTRY;
 
-        /* At shutdown time, don't call the cancellation callback */
-        ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
+       /* At shutdown time, don't call the cancellation callback */
+       ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
 
-        if (cfs_atomic_read(&ns->ns_bref) > 0) {
-                struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-                int rc;
-                CDEBUG(D_DLMTRACE,
-                       "dlm namespace %s free waiting on refcount %d\n",
-                       ldlm_ns_name(ns), cfs_atomic_read(&ns->ns_bref));
+       if (atomic_read(&ns->ns_bref) > 0) {
+               struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+               int rc;
+               CDEBUG(D_DLMTRACE,
+                      "dlm namespace %s free waiting on refcount %d\n",
+                      ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
 force_wait:
                if (force)
                        lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
 
-                rc = l_wait_event(ns->ns_waitq,
-                                  cfs_atomic_read(&ns->ns_bref) == 0, &lwi);
-
-                /* Forced cleanups should be able to reclaim all references,
-                 * so it's safe to wait forever... we can't leak locks... */
-                if (force && rc == -ETIMEDOUT) {
-                        LCONSOLE_ERROR("Forced cleanup waiting for %s "
-                                       "namespace with %d resources in use, "
-                                       "(rc=%d)\n", ldlm_ns_name(ns),
-                                       cfs_atomic_read(&ns->ns_bref), rc);
-                        GOTO(force_wait, rc);
-                }
+               rc = l_wait_event(ns->ns_waitq,
+                                 atomic_read(&ns->ns_bref) == 0, &lwi);
+
+               /* Forced cleanups should be able to reclaim all references,
+                * so it's safe to wait forever... we can't leak locks... */
+               if (force && rc == -ETIMEDOUT) {
+                       LCONSOLE_ERROR("Forced cleanup waiting for %s "
+                                      "namespace with %d resources in use, "
+                                      "(rc=%d)\n", ldlm_ns_name(ns),
+                                      atomic_read(&ns->ns_bref), rc);
+                       GOTO(force_wait, rc);
+               }
 
-                if (cfs_atomic_read(&ns->ns_bref)) {
-                        LCONSOLE_ERROR("Cleanup waiting for %s namespace "
-                                       "with %d resources in use, (rc=%d)\n",
-                                       ldlm_ns_name(ns),
-                                       cfs_atomic_read(&ns->ns_bref), rc);
-                        RETURN(ELDLM_NAMESPACE_EXISTS);
-                }
-                CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
-                       ldlm_ns_name(ns));
-        }
+               if (atomic_read(&ns->ns_bref)) {
+                       LCONSOLE_ERROR("Cleanup waiting for %s namespace "
+                                      "with %d resources in use, (rc=%d)\n",
+                                      ldlm_ns_name(ns),
+                                      atomic_read(&ns->ns_bref), rc);
+                       RETURN(ELDLM_NAMESPACE_EXISTS);
+               }
+               CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
+                      ldlm_ns_name(ns));
+       }
 
-        RETURN(ELDLM_OK);
+       RETURN(ELDLM_OK);
 }
 
 /**
@@ -952,19 +952,19 @@ EXPORT_SYMBOL(ldlm_namespace_free);
 
 void ldlm_namespace_get(struct ldlm_namespace *ns)
 {
-        cfs_atomic_inc(&ns->ns_bref);
+       atomic_inc(&ns->ns_bref);
 }
 EXPORT_SYMBOL(ldlm_namespace_get);
 
 /* This is only for callers that care about refcount */
 int ldlm_namespace_get_return(struct ldlm_namespace *ns)
 {
-        return cfs_atomic_inc_return(&ns->ns_bref);
+       return atomic_inc_return(&ns->ns_bref);
 }
 
 void ldlm_namespace_put(struct ldlm_namespace *ns)
 {
-       if (cfs_atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
+       if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
                wake_up(&ns->ns_waitq);
                spin_unlock(&ns->ns_lock);
        }
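
ns_bref acts as a busy count on the namespace: ldlm_namespace_get()/put() raise and drop it, __ldlm_namespace_free() above sleeps until it drains to zero, and the put side uses atomic_dec_and_lock() so only the thread dropping the last reference takes ns_lock and issues the wake-up. A reduced sketch of that waiter/waker pairing using the stock kernel primitives (names hypothetical; the real waiter uses l_wait_event() with an optional timeout for forced cleanup):

/*
 * Illustrative sketch, not part of the patch: a busy-reference count
 * whose final "put" takes the lock via atomic_dec_and_lock() and wakes
 * the draining thread.
 */
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct busy_ref {
        atomic_t          count;
        spinlock_t        lock;
        wait_queue_head_t waitq;
};

static void busy_get(struct busy_ref *b)
{
        atomic_inc(&b->count);          /* cf. ldlm_namespace_get() */
}

static void busy_put(struct busy_ref *b)
{
        /* returns true, with b->lock held, only when count hits zero */
        if (atomic_dec_and_lock(&b->count, &b->lock)) {
                wake_up(&b->waitq);     /* cf. ldlm_namespace_put() */
                spin_unlock(&b->lock);
        }
}

static void busy_drain(struct busy_ref *b)
{
        /* cf. __ldlm_namespace_free() waiting for ns_bref == 0 */
        wait_event(b->waitq, atomic_read(&b->count) == 0);
}
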
@@ -1037,13 +1037,13 @@ static struct ldlm_resource *ldlm_resource_new(void)
         CFS_INIT_LIST_HEAD(&res->lr_waiting);
 
        /* Initialize interval trees for each lock mode. */
-        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
-                res->lr_itree[idx].lit_size = 0;
-                res->lr_itree[idx].lit_mode = 1 << idx;
-                res->lr_itree[idx].lit_root = NULL;
-        }
+       for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+               res->lr_itree[idx].lit_size = 0;
+               res->lr_itree[idx].lit_mode = 1 << idx;
+               res->lr_itree[idx].lit_root = NULL;
+       }
 
-        cfs_atomic_set(&res->lr_refcount, 1);
+       atomic_set(&res->lr_refcount, 1);
        spin_lock_init(&res->lr_lock);
        lu_ref_init(&res->lr_reference);
 
@@ -1182,12 +1182,12 @@ EXPORT_SYMBOL(ldlm_resource_get);
 
 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
 {
-        LASSERT(res != NULL);
-        LASSERT(res != LP_POISON);
-        cfs_atomic_inc(&res->lr_refcount);
-        CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
-               cfs_atomic_read(&res->lr_refcount));
-        return res;
+       LASSERT(res != NULL);
+       LASSERT(res != LP_POISON);
+       atomic_inc(&res->lr_refcount);
+       CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
+              atomic_read(&res->lr_refcount));
+       return res;
 }
 
 static void __ldlm_resource_putref_final(cfs_hash_bd_t *bd,
@@ -1220,55 +1220,55 @@ static void __ldlm_resource_putref_final(cfs_hash_bd_t *bd,
 /* Returns 1 if the resource was freed, 0 if it remains. */
 int ldlm_resource_putref(struct ldlm_resource *res)
 {
-        struct ldlm_namespace *ns = ldlm_res_to_ns(res);
-        cfs_hash_bd_t   bd;
-
-        LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
-        CDEBUG(D_INFO, "putref res: %p count: %d\n",
-               res, cfs_atomic_read(&res->lr_refcount) - 1);
-
-        cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
-        if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
-                __ldlm_resource_putref_final(&bd, res);
-                cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
-                if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
-                        ns->ns_lvbo->lvbo_free(res);
-                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
-                return 1;
-        }
-        return 0;
+       struct ldlm_namespace *ns = ldlm_res_to_ns(res);
+       cfs_hash_bd_t   bd;
+
+       LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
+       CDEBUG(D_INFO, "putref res: %p count: %d\n",
+              res, atomic_read(&res->lr_refcount) - 1);
+
+       cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
+       if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
+               __ldlm_resource_putref_final(&bd, res);
+               cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
+               if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
+                       ns->ns_lvbo->lvbo_free(res);
+               OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
+               return 1;
+       }
+       return 0;
 }
 EXPORT_SYMBOL(ldlm_resource_putref);
 
 /* Returns 1 if the resource was freed, 0 if it remains. */
 int ldlm_resource_putref_locked(struct ldlm_resource *res)
 {
-        struct ldlm_namespace *ns = ldlm_res_to_ns(res);
-
-        LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
-        CDEBUG(D_INFO, "putref res: %p count: %d\n",
-               res, cfs_atomic_read(&res->lr_refcount) - 1);
-
-        if (cfs_atomic_dec_and_test(&res->lr_refcount)) {
-                cfs_hash_bd_t bd;
-
-                cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
-                                &res->lr_name, &bd);
-                __ldlm_resource_putref_final(&bd, res);
-                cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
-                /* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF,
-                 * so we should never be here while calling cfs_hash_del,
-                 * cfs_hash_for_each_nolock is the only case we can get
-                 * here, which is safe to release cfs_hash_bd_lock.
-                 */
-                if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
-                        ns->ns_lvbo->lvbo_free(res);
-                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
+       struct ldlm_namespace *ns = ldlm_res_to_ns(res);
 
-                cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
-                return 1;
-        }
-        return 0;
+       LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
+       CDEBUG(D_INFO, "putref res: %p count: %d\n",
+              res, atomic_read(&res->lr_refcount) - 1);
+
+       if (atomic_dec_and_test(&res->lr_refcount)) {
+               cfs_hash_bd_t bd;
+
+               cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
+                               &res->lr_name, &bd);
+               __ldlm_resource_putref_final(&bd, res);
+               cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
+               /* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF,
+                * so we should never be here while calling cfs_hash_del,
+                * cfs_hash_for_each_nolock is the only case we can get
+                * here, which is safe to release cfs_hash_bd_lock.
+                */
+               if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
+                       ns->ns_lvbo->lvbo_free(res);
+               OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
+
+               cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
+               return 1;
+       }
+       return 0;
 }
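
Both putref variants above log the post-decrement count and release the resource once it reaches zero: ldlm_resource_putref() folds the decrement into cfs_hash_bd_dec_and_lock() so the final decrement also takes the hash-bucket lock before __ldlm_resource_putref_final(), while the _locked variant, already under the bucket lock, uses plain atomic_dec_and_test(). A reduced sketch of the release-on-last-reference idiom in its simple dec_and_test form (object type and release hook are hypothetical; new objects would start at refcount 1, as ldlm_resource_new() does above):

/*
 * Sketch only, not part of the patch: free-on-last-reference with
 * atomic_dec_and_test(), reduced from ldlm_resource_putref_locked().
 */
#include <linux/atomic.h>
#include <linux/slab.h>

struct refobj {
        atomic_t refcount;
        void    (*release)(struct refobj *obj);
};

static struct refobj *refobj_get(struct refobj *obj)
{
        atomic_inc(&obj->refcount);             /* cf. ldlm_resource_getref() */
        return obj;
}

/* Returns 1 if the object was freed, 0 if it remains (same contract). */
static int refobj_put(struct refobj *obj)
{
        if (atomic_dec_and_test(&obj->refcount)) {
                if (obj->release)
                        obj->release(obj);      /* cf. lvbo_free() hook */
                kfree(obj);                     /* cf. OBD_SLAB_FREE()  */
                return 1;
        }
        return 0;
}
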
 
 /**
@@ -1378,19 +1378,19 @@ static int ldlm_res_hash_dump(cfs_hash_t *hs, cfs_hash_bd_t *bd,
  */
 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
 {
-        if (!((libcfs_debug | D_ERROR) & level))
-                return;
+       if (!((libcfs_debug | D_ERROR) & level))
+               return;
 
-        CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
-               ldlm_ns_name(ns), cfs_atomic_read(&ns->ns_bref),
-               ns_is_client(ns) ? "client" : "server");
+       CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
+              ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
+              ns_is_client(ns) ? "client" : "server");
 
-        if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
-                return;
+       if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
+               return;
 
-        cfs_hash_for_each_nolock(ns->ns_rs_hash,
-                                 ldlm_res_hash_dump,
-                                 (void *)(unsigned long)level);
+       cfs_hash_for_each_nolock(ns->ns_rs_hash,
+                                ldlm_res_hash_dump,
+                                (void *)(unsigned long)level);
        spin_lock(&ns->ns_lock);
        ns->ns_next_dump = cfs_time_shift(10);
        spin_unlock(&ns->ns_lock);
@@ -1411,12 +1411,12 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
                return;
 
        CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
-              PLDLMRES(res), res, cfs_atomic_read(&res->lr_refcount));
+              PLDLMRES(res), res, atomic_read(&res->lr_refcount));
 
-        if (!cfs_list_empty(&res->lr_granted)) {
-                CDEBUG(level, "Granted locks (in reverse order):\n");
-                cfs_list_for_each_entry_reverse(lock, &res->lr_granted,
-                                                l_res_link) {
+       if (!cfs_list_empty(&res->lr_granted)) {
+               CDEBUG(level, "Granted locks (in reverse order):\n");
+               cfs_list_for_each_entry_reverse(lock, &res->lr_granted,
+                                               l_res_link) {
                         LDLM_DEBUG_LIMIT(level, lock, "###");
                         if (!(level & D_CANTMASK) &&
                             ++granted > ldlm_dump_granted_max) {
index 3d02d34..b316731 100644 (file)
@@ -244,7 +244,7 @@ struct lfsck_component {
        /* into lfsck_instance::li_list_dir */
        cfs_list_t               lc_link_dir;
        struct rw_semaphore      lc_sem;
-       cfs_atomic_t             lc_ref;
+       atomic_t                 lc_ref;
 
        struct lfsck_position    lc_pos_start;
        struct lfsck_instance   *lc_lfsck;
@@ -281,7 +281,7 @@ struct lfsck_instance {
        /* For the components those are not scanning now. */
        cfs_list_t                li_list_idle;
 
-       cfs_atomic_t              li_ref;
+       atomic_t                  li_ref;
        struct ptlrpc_thread      li_thread;
 
        /* The time for last checkpoint, jiffies */