LU-3963 lnet: convert cfs_atomic primitives 70/7070/3
author James Simmons <uja.ornl@gmail.com>
Thu, 17 Oct 2013 13:05:43 +0000 (09:05 -0400)
committer Oleg Drokin <oleg.drokin@intel.com>
Fri, 25 Oct 2013 01:56:24 +0000 (01:56 +0000)
This patch converts all cfs_atomic primitives in the lnet/
directory to the standard kernel atomic primitives.
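
For reference, the conversion is mechanical: each libcfs wrapper maps
one-to-one onto the corresponding standard kernel atomic from
<linux/atomic.h>. A minimal before/after sketch with a made-up counter
(illustrative only, not code from this patch):

#include <linux/atomic.h>

static atomic_t nthreads = ATOMIC_INIT(0);	/* was: cfs_atomic_t */

static void thread_started(void)
{
	atomic_inc(&nthreads);			/* was: cfs_atomic_inc() */
}

static int last_thread_stopped(void)
{
	/* was: cfs_atomic_dec_and_test(); returns non-zero when the
	 * counter drops to zero */
	return atomic_dec_and_test(&nthreads);
}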

Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Change-Id: I81426bd7a8a1a97ea5f198d99193649fbbf5389a
Reviewed-on: http://review.whamcloud.com/7070
Tested-by: Hudson
Reviewed-by: Bob Glossman <bob.glossman@intel.com>
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
22 files changed:
lnet/klnds/mxlnd/mxlnd.c
lnet/klnds/mxlnd/mxlnd.h
lnet/klnds/mxlnd/mxlnd_cb.c
lnet/klnds/o2iblnd/o2iblnd.c
lnet/klnds/o2iblnd/o2iblnd.h
lnet/klnds/o2iblnd/o2iblnd_cb.c
lnet/klnds/qswlnd/qswlnd.c
lnet/klnds/qswlnd/qswlnd.h
lnet/klnds/qswlnd/qswlnd_cb.c
lnet/klnds/ralnd/ralnd.c
lnet/klnds/ralnd/ralnd.h
lnet/klnds/ralnd/ralnd_cb.c
lnet/klnds/socklnd/socklnd.c
lnet/klnds/socklnd/socklnd.h
lnet/klnds/socklnd/socklnd_cb.c
lnet/selftest/brw_test.c
lnet/selftest/conrpc.c
lnet/selftest/conrpc.h
lnet/selftest/console.h
lnet/selftest/framework.c
lnet/selftest/ping_test.c
lnet/selftest/selftest.h
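
Most of the churn below is in the per-peer and per-connection refcount
macros of the individual LNDs (mxlnd_peer_addref/decref,
kiblnd_peer_addref/decref, and friends). Condensed to its core, the
idiom they implement with the converted primitives is sketched here;
struct peer and peer_free() are hypothetical stand-ins for the real
per-LND types and destructors:

#include <libcfs/libcfs.h>	/* LASSERT() */
#include <linux/atomic.h>

struct peer {
	atomic_t refcount;
};

static void peer_free(struct peer *p);	/* hypothetical destructor */

static void peer_addref(struct peer *p)
{
	/* caller must already hold a reference */
	LASSERT(atomic_read(&p->refcount) > 0);
	atomic_inc(&p->refcount);
}

static void peer_decref(struct peer *p)
{
	LASSERT(atomic_read(&p->refcount) > 0);
	if (atomic_dec_and_test(&p->refcount))	/* last ref dropped */
		peer_free(p);
}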

diff --git a/lnet/klnds/mxlnd/mxlnd.c b/lnet/klnds/mxlnd/mxlnd.c
index 13b5f02..6c9d7b8 100644
@@ -393,15 +393,15 @@ int
 mxlnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
 {
        cfs_task *task;
-        int     i   = (int) ((long) arg);
+       int     i   = (int) ((long) arg);
 
-        cfs_atomic_inc(&kmxlnd_data.kmx_nthreads);
+       atomic_inc(&kmxlnd_data.kmx_nthreads);
        init_completion(&kmxlnd_data.kmx_completions[i]);
 
        task = kthread_run(fn, arg, name);
        if (IS_ERR(task)) {
                CERROR("cfs_create_thread() failed with %d\n", PTR_ERR(task));
-               cfs_atomic_dec(&kmxlnd_data.kmx_nthreads);
+               atomic_dec(&kmxlnd_data.kmx_nthreads);
        }
        return PTR_ERR(task);
 }
@@ -415,8 +415,8 @@ mxlnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
 void
 mxlnd_thread_stop(long id)
 {
-        int     i       = (int) id;
-        cfs_atomic_dec (&kmxlnd_data.kmx_nthreads);
+       int     i       = (int) id;
+       atomic_dec (&kmxlnd_data.kmx_nthreads);
        complete(&kmxlnd_data.kmx_completions[i]);
 }
 
@@ -429,23 +429,22 @@ mxlnd_thread_stop(long id)
 void
 mxlnd_shutdown (lnet_ni_t *ni)
 {
-        int                     i               = 0;
-        int                     nthreads        = MXLND_NDAEMONS
-                                                  + *kmxlnd_tunables.kmx_n_waitd;
+       int     i               = 0;
+       int     nthreads        = MXLND_NDAEMONS + *kmxlnd_tunables.kmx_n_waitd;
 
-        LASSERT (ni == kmxlnd_data.kmx_ni);
-        LASSERT (ni->ni_data == &kmxlnd_data);
-        CDEBUG(D_NET, "in shutdown()\n");
+       LASSERT (ni == kmxlnd_data.kmx_ni);
+       LASSERT (ni->ni_data == &kmxlnd_data);
+       CDEBUG(D_NET, "in shutdown()\n");
 
-        CDEBUG(D_MALLOC, "before MXLND cleanup: libcfs_kmemory %d "
-                         "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
-                         kmxlnd_data.kmx_mem_used);
+       CDEBUG(D_MALLOC, "before MXLND cleanup: libcfs_kmemory %d "
+                        "kmx_mem_used %ld\n", atomic_read(&libcfs_kmemory),
+                        kmxlnd_data.kmx_mem_used);
 
 
-        CDEBUG(D_NET, "setting shutdown = 1\n");
-        cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+       CDEBUG(D_NET, "setting shutdown = 1\n");
+       atomic_set(&kmxlnd_data.kmx_shutdown, 1);
 
-        switch (kmxlnd_data.kmx_init) {
+       switch (kmxlnd_data.kmx_init) {
 
         case MXLND_INIT_ALL:
 
@@ -462,12 +461,12 @@ mxlnd_shutdown (lnet_ni_t *ni)
 
         case MXLND_INIT_THREADS:
 
-                CDEBUG(D_NET, "waiting on threads\n");
-                /* wait for threads to complete */
-                for (i = 0; i < nthreads; i++) {
+               CDEBUG(D_NET, "waiting on threads\n");
+               /* wait for threads to complete */
+               for (i = 0; i < nthreads; i++) {
                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
-                }
-                LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+               }
+               LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
 
                 CDEBUG(D_NET, "freeing completions\n");
                 MXLND_FREE(kmxlnd_data.kmx_completions,
@@ -511,9 +510,9 @@ mxlnd_shutdown (lnet_ni_t *ni)
         }
         CDEBUG(D_NET, "shutdown complete\n");
 
-        CDEBUG(D_MALLOC, "after MXLND cleanup: libcfs_kmemory %d "
-                         "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
-                         kmxlnd_data.kmx_mem_used);
+       CDEBUG(D_MALLOC, "after MXLND cleanup: libcfs_kmemory %d "
+                        "kmx_mem_used %ld\n", atomic_read(&libcfs_kmemory),
+                        kmxlnd_data.kmx_mem_used);
 
        kmxlnd_data.kmx_init = MXLND_INIT_NOTHING;
        module_put(THIS_MODULE);
@@ -542,9 +541,9 @@ mxlnd_startup (lnet_ni_t *ni)
                 CERROR("Only 1 instance supported\n");
                 return -EPERM;
         }
-        CDEBUG(D_MALLOC, "before MXLND startup: libcfs_kmemory %d "
-                         "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
-                         kmxlnd_data.kmx_mem_used);
+       CDEBUG(D_MALLOC, "before MXLND startup: libcfs_kmemory %d "
+                        "kmx_mem_used %ld\n", atomic_read(&libcfs_kmemory),
+                        kmxlnd_data.kmx_mem_used);
 
         ni->ni_maxtxcredits = MXLND_TX_MSGS();
         ni->ni_peertxcredits = *kmxlnd_tunables.kmx_peercredits;
@@ -624,62 +623,62 @@ mxlnd_startup (lnet_ni_t *ni)
                if (ret < 0) {
                        CERROR("Starting mxlnd_request_waitd[%d] "
                                "failed with %d\n", i, ret);
-                        cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
-                        mx_wakeup(kmxlnd_data.kmx_endpt);
-                        for (--i; i >= 0; i--) {
+                       atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+                       mx_wakeup(kmxlnd_data.kmx_endpt);
+                       for (--i; i >= 0; i--) {
                                wait_for_completion(&kmxlnd_data.kmx_completions[i]);
-                        }
-                        LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
-                        MXLND_FREE(kmxlnd_data.kmx_completions,
+                       }
+                       LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+                       MXLND_FREE(kmxlnd_data.kmx_completions,
                                nthreads * sizeof(struct completion));
 
-                        goto failed;
-                }
-        }
+                       goto failed;
+               }
+       }
        ret = mxlnd_thread_start(mxlnd_tx_queued, (void *)((long)i++),
                                 "mxlnd_tx_queued");
-        if (ret < 0) {
-                CERROR("Starting mxlnd_tx_queued failed with %d\n", ret);
-                cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
-                mx_wakeup(kmxlnd_data.kmx_endpt);
-                for (--i; i >= 0; i--) {
+       if (ret < 0) {
+               CERROR("Starting mxlnd_tx_queued failed with %d\n", ret);
+               atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+               mx_wakeup(kmxlnd_data.kmx_endpt);
+               for (--i; i >= 0; i--) {
                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
-                }
-                LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
-                MXLND_FREE(kmxlnd_data.kmx_completions,
+               }
+               LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+               MXLND_FREE(kmxlnd_data.kmx_completions,
                        nthreads * sizeof(struct completion));
-                goto failed;
-        }
+               goto failed;
+       }
        ret = mxlnd_thread_start(mxlnd_timeoutd, (void *)((long)i++),
                                 "mxlnd_timeoutd");
-        if (ret < 0) {
-                CERROR("Starting mxlnd_timeoutd failed with %d\n", ret);
-                cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
-                mx_wakeup(kmxlnd_data.kmx_endpt);
+       if (ret < 0) {
+               CERROR("Starting mxlnd_timeoutd failed with %d\n", ret);
+               atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+               mx_wakeup(kmxlnd_data.kmx_endpt);
                up(&kmxlnd_data.kmx_tx_queue_sem);
-                for (--i; i >= 0; i--) {
+               for (--i; i >= 0; i--) {
                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
-                }
-                LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
-                MXLND_FREE(kmxlnd_data.kmx_completions,
+               }
+               LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+               MXLND_FREE(kmxlnd_data.kmx_completions,
                        nthreads * sizeof(struct completion));
-                goto failed;
-        }
+               goto failed;
+       }
        ret = mxlnd_thread_start(mxlnd_connd, (void *)((long)i++),
                                 "mxlnd_connd");
-        if (ret < 0) {
-                CERROR("Starting mxlnd_connd failed with %d\n", ret);
-                cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
-                mx_wakeup(kmxlnd_data.kmx_endpt);
+       if (ret < 0) {
+               CERROR("Starting mxlnd_connd failed with %d\n", ret);
+               atomic_set(&kmxlnd_data.kmx_shutdown, 1);
+               mx_wakeup(kmxlnd_data.kmx_endpt);
                up(&kmxlnd_data.kmx_tx_queue_sem);
-                for (--i; i >= 0; i--) {
+               for (--i; i >= 0; i--) {
                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
-                }
-                LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
-                MXLND_FREE(kmxlnd_data.kmx_completions,
+               }
+               LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
+               MXLND_FREE(kmxlnd_data.kmx_completions,
                        nthreads * sizeof(struct completion));
-                goto failed;
-        }
+               goto failed;
+       }
 
         kmxlnd_data.kmx_init = MXLND_INIT_THREADS;
         /*****************************************************/
diff --git a/lnet/klnds/mxlnd/mxlnd.h b/lnet/klnds/mxlnd/mxlnd.h
index 2f1f06b..1cae4eb 100644
@@ -219,9 +219,9 @@ typedef struct
 /* global interface state */
 typedef struct kmx_data
 {
-        int                 kmx_init;           /* initialization state */
-        cfs_atomic_t        kmx_shutdown;       /* shutting down? */
-        cfs_atomic_t        kmx_nthreads;       /* number of threads */
+       int                 kmx_init;           /* initialization state */
+       atomic_t            kmx_shutdown;       /* shutting down? */
+       atomic_t            kmx_nthreads;       /* number of threads */
        struct completion   *kmx_completions;   /* array of completion struct */
        lnet_ni_t           *kmx_ni;            /* the LND instance */
        u64                 kmx_incarnation;    /* my incarnation value */
@@ -235,17 +235,17 @@ typedef struct kmx_data
        cfs_list_t          kmx_conn_reqs;      /* list of connection reqs */
        spinlock_t          kmx_conn_lock;      /* connection list lock */
        struct semaphore    kmx_conn_sem;       /* connection request list */
-        cfs_list_t          kmx_conn_zombies;   /* list of zombie connections */
-        cfs_list_t          kmx_orphan_msgs;    /* list of txs to cancel */
+       cfs_list_t          kmx_conn_zombies;   /* list of zombie connections */
+       cfs_list_t          kmx_orphan_msgs;    /* list of txs to cancel */
 
-                                                /* list of all known peers */
-        cfs_list_t          kmx_peers[MXLND_HASH_SIZE];
-        cfs_atomic_t        kmx_npeers;         /* number of peers */
+                                               /* list of all known peers */
+       cfs_list_t          kmx_peers[MXLND_HASH_SIZE];
+       atomic_t            kmx_npeers;         /* number of peers */
 
-        kmx_pages_t        *kmx_tx_pages;       /* tx msg pages */
+       kmx_pages_t        *kmx_tx_pages;       /* tx msg pages */
 
-        struct kmx_ctx     *kmx_txs;            /* all tx descriptors */
-        cfs_list_t          kmx_tx_idle;        /* list of idle tx */
+       struct kmx_ctx     *kmx_txs;            /* all tx descriptors */
+       cfs_list_t          kmx_tx_idle;        /* list of idle tx */
        spinlock_t          kmx_tx_idle_lock;   /* lock for idle tx list */
        s32                 kmx_tx_used;        /* txs in use */
        u64                 kmx_tx_next_cookie; /* unique id for tx */
@@ -406,7 +406,7 @@ typedef struct kmx_conn
         cfs_list_t          mxk_zombie;         /* for placing on zombies list */
         u64                 mxk_incarnation;    /* connections's incarnation value */
         u32                 mxk_sid;            /* peer's MX session id */
-        cfs_atomic_t        mxk_refcount;       /* reference counting */
+       atomic_t        mxk_refcount;       /* reference counting */
         int                 mxk_status;         /* can we send messages? MXLND_CONN_* */
 
         mx_endpoint_addr_t  mxk_epa;            /* peer's endpoint address */
@@ -439,7 +439,7 @@ typedef struct kmx_peer
         cfs_list_t          mxp_list;           /* for placing on kmx_peers */
         lnet_nid_t          mxp_nid;            /* peer's LNET NID */
         lnet_ni_t          *mxp_ni;             /* LNET interface */
-        cfs_atomic_t        mxp_refcount;       /* reference counts */
+       atomic_t        mxp_refcount;       /* reference counts */
 
         cfs_list_t          mxp_conns;          /* list of connections */
         kmx_conn_t         *mxp_conn;           /* current connection */
@@ -511,32 +511,32 @@ mxlnd_nid_to_hash(lnet_nid_t nid)
 
 #define mxlnd_peer_addref(peer)                                 \
 do {                                                            \
-        LASSERT(peer != NULL);                                  \
-        LASSERT(cfs_atomic_read(&(peer)->mxp_refcount) > 0);    \
-        cfs_atomic_inc(&(peer)->mxp_refcount);                  \
+       LASSERT(peer != NULL);                                  \
+       LASSERT(atomic_read(&(peer)->mxp_refcount) > 0);        \
+       atomic_inc(&(peer)->mxp_refcount);                      \
 } while (0)
 
 
 #define mxlnd_peer_decref(peer)                                 \
 do {                                                            \
-        LASSERT(cfs_atomic_read(&(peer)->mxp_refcount) > 0);    \
-        if (cfs_atomic_dec_and_test(&(peer)->mxp_refcount))     \
-                mxlnd_peer_free(peer);                          \
+       LASSERT(atomic_read(&(peer)->mxp_refcount) > 0);        \
+       if (atomic_dec_and_test(&(peer)->mxp_refcount))         \
+               mxlnd_peer_free(peer);                          \
 } while (0)
 
 #define mxlnd_conn_addref(conn)                                 \
 do {                                                            \
-        LASSERT(conn != NULL);                                  \
-        LASSERT(cfs_atomic_read(&(conn)->mxk_refcount) > 0);    \
-        cfs_atomic_inc(&(conn)->mxk_refcount);                  \
+       LASSERT(conn != NULL);                                  \
+       LASSERT(atomic_read(&(conn)->mxk_refcount) > 0);        \
+       atomic_inc(&(conn)->mxk_refcount);                      \
 } while (0)
 
 
 #define mxlnd_conn_decref(conn)                                                \
 do {                                                                   \
        LASSERT(conn != NULL);                                          \
-       LASSERT(cfs_atomic_read(&(conn)->mxk_refcount) > 0);            \
-       if (cfs_atomic_dec_and_test(&(conn)->mxk_refcount)) {           \
+       LASSERT(atomic_read(&(conn)->mxk_refcount) > 0);                \
+       if (atomic_dec_and_test(&(conn)->mxk_refcount)) {               \
                spin_lock(&kmxlnd_data.kmx_conn_lock);                  \
                LASSERT((conn)->mxk_status == MXLND_CONN_DISCONNECT);   \
                CDEBUG(D_NET, "adding conn %p to zombies\n", (conn));   \
diff --git a/lnet/klnds/mxlnd/mxlnd_cb.c b/lnet/klnds/mxlnd/mxlnd_cb.c
index 89304d0..f2a0568 100644
@@ -545,8 +545,8 @@ mxlnd_conn_disconnect(kmx_conn_t *conn, int mx_dis, int send_bye)
                 mxlnd_sleep(msecs_to_jiffies(20));
         }
 
-        if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown) != 1) {
-                unsigned long   last_msg        = 0;
+       if (atomic_read(&kmxlnd_data.kmx_shutdown) != 1) {
+               unsigned long   last_msg        = 0;
 
                 /* notify LNET that we are giving up on this peer */
                 if (cfs_time_after(conn->mxk_last_rx, conn->mxk_last_tx))
@@ -614,13 +614,13 @@ mxlnd_conn_alloc_locked(kmx_conn_t **connp, kmx_peer_t *peer)
 
         memset(conn->mxk_rxs, 0, MXLND_RX_MSGS() * sizeof(kmx_ctx_t));
 
-        conn->mxk_peer = peer;
-        CFS_INIT_LIST_HEAD(&conn->mxk_list);
-        CFS_INIT_LIST_HEAD(&conn->mxk_zombie);
-        cfs_atomic_set(&conn->mxk_refcount, 2); /* ref for owning peer
-                                                   and one for the caller */
-        if (peer->mxp_nid == kmxlnd_data.kmx_ni->ni_nid) {
-                u64     nic_id  = 0ULL;
+       conn->mxk_peer = peer;
+       CFS_INIT_LIST_HEAD(&conn->mxk_list);
+       CFS_INIT_LIST_HEAD(&conn->mxk_zombie);
+       atomic_set(&conn->mxk_refcount, 2); /* ref for owning peer
+                                                  and one for the caller */
+       if (peer->mxp_nid == kmxlnd_data.kmx_ni->ni_nid) {
+               u64     nic_id  = 0ULL;
                 u32     ep_id   = 0;
 
                 /* this is localhost, set the epa and status as up */
@@ -771,18 +771,18 @@ mxlnd_deq_pending_ctx(kmx_ctx_t *ctx)
 void
 mxlnd_peer_free(kmx_peer_t *peer)
 {
-        CDEBUG(D_NET, "freeing peer 0x%p %s\n", peer, libcfs_nid2str(peer->mxp_nid));
+       CDEBUG(D_NET, "freeing peer 0x%p %s\n", peer, libcfs_nid2str(peer->mxp_nid));
 
-        LASSERT (cfs_atomic_read(&peer->mxp_refcount) == 0);
+       LASSERT (atomic_read(&peer->mxp_refcount) == 0);
 
-        if (!cfs_list_empty(&peer->mxp_list)) {
-                /* assume we are locked */
-                cfs_list_del_init(&peer->mxp_list);
-        }
+       if (!cfs_list_empty(&peer->mxp_list)) {
+               /* assume we are locked */
+               cfs_list_del_init(&peer->mxp_list);
+       }
 
-        MXLND_FREE(peer, sizeof (*peer));
-        cfs_atomic_dec(&kmxlnd_data.kmx_npeers);
-        return;
+       MXLND_FREE(peer, sizeof (*peer));
+       atomic_dec(&kmxlnd_data.kmx_npeers);
+       return;
 }
 
 static int
@@ -889,10 +889,10 @@ mxlnd_peer_alloc(kmx_peer_t **peerp, lnet_nid_t nid, u32 board, u32 ep_id, u64 n
 
         memset(peer, 0, sizeof(*peer));
 
-        CFS_INIT_LIST_HEAD(&peer->mxp_list);
-        peer->mxp_nid = nid;
-        /* peer->mxp_ni unused - may be used for multi-rail */
-        cfs_atomic_set(&peer->mxp_refcount, 1);     /* ref for kmx_peers list */
+       CFS_INIT_LIST_HEAD(&peer->mxp_list);
+       peer->mxp_nid = nid;
+       /* peer->mxp_ni unused - may be used for multi-rail */
+       atomic_set(&peer->mxp_refcount, 1);     /* ref for kmx_peers list */
 
         peer->mxp_board = board;
         peer->mxp_ep_id = ep_id;
@@ -1008,12 +1008,12 @@ mxlnd_find_peer_by_nid(lnet_nid_t nid, int create)
                 mxlnd_peer_decref(peer);
                 peer = old;
         } else {
-                /* no other peer, use this one */
-                cfs_list_add_tail(&peer->mxp_list,
-                                  &kmxlnd_data.kmx_peers[hash]);
-                cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
-                mxlnd_peer_addref(peer);
-                mxlnd_conn_decref(peer->mxp_conn); /* drop ref from peer_alloc */
+               /* no other peer, use this one */
+               cfs_list_add_tail(&peer->mxp_list,
+                                 &kmxlnd_data.kmx_peers[hash]);
+               atomic_inc(&kmxlnd_data.kmx_npeers);
+               mxlnd_peer_addref(peer);
+               mxlnd_conn_decref(peer->mxp_conn); /* drop ref from peer_alloc */
         }
 
        write_unlock(g_lock);
@@ -1481,12 +1481,12 @@ mxlnd_get_peer_info(int index, lnet_nid_t *nidp, int *count)
         for (i = 0; i < MXLND_HASH_SIZE; i++) {
                 cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
                                         mxp_list) {
-                        if (index-- == 0) {
-                                *nidp = peer->mxp_nid;
-                                *count = cfs_atomic_read(&peer->mxp_refcount);
-                                ret = 0;
-                                break;
-                        }
+                       if (index-- == 0) {
+                               *nidp = peer->mxp_nid;
+                               *count = atomic_read(&peer->mxp_refcount);
+                               ret = 0;
+                               break;
+                       }
                 }
         }
        read_unlock(&kmxlnd_data.kmx_global_lock);
@@ -2554,9 +2554,9 @@ mxlnd_tx_queued(void *arg)
        spinlock_t              *tx_q_lock = &kmxlnd_data.kmx_tx_queue_lock;
        rwlock_t                *g_lock  = &kmxlnd_data.kmx_global_lock;
 
-       while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+       while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
                ret = down_interruptible(&kmxlnd_data.kmx_tx_queue_sem);
-               if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
+               if (atomic_read(&kmxlnd_data.kmx_shutdown))
                        break;
                if (ret != 0) /* Should we check for -EINTR? */
                        continue;
@@ -2635,11 +2635,11 @@ mxlnd_tx_queued(void *arg)
                                 }
                         }
 
-                        if (found == 0) {
-                                cfs_list_add_tail(&peer->mxp_list,
-                                                  &kmxlnd_data.kmx_peers[hash]);
-                                cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
-                        } else {
+                       if (found == 0) {
+                               cfs_list_add_tail(&peer->mxp_list,
+                                                 &kmxlnd_data.kmx_peers[hash]);
+                               atomic_inc(&kmxlnd_data.kmx_npeers);
+                       } else {
                                 tx->mxc_peer = old;
                                 tx->mxc_conn = old->mxp_conn;
                                 LASSERT(old->mxp_conn != NULL);
@@ -3498,8 +3498,8 @@ mxlnd_request_waitd(void *arg)
 
         CDEBUG(D_NET, "%s starting\n", name);
 
-        while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
-                u8      msg_type        = 0;
+       while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
+               u8      msg_type        = 0;
 
                 mxret = MX_SUCCESS;
                 result = 0;
@@ -3516,8 +3516,8 @@ mxlnd_request_waitd(void *arg)
                 mxret = mx_wait_any(kmxlnd_data.kmx_endpt, MXLND_WAIT_TIMEOUT,
                                     0ULL, 0ULL, &status, &result);
 #endif
-                if (unlikely(cfs_atomic_read(&kmxlnd_data.kmx_shutdown)))
-                        break;
+               if (unlikely(atomic_read(&kmxlnd_data.kmx_shutdown)))
+                       break;
 
                 if (result != 1) {
                         /* nothing completed... */
@@ -3604,10 +3604,10 @@ mxlnd_check_timeouts(unsigned long now)
                cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
                                        mxp_list) {
 
-                       if (unlikely(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+                       if (unlikely(atomic_read(&kmxlnd_data.kmx_shutdown))) {
                                read_unlock(g_lock);
-                                return next;
-                        }
+                               return next;
+                       }
 
                         conn = peer->mxp_conn;
                         if (conn) {
@@ -3732,11 +3732,11 @@ mxlnd_passive_connect(kmx_connparams_t *cp)
                                 peer = existing_peer;
                                 mxlnd_conn_addref(peer->mxp_conn);
                                 conn = peer->mxp_conn;
-                        } else {
-                                cfs_list_add_tail(&peer->mxp_list,
-                                                  &kmxlnd_data.kmx_peers[hash]);
-                                cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
-                        }
+                       } else {
+                               cfs_list_add_tail(&peer->mxp_list,
+                                                 &kmxlnd_data.kmx_peers[hash]);
+                               atomic_inc(&kmxlnd_data.kmx_npeers);
+                       }
                        write_unlock(g_lock);
                 } else {
                         ret = mxlnd_conn_alloc(&conn, peer); /* adds 2nd ref */
@@ -3956,20 +3956,20 @@ mxlnd_free_conn_zombies(void)
 int
 mxlnd_connd(void *arg)
 {
-        long                    id              = (long) arg;
+       long                    id              = (long) arg;
 
-        CDEBUG(D_NET, "connd starting\n");
+       CDEBUG(D_NET, "connd starting\n");
 
-        while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
-                int                ret             = 0;
-                kmx_connparams_t  *cp              = NULL;
+       while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
+               int                ret             = 0;
+               kmx_connparams_t  *cp              = NULL;
                spinlock_t        *g_conn_lock  = &kmxlnd_data.kmx_conn_lock;
                cfs_list_t        *conn_reqs    = &kmxlnd_data.kmx_conn_reqs;
 
                ret = down_interruptible(&kmxlnd_data.kmx_conn_sem);
 
-                if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
-                        break;
+               if (atomic_read(&kmxlnd_data.kmx_shutdown))
+                       break;
 
                 if (ret != 0)
                         continue;
@@ -4033,7 +4033,7 @@ mxlnd_timeoutd(void *arg)
 
         CDEBUG(D_NET, "timeoutd starting\n");
 
-        while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+       while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) {
 
                 now = jiffies;
                 /* if the next timeout has not arrived, go back to sleep */
@@ -4050,7 +4050,7 @@ mxlnd_timeoutd(void *arg)
                          * not against the removal of temp */
                         cfs_list_for_each_entry_safe(peer, temp, peers,
                                                      mxp_list) {
-                                if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
+                               if (atomic_read(&kmxlnd_data.kmx_shutdown))
                                         break;
                                 mxlnd_peer_addref(peer); /* add ref... */
                                 conn = peer->mxp_conn;
diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c
index a30c7ca..76c3e91 100644
@@ -346,7 +346,7 @@ kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
         peer->ibp_nid = nid;
         peer->ibp_error = 0;
         peer->ibp_last_alive = 0;
-        cfs_atomic_set(&peer->ibp_refcount, 1);  /* 1 ref for caller */
+       atomic_set(&peer->ibp_refcount, 1);  /* 1 ref for caller */
 
         CFS_INIT_LIST_HEAD(&peer->ibp_list);     /* not in the peer table yet */
         CFS_INIT_LIST_HEAD(&peer->ibp_conns);
@@ -358,7 +358,7 @@ kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
         LASSERT (net->ibn_shutdown == 0);
 
         /* npeers only grows with the global lock held */
-        cfs_atomic_inc(&net->ibn_npeers);
+       atomic_inc(&net->ibn_npeers);
 
        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
@@ -372,7 +372,7 @@ kiblnd_destroy_peer (kib_peer_t *peer)
         kib_net_t *net = peer->ibp_ni->ni_data;
 
         LASSERT (net != NULL);
-        LASSERT (cfs_atomic_read(&peer->ibp_refcount) == 0);
+       LASSERT (atomic_read(&peer->ibp_refcount) == 0);
         LASSERT (!kiblnd_peer_active(peer));
         LASSERT (peer->ibp_connecting == 0);
         LASSERT (peer->ibp_accepting == 0);
@@ -385,7 +385,7 @@ kiblnd_destroy_peer (kib_peer_t *peer)
          * they are destroyed, so we can be assured that _all_ state to do
          * with this peer has been cleaned up when its refcount drops to
          * zero. */
-        cfs_atomic_dec(&net->ibn_npeers);
+       atomic_dec(&net->ibn_npeers);
 }
 
 kib_peer_t *
@@ -410,7 +410,7 @@ kiblnd_find_peer_locked (lnet_nid_t nid)
 
                 CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
                        peer, libcfs_nid2str(nid),
-                       cfs_atomic_read(&peer->ibp_refcount),
+                      atomic_read(&peer->ibp_refcount),
                        peer->ibp_version);
                 return peer;
         }
@@ -455,7 +455,7 @@ kiblnd_get_peer_info (lnet_ni_t *ni, int index,
                                 continue;
 
                         *nidp = peer->ibp_nid;
-                        *count = cfs_atomic_read(&peer->ibp_refcount);
+                       *count = atomic_read(&peer->ibp_refcount);
 
                        read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                               flags);
@@ -612,7 +612,7 @@ kiblnd_debug_conn (kib_conn_t *conn)
        spin_lock(&conn->ibc_lock);
 
         CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n",
-               cfs_atomic_read(&conn->ibc_refcount), conn,
+              atomic_read(&conn->ibc_refcount), conn,
                conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
         CDEBUG(D_CONSOLE, "   state %d nposted %d/%d cred %d o_cred %d r_cred %d\n",
                conn->ibc_state, conn->ibc_noops_posted,
@@ -873,7 +873,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
         LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
 
         /* 1 ref for caller and each rxmsg */
-        cfs_atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
+       atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
         conn->ibc_nrx = IBLND_RX_MSGS(version);
 
         /* post receives */
@@ -912,7 +912,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
         conn->ibc_state = state;
 
         /* 1 more conn */
-        cfs_atomic_inc(&net->ibn_nconns);
+       atomic_inc(&net->ibn_nconns);
         return conn;
 
  failed_2:
@@ -931,7 +931,7 @@ kiblnd_destroy_conn (kib_conn_t *conn)
        int                rc;
 
        LASSERT (!in_interrupt());
-       LASSERT (cfs_atomic_read(&conn->ibc_refcount) == 0);
+       LASSERT (atomic_read(&conn->ibc_refcount) == 0);
        LASSERT (cfs_list_empty(&conn->ibc_early_rxs));
        LASSERT (cfs_list_empty(&conn->ibc_tx_noops));
        LASSERT (cfs_list_empty(&conn->ibc_tx_queue));
@@ -985,7 +985,7 @@ kiblnd_destroy_conn (kib_conn_t *conn)
 
                kiblnd_peer_decref(peer);
                rdma_destroy_id(cmid);
-               cfs_atomic_dec(&net->ibn_nconns);
+               atomic_dec(&net->ibn_nconns);
        }
 
        LIBCFS_FREE(conn, sizeof(*conn));
@@ -2816,7 +2816,7 @@ kiblnd_base_shutdown(void)
         LASSERT (cfs_list_empty(&kiblnd_data.kib_devs));
 
         CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
-               cfs_atomic_read(&libcfs_kmemory));
+              atomic_read(&libcfs_kmemory));
 
         switch (kiblnd_data.kib_init) {
         default:
@@ -2844,11 +2844,11 @@ kiblnd_base_shutdown(void)
                wake_up_all(&kiblnd_data.kib_failover_waitq);
 
                i = 2;
-               while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+               while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                                "Waiting for %d threads to terminate\n",
-                               cfs_atomic_read(&kiblnd_data.kib_nthreads));
+                              atomic_read(&kiblnd_data.kib_nthreads));
                         cfs_pause(cfs_time_seconds(1));
                 }
 
@@ -2868,7 +2868,7 @@ kiblnd_base_shutdown(void)
                cfs_percpt_free(kiblnd_data.kib_scheds);
 
         CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
-               cfs_atomic_read(&libcfs_kmemory));
+              atomic_read(&libcfs_kmemory));
 
        kiblnd_data.kib_init = IBLND_INIT_NOTHING;
        module_put(THIS_MODULE);
@@ -2888,7 +2888,7 @@ kiblnd_shutdown (lnet_ni_t *ni)
                 goto out;
 
         CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
-               cfs_atomic_read(&libcfs_kmemory));
+              atomic_read(&libcfs_kmemory));
 
        write_lock_irqsave(g_lock, flags);
        net->ibn_shutdown = 1;
@@ -2904,12 +2904,12 @@ kiblnd_shutdown (lnet_ni_t *ni)
 
                 /* Wait for all peer state to clean up */
                 i = 2;
-                while (cfs_atomic_read(&net->ibn_npeers) != 0) {
+               while (atomic_read(&net->ibn_npeers) != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
                                "%s: waiting for %d peers to disconnect\n",
                                libcfs_nid2str(ni->ni_nid),
-                               cfs_atomic_read(&net->ibn_npeers));
+                              atomic_read(&net->ibn_npeers));
                         cfs_pause(cfs_time_seconds(1));
                 }
 
@@ -2924,7 +2924,7 @@ kiblnd_shutdown (lnet_ni_t *ni)
                 /* fall through */
 
         case IBLND_INIT_NOTHING:
-                LASSERT (cfs_atomic_read(&net->ibn_nconns) == 0);
+               LASSERT (atomic_read(&net->ibn_nconns) == 0);
 
                 if (net->ibn_dev != NULL &&
                     net->ibn_dev->ibd_nnets == 0)
@@ -2934,7 +2934,7 @@ kiblnd_shutdown (lnet_ni_t *ni)
         }
 
         CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
-               cfs_atomic_read(&libcfs_kmemory));
+              atomic_read(&libcfs_kmemory));
 
         net->ibn_init = IBLND_INIT_NOTHING;
         ni->ni_data = NULL;
diff --git a/lnet/klnds/o2iblnd/o2iblnd.h b/lnet/klnds/o2iblnd/o2iblnd.h
index f2c1f77..9ca97d0 100644
@@ -221,18 +221,18 @@ typedef struct
 
 typedef struct kib_hca_dev
 {
-        struct rdma_cm_id   *ibh_cmid;          /* listener cmid */
-        struct ib_device    *ibh_ibdev;         /* IB device */
-        int                  ibh_page_shift;    /* page shift of current HCA */
-        int                  ibh_page_size;     /* page size of current HCA */
-        __u64                ibh_page_mask;     /* page mask of current HCA */
-        int                  ibh_mr_shift;      /* bits shift of max MR size */
-        __u64                ibh_mr_size;       /* size of MR */
-        int                  ibh_nmrs;          /* # of global MRs */
-        struct ib_mr       **ibh_mrs;           /* global MR */
-        struct ib_pd        *ibh_pd;            /* PD */
-        kib_dev_t           *ibh_dev;           /* owner */
-        cfs_atomic_t         ibh_ref;           /* refcount */
+       struct rdma_cm_id   *ibh_cmid;          /* listener cmid */
+       struct ib_device    *ibh_ibdev;         /* IB device */
+       int                  ibh_page_shift;    /* page shift of current HCA */
+       int                  ibh_page_size;     /* page size of current HCA */
+       __u64                ibh_page_mask;     /* page mask of current HCA */
+       int                  ibh_mr_shift;      /* bits shift of max MR size */
+       __u64                ibh_mr_size;       /* size of MR */
+       int                  ibh_nmrs;          /* # of global MRs */
+       struct ib_mr       **ibh_mrs;           /* global MR */
+       struct ib_pd        *ibh_pd;            /* PD */
+       kib_dev_t           *ibh_dev;           /* owner */
+       atomic_t             ibh_ref;           /* refcount */
 } kib_hca_dev_t;
 
 /** # of seconds to keep pool alive */
@@ -354,13 +354,13 @@ typedef struct {
 
 typedef struct kib_net
 {
-        cfs_list_t           ibn_list;          /* chain on kib_dev_t::ibd_nets */
-        __u64                ibn_incarnation;   /* my epoch */
-        int                  ibn_init;          /* initialisation state */
-        int                  ibn_shutdown;      /* shutting down? */
+       cfs_list_t           ibn_list;          /* chain on kib_dev_t::ibd_nets */
+       __u64                ibn_incarnation;   /* my epoch */
+       int                  ibn_init;          /* initialisation state */
+       int                  ibn_shutdown;      /* shutting down? */
 
-       cfs_atomic_t            ibn_npeers;     /* # peers extant */
-       cfs_atomic_t            ibn_nconns;     /* # connections extant */
+       atomic_t                ibn_npeers;     /* # peers extant */
+       atomic_t                ibn_nconns;     /* # connections extant */
 
        kib_tx_poolset_t        **ibn_tx_ps;    /* tx pool-set */
        kib_fmr_poolset_t       **ibn_fmr_ps;   /* fmr pool-set */
@@ -397,7 +397,7 @@ typedef struct
        cfs_list_t              kib_failed_devs;
        /* schedulers sleep here */
        wait_queue_head_t               kib_failover_waitq;
-       cfs_atomic_t            kib_nthreads;   /* # live threads */
+       atomic_t                kib_nthreads;   /* # live threads */
        /* stabilize net/dev/peer/conn ops */
        rwlock_t                kib_global_lock;
        /* hash table of all my known peers */
@@ -605,7 +605,7 @@ typedef struct kib_conn
         cfs_list_t           ibc_sched_list;    /* schedule for attention */
         __u16                ibc_version;       /* version of connection */
         __u64                ibc_incarnation;   /* which instance of the peer */
-        cfs_atomic_t         ibc_refcount;      /* # users */
+       atomic_t             ibc_refcount;      /* # users */
         int                  ibc_state;         /* what's happening */
         int                  ibc_nsends_posted; /* # uncompleted sends */
         int                  ibc_noops_posted;  /* # uncompleted NOOPs */
@@ -650,7 +650,7 @@ typedef struct kib_peer
         cfs_list_t           ibp_list;           /* stash on global peer list */
         lnet_nid_t           ibp_nid;            /* who's on the other end(s) */
         lnet_ni_t           *ibp_ni;             /* LNet interface */
-        cfs_atomic_t         ibp_refcount;       /* # users */
+       atomic_t         ibp_refcount;       /* # users */
         cfs_list_t           ibp_conns;          /* all active connections */
         cfs_list_t           ibp_tx_queue;       /* msgs waiting for a conn */
         __u16                ibp_version;        /* version of peer */
@@ -668,16 +668,16 @@ extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
 static inline void
 kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
 {
-        LASSERT (cfs_atomic_read(&hdev->ibh_ref) > 0);
-        cfs_atomic_inc(&hdev->ibh_ref);
+       LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+       atomic_inc(&hdev->ibh_ref);
 }
 
 static inline void
 kiblnd_hdev_decref(kib_hca_dev_t *hdev)
 {
-        LASSERT (cfs_atomic_read(&hdev->ibh_ref) > 0);
-        if (cfs_atomic_dec_and_test(&hdev->ibh_ref))
-                kiblnd_hdev_destroy(hdev);
+       LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+       if (atomic_dec_and_test(&hdev->ibh_ref))
+               kiblnd_hdev_destroy(hdev);
 }
 
 static inline int
@@ -698,8 +698,8 @@ kiblnd_dev_can_failover(kib_dev_t *dev)
 #define kiblnd_conn_addref(conn)                                \
 do {                                                            \
         CDEBUG(D_NET, "conn[%p] (%d)++\n",                      \
-               (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
-        cfs_atomic_inc(&(conn)->ibc_refcount);                  \
+              (conn), atomic_read(&(conn)->ibc_refcount)); \
+       atomic_inc(&(conn)->ibc_refcount);                  \
 } while (0)
 
 #define kiblnd_conn_decref(conn)                                       \
@@ -707,9 +707,9 @@ do {                                                                        \
        unsigned long flags;                                            \
                                                                        \
        CDEBUG(D_NET, "conn[%p] (%d)--\n",                              \
-              (conn), cfs_atomic_read(&(conn)->ibc_refcount));         \
+              (conn), atomic_read(&(conn)->ibc_refcount));             \
        LASSERT_ATOMIC_POS(&(conn)->ibc_refcount);                      \
-       if (cfs_atomic_dec_and_test(&(conn)->ibc_refcount)) {           \
+       if (atomic_dec_and_test(&(conn)->ibc_refcount)) {               \
                spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);  \
                cfs_list_add_tail(&(conn)->ibc_list,                    \
                                  &kiblnd_data.kib_connd_zombies);      \
@@ -720,20 +720,20 @@ do {                                                                      \
 
 #define kiblnd_peer_addref(peer)                                \
 do {                                                            \
-        CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",                \
-               (peer), libcfs_nid2str((peer)->ibp_nid),         \
-               cfs_atomic_read (&(peer)->ibp_refcount));        \
-        cfs_atomic_inc(&(peer)->ibp_refcount);                  \
+       CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",                \
+              (peer), libcfs_nid2str((peer)->ibp_nid),         \
+              atomic_read (&(peer)->ibp_refcount));            \
+       atomic_inc(&(peer)->ibp_refcount);                      \
 } while (0)
 
 #define kiblnd_peer_decref(peer)                                \
 do {                                                            \
-        CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",                \
-               (peer), libcfs_nid2str((peer)->ibp_nid),         \
-               cfs_atomic_read (&(peer)->ibp_refcount));        \
-        LASSERT_ATOMIC_POS(&(peer)->ibp_refcount);              \
-        if (cfs_atomic_dec_and_test(&(peer)->ibp_refcount))     \
-                kiblnd_destroy_peer(peer);                      \
+       CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",                \
+              (peer), libcfs_nid2str((peer)->ibp_nid),         \
+              atomic_read (&(peer)->ibp_refcount));            \
+       LASSERT_ATOMIC_POS(&(peer)->ibp_refcount);              \
+       if (atomic_dec_and_test(&(peer)->ibp_refcount))         \
+               kiblnd_destroy_peer(peer);                      \
 } while (0)
 
 static inline cfs_list_t *
diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c
index e4f2d33..eeef534 100644
@@ -1812,14 +1812,14 @@ kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
        if (IS_ERR(task))
                return PTR_ERR(task);
 
-       cfs_atomic_inc(&kiblnd_data.kib_nthreads);
+       atomic_inc(&kiblnd_data.kib_nthreads);
        return 0;
 }
 
 void
 kiblnd_thread_fini (void)
 {
-        cfs_atomic_dec (&kiblnd_data.kib_nthreads);
+       atomic_dec (&kiblnd_data.kib_nthreads);
 }
 
 void
diff --git a/lnet/klnds/qswlnd/qswlnd.c b/lnet/klnds/qswlnd/qswlnd.c
index e62b094..fa8e8f4 100644
@@ -144,9 +144,9 @@ kqswnal_shutdown(lnet_ni_t *ni)
 
        /**********************************************************************/
        /* wait for sends that have allocated a tx desc to launch or give up */
-       while (cfs_atomic_read (&kqswnal_data.kqn_pending_txs) != 0) {
+       while (atomic_read (&kqswnal_data.kqn_pending_txs) != 0) {
                CDEBUG(D_NET, "waiting for %d pending sends\n",
-                      cfs_atomic_read (&kqswnal_data.kqn_pending_txs));
+                      atomic_read (&kqswnal_data.kqn_pending_txs));
                cfs_pause(cfs_time_seconds(1));
        }
 
@@ -176,9 +176,9 @@ kqswnal_shutdown(lnet_ni_t *ni)
        kqswnal_data.kqn_shuttingdown = 2;
        wake_up_all (&kqswnal_data.kqn_sched_waitq);
 
-       while (cfs_atomic_read (&kqswnal_data.kqn_nthreads) != 0) {
+       while (atomic_read (&kqswnal_data.kqn_nthreads) != 0) {
                CDEBUG(D_NET, "waiting for %d threads to terminate\n",
-                      cfs_atomic_read (&kqswnal_data.kqn_nthreads));
+                      atomic_read (&kqswnal_data.kqn_nthreads));
                cfs_pause(cfs_time_seconds(1));
        }
 
@@ -252,7 +252,7 @@ kqswnal_shutdown(lnet_ni_t *ni)
        /* resets flags, pointers to NULL etc */
        memset(&kqswnal_data, 0, sizeof (kqswnal_data));
 
-       CDEBUG (D_MALLOC, "done kmem %d\n", cfs_atomic_read(&libcfs_kmemory));
+       CDEBUG (D_MALLOC, "done kmem %d\n", atomic_read(&libcfs_kmemory));
 
        module_put(THIS_MODULE);
 }
@@ -288,7 +288,7 @@ kqswnal_startup (lnet_ni_t *ni)
                                   *kqswnal_tunables.kqn_credits);
        }
         
-       CDEBUG (D_MALLOC, "start kmem %d\n", cfs_atomic_read(&libcfs_kmemory));
+       CDEBUG (D_MALLOC, "start kmem %d\n", atomic_read(&libcfs_kmemory));
        
        /* ensure all pointers NULL etc */
        memset (&kqswnal_data, 0, sizeof (kqswnal_data));
diff --git a/lnet/klnds/qswlnd/qswlnd.h b/lnet/klnds/qswlnd/qswlnd.h
index a55ba12..b45c2cd 100644
@@ -182,7 +182,7 @@ typedef struct kqswnal_rx
         int                  krx_nob;       /* Number Of Bytes received into buffer */
         int                  krx_rpc_reply_needed:1; /* peer waiting for EKC RPC reply */
         int                  krx_state;     /* what this RX is doing */
-        cfs_atomic_t         krx_refcount;  /* how to tell when rpc is done */
+       atomic_t         krx_refcount;  /* how to tell when rpc is done */
 #if KQSW_CKSUM
         __u32                krx_cksum;     /* checksum */
 #endif
@@ -256,7 +256,7 @@ typedef struct
 {
        char                 kqn_init;        /* what's been initialised */
        char                 kqn_shuttingdown;/* I'm trying to shut down */
-       cfs_atomic_t         kqn_nthreads;    /* # threads running */
+       atomic_t        kqn_nthreads;    /* # threads running */
        lnet_ni_t           *kqn_ni;          /* _the_ instance of me */
 
        kqswnal_rx_t        *kqn_rxds;        /* stack of all the receive descriptors */
@@ -265,7 +265,7 @@ typedef struct
        cfs_list_t           kqn_idletxds;    /* transmit descriptors free to use */
        cfs_list_t           kqn_activetxds;  /* transmit descriptors being used */
        spinlock_t      kqn_idletxd_lock;    /* serialise idle txd access */
-       cfs_atomic_t    kqn_pending_txs;     /* # transmits being prepped */
+       atomic_t        kqn_pending_txs;     /* # transmits being prepped */
 
        spinlock_t      kqn_sched_lock;      /* serialise packet schedulers */
        wait_queue_head_t    kqn_sched_waitq;/* scheduler blocks here */
@@ -336,8 +336,8 @@ kqswnal_pages_spanned (void *base, int nob)
 
 static inline void kqswnal_rx_decref (kqswnal_rx_t *krx)
 {
-        LASSERT (cfs_atomic_read (&krx->krx_refcount) > 0);
-        if (cfs_atomic_dec_and_test (&krx->krx_refcount))
+       LASSERT (atomic_read (&krx->krx_refcount) > 0);
+       if (atomic_dec_and_test (&krx->krx_refcount))
                 kqswnal_rx_done(krx);
 }
 
diff --git a/lnet/klnds/qswlnd/qswlnd_cb.c b/lnet/klnds/qswlnd/qswlnd_cb.c
index 7c2cbfb..99eb1cc 100644
@@ -386,7 +386,7 @@ kqswnal_get_idle_tx (void)
 
         cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds);
         ktx->ktx_launcher = current->pid;
-        cfs_atomic_inc(&kqswnal_data.kqn_pending_txs);
+       atomic_inc(&kqswnal_data.kqn_pending_txs);
 
        spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
 
@@ -899,9 +899,9 @@ kqswnal_rdma (kqswnal_rx_t *krx, lnet_msg_t *lntmsg,
         ktx->ktx_args[0] = krx;
         ktx->ktx_args[1] = lntmsg;
 
-        LASSERT (cfs_atomic_read(&krx->krx_refcount) > 0);
+       LASSERT (atomic_read(&krx->krx_refcount) > 0);
         /* Take an extra ref for the completion callback */
-        cfs_atomic_inc(&krx->krx_refcount);
+       atomic_inc(&krx->krx_refcount);
 
         /* Map on the rail the RPC prefers */
         ktx->ktx_rail = ep_rcvr_prefrail(krx->krx_eprx,
@@ -978,7 +978,7 @@ kqswnal_rdma (kqswnal_rx_t *krx, lnet_msg_t *lntmsg,
                 kqswnal_put_idle_tx (ktx);
         }
 
-        cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
+       atomic_dec(&kqswnal_data.kqn_pending_txs);
         return (rc);
 }
 
@@ -1254,14 +1254,14 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                 
         }
         
-        cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
+       atomic_dec(&kqswnal_data.kqn_pending_txs);
         return (rc == 0 ? 0 : -EIO);
 }
 
 void
 kqswnal_requeue_rx (kqswnal_rx_t *krx)
 {
-        LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
+       LASSERT (atomic_read(&krx->krx_refcount) == 0);
         LASSERT (!krx->krx_rpc_reply_needed);
 
         krx->krx_state = KRX_POSTED;
@@ -1298,7 +1298,7 @@ kqswnal_rx_done (kqswnal_rx_t *krx)
 {
        int           rc;
 
-       LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
+       LASSERT (atomic_read(&krx->krx_refcount) == 0);
 
        if (krx->krx_rpc_reply_needed) {
                /* We've not completed the peer's RPC yet... */
@@ -1333,7 +1333,7 @@ kqswnal_parse (kqswnal_rx_t *krx)
         int             nob;
         int             rc;
 
-        LASSERT (cfs_atomic_read(&krx->krx_refcount) == 1);
+       LASSERT (atomic_read(&krx->krx_refcount) == 1);
 
         if (krx->krx_nob < offsetof(kqswnal_msg_t, kqm_u)) {
                 CERROR("Short message %d received from %s\n",
@@ -1521,7 +1521,7 @@ kqswnal_rxhandler(EP_RXD *rxd)
 
         /* Default to failure if an RPC reply is requested but not handled */
         krx->krx_rpc_reply.msg.status = -EPROTO;
-        cfs_atomic_set (&krx->krx_refcount, 1);
+       atomic_set (&krx->krx_refcount, 1);
 
         if (status != EP_SUCCESS) {
                 /* receives complete with failure when receiver is removed */
@@ -1662,14 +1662,14 @@ kqswnal_thread_start(int (*fn)(void *arg), void *arg, char *name)
        if (IS_ERR(task))
                return PTR_ERR(task);
 
-       cfs_atomic_inc(&kqswnal_data.kqn_nthreads);
+       atomic_inc(&kqswnal_data.kqn_nthreads);
        return 0;
 }
 
 void
 kqswnal_thread_fini (void)
 {
-        cfs_atomic_dec (&kqswnal_data.kqn_nthreads);
+       atomic_dec (&kqswnal_data.kqn_nthreads);
 }
 
 int
@@ -1735,7 +1735,7 @@ kqswnal_scheduler (void *arg)
                                        libcfs_nid2str(ktx->ktx_nid), rc);
                                 kqswnal_tx_done (ktx, rc);
                         }
-                        cfs_atomic_dec (&kqswnal_data.kqn_pending_txs);
+                       atomic_dec (&kqswnal_data.kqn_pending_txs);
 
                         did_something = 1;
                        spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
diff --git a/lnet/klnds/ralnd/ralnd.c b/lnet/klnds/ralnd/ralnd.c
index 6a75c2e..a6d61e2 100644
@@ -304,7 +304,7 @@ kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
                return -ENOMEM;
 
         memset(conn, 0, sizeof(*conn));
-        cfs_atomic_set(&conn->rac_refcount, 1);
+       atomic_set(&conn->rac_refcount, 1);
         CFS_INIT_LIST_HEAD(&conn->rac_list);
         CFS_INIT_LIST_HEAD(&conn->rac_hashlist);
         CFS_INIT_LIST_HEAD(&conn->rac_schedlist);
@@ -327,7 +327,7 @@ kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
                 return -ENETDOWN;
         }
 
-        cfs_atomic_inc(&kranal_data.kra_nconns);
+       atomic_inc(&kranal_data.kra_nconns);
         *connp = conn;
         return 0;
 }
@@ -342,7 +342,7 @@ kranal_destroy_conn(kra_conn_t *conn)
        LASSERT (cfs_list_empty(&conn->rac_list));
        LASSERT (cfs_list_empty(&conn->rac_hashlist));
        LASSERT (cfs_list_empty(&conn->rac_schedlist));
-       LASSERT (cfs_atomic_read(&conn->rac_refcount) == 0);
+       LASSERT (atomic_read(&conn->rac_refcount) == 0);
        LASSERT (cfs_list_empty(&conn->rac_fmaq));
        LASSERT (cfs_list_empty(&conn->rac_rdmaq));
        LASSERT (cfs_list_empty(&conn->rac_replyq));
@@ -355,7 +355,7 @@ kranal_destroy_conn(kra_conn_t *conn)
                kranal_peer_decref(conn->rac_peer);
 
        LIBCFS_FREE(conn, sizeof(*conn));
-       cfs_atomic_dec(&kranal_data.kra_nconns);
+       atomic_dec(&kranal_data.kra_nconns);
 }
 
 void
@@ -913,7 +913,7 @@ kranal_create_peer (kra_peer_t **peerp, lnet_nid_t nid)
         memset(peer, 0, sizeof(*peer));         /* zero flags etc */
 
         peer->rap_nid = nid;
-        cfs_atomic_set(&peer->rap_refcount, 1);     /* 1 ref for caller */
+       atomic_set(&peer->rap_refcount, 1);     /* 1 ref for caller */
 
         CFS_INIT_LIST_HEAD(&peer->rap_list);
         CFS_INIT_LIST_HEAD(&peer->rap_connd_list);
@@ -934,7 +934,7 @@ kranal_create_peer (kra_peer_t **peerp, lnet_nid_t nid)
                 return -ESHUTDOWN;
         }
 
-        cfs_atomic_inc(&kranal_data.kra_npeers);
+       atomic_inc(&kranal_data.kra_npeers);
 
        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
@@ -948,7 +948,7 @@ kranal_destroy_peer (kra_peer_t *peer)
         CDEBUG(D_NET, "peer %s %p deleted\n", 
                libcfs_nid2str(peer->rap_nid), peer);
 
-        LASSERT (cfs_atomic_read(&peer->rap_refcount) == 0);
+       LASSERT (atomic_read(&peer->rap_refcount) == 0);
         LASSERT (peer->rap_persistence == 0);
         LASSERT (!kranal_peer_active(peer));
         LASSERT (!peer->rap_connecting);
@@ -962,7 +962,7 @@ kranal_destroy_peer (kra_peer_t *peer)
          * they are destroyed, so we can be assured that _all_ state to do
          * with this peer has been cleaned up when its refcount drops to
          * zero. */
-        cfs_atomic_dec(&kranal_data.kra_npeers);
+       atomic_dec(&kranal_data.kra_npeers);
 }
 
 kra_peer_t *
@@ -984,7 +984,7 @@ kranal_find_peer_locked (lnet_nid_t nid)
 
                 CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
                        peer, libcfs_nid2str(nid), 
-                       cfs_atomic_read(&peer->rap_refcount));
+                      atomic_read(&peer->rap_refcount));
                 return peer;
         }
         return NULL;
@@ -1174,8 +1174,8 @@ kranal_get_conn_by_idx (int index)
                                                       rac_list);
                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
                                        libcfs_nid2str(conn->rac_peer->rap_nid),
-                                       cfs_atomic_read(&conn->rac_refcount));
-                                cfs_atomic_inc(&conn->rac_refcount);
+                                      atomic_read(&conn->rac_refcount));
+                               atomic_inc(&conn->rac_refcount);
                                read_unlock(&kranal_data.kra_global_lock);
                                 return conn;
                         }
@@ -1437,7 +1437,7 @@ kranal_shutdown (lnet_ni_t *ni)
         unsigned long flags;
 
         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
-               cfs_atomic_read(&libcfs_kmemory));
+              atomic_read(&libcfs_kmemory));
 
         LASSERT (ni == kranal_data.kra_ni);
         LASSERT (ni->ni_data == &kranal_data);
@@ -1475,11 +1475,11 @@ kranal_shutdown (lnet_ni_t *ni)
 
                 /* Wait for all peers to be freed */
                 i = 2;
-                while (cfs_atomic_read(&kranal_data.kra_npeers) != 0) {
+               while (atomic_read(&kranal_data.kra_npeers) != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
                                "waiting for %d peers to close down\n",
-                               cfs_atomic_read(&kranal_data.kra_npeers));
+                              atomic_read(&kranal_data.kra_npeers));
                         cfs_pause(cfs_time_seconds(1));
                 }
                 /* fall through */
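
The "(i & (-i)) == i" test in the wait loop above is the usual bit trick:
i & -i isolates the lowest set bit of i, so the comparison holds exactly when
i has a single bit set, i.e. on iterations 4, 8, 16, ...  The warning level
therefore backs off exponentially while the loop polls the atomic counter
once a second.  The same throttle reappears below for the ENOMEM warnings in
ksocknal_process_transmit().  The predicate on its own:

/* True iff i has exactly one bit set: i & -i keeps only the lowest
 * set bit, and equals i only when that bit is the sole one.  (i == 0
 * would also pass, but the loops above count up from 2.) */
static inline int
warn_this_pass(unsigned int i)
{
        return (i & (-i)) == i;
}
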
@@ -1493,7 +1493,7 @@ kranal_shutdown (lnet_ni_t *ni)
          * while there are still active connds, but these will be temporary
          * since peer creation always fails after the listener has started to
          * shut down. */
-        LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
+       LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
         
         /* Flag threads to terminate */
         kranal_data.kra_shutdown = 1;
@@ -1517,15 +1517,15 @@ kranal_shutdown (lnet_ni_t *ni)
 
         /* Wait for threads to exit */
         i = 2;
-        while (cfs_atomic_read(&kranal_data.kra_nthreads) != 0) {
+       while (atomic_read(&kranal_data.kra_nthreads) != 0) {
                 i++;
                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                        "Waiting for %d threads to terminate\n",
-                       cfs_atomic_read(&kranal_data.kra_nthreads));
+                      atomic_read(&kranal_data.kra_nthreads));
                 cfs_pause(cfs_time_seconds(1));
         }
 
-        LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
+       LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
         if (kranal_data.kra_peers != NULL) {
                 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
                         LASSERT (cfs_list_empty(&kranal_data.kra_peers[i]));
@@ -1535,7 +1535,7 @@ kranal_shutdown (lnet_ni_t *ni)
                             kranal_data.kra_peer_hash_size);
         }
 
-        LASSERT (cfs_atomic_read(&kranal_data.kra_nconns) == 0);
+       LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
         if (kranal_data.kra_conns != NULL) {
                 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
                         LASSERT (cfs_list_empty(&kranal_data.kra_conns[i]));
@@ -1551,7 +1551,7 @@ kranal_shutdown (lnet_ni_t *ni)
         kranal_free_txdescs(&kranal_data.kra_idle_txs);
 
         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
-               cfs_atomic_read(&libcfs_kmemory));
+              atomic_read(&libcfs_kmemory));
 
        kranal_data.kra_init = RANAL_INIT_NOTHING;
        module_put(THIS_MODULE);
@@ -1561,7 +1561,7 @@ int
 kranal_startup (lnet_ni_t *ni)
 {
         struct timeval    tv;
-        int               pkmem = cfs_atomic_read(&libcfs_kmemory);
+       int               pkmem = atomic_read(&libcfs_kmemory);
         int               rc;
         int               i;
         kra_device_t     *dev;
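
On Linux the cfs_atomic_* wrappers were thin aliases for the kernel's own
atomic primitives, so the conversion throughout these files is a one-for-one
rename with unchanged argument order and semantics.  Every form exercised by
this patch maps as follows:

        cfs_atomic_t                ->  atomic_t
        cfs_atomic_set(a, v)        ->  atomic_set(a, v)
        cfs_atomic_read(a)          ->  atomic_read(a)
        cfs_atomic_inc(a)           ->  atomic_inc(a)
        cfs_atomic_dec(a)           ->  atomic_dec(a)
        cfs_atomic_add(v, a)        ->  atomic_add(v, a)
        cfs_atomic_sub(v, a)        ->  atomic_sub(v, a)
        cfs_atomic_dec_and_test(a)  ->  atomic_dec_and_test(a)

The header hunks that follow only change member declarations from
cfs_atomic_t to atomic_t; behaviour is identical.
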
diff --git a/lnet/klnds/ralnd/ralnd.h b/lnet/klnds/ralnd/ralnd.h
index bfc863f..72559ee 100644 (file)
@@ -125,7 +125,7 @@ typedef struct
 {
        int               kra_init;            /* initialisation state */
        int               kra_shutdown;        /* shut down? */
-       cfs_atomic_t      kra_nthreads;        /* # live threads */
+       atomic_t      kra_nthreads;        /* # live threads */
        lnet_ni_t        *kra_ni;              /* _the_ nal instance */
 
        kra_device_t      kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
@@ -135,7 +135,7 @@ typedef struct
 
        cfs_list_t       *kra_peers;           /* hash table of all my known peers */
        int               kra_peer_hash_size;  /* size of kra_peers */
-       cfs_atomic_t      kra_npeers;          /* # peers extant */
+       atomic_t      kra_npeers;          /* # peers extant */
        int               kra_nonewpeers;      /* prevent new peers */
 
        cfs_list_t       *kra_conns;           /* conns hashed by cqid */
@@ -143,7 +143,7 @@ typedef struct
        __u64             kra_peerstamp;       /* when I started up */
        __u64             kra_connstamp;       /* conn stamp generator */
        int               kra_next_cqid;       /* cqid generator */
-       cfs_atomic_t      kra_nconns;          /* # connections extant */
+       atomic_t      kra_nconns;          /* # connections extant */
 
        long              kra_new_min_timeout; /* minimum timeout on any new conn */
        wait_queue_head_t       kra_reaper_waitq;    /* reaper sleeps here */
@@ -305,7 +305,7 @@ typedef struct kra_conn
         __u32               rac_cqid;          /* my completion callback id (non-unique) */
         __u32               rac_tx_seq;        /* tx msg sequence number */
         __u32               rac_rx_seq;        /* rx msg sequence number */
-        cfs_atomic_t        rac_refcount;      /* # users */
+       atomic_t        rac_refcount;      /* # users */
         unsigned int        rac_close_sent;    /* I've sent CLOSE */
         unsigned int        rac_close_recvd;   /* I've received CLOSE */
         unsigned int        rac_state;         /* connection state */
@@ -329,7 +329,7 @@ typedef struct kra_peer {
        lnet_nid_t          rap_nid;          /* who's on the other end(s) */
        __u32               rap_ip;           /* IP address of peer */
        int                 rap_port;         /* port on which peer listens */
-       cfs_atomic_t        rap_refcount;     /* # users */
+       atomic_t        rap_refcount;     /* # users */
        int                 rap_persistence;  /* "known" peer refs */
        int                 rap_connecting;   /* connection forming */
        unsigned long       rap_reconnect_time; /* get_seconds() when reconnect OK */
@@ -346,16 +346,16 @@ static inline void
 kranal_peer_addref(kra_peer_t *peer)
 {
         CDEBUG(D_NET, "%p->%s\n", peer, libcfs_nid2str(peer->rap_nid));
-        LASSERT(cfs_atomic_read(&peer->rap_refcount) > 0);
-        cfs_atomic_inc(&peer->rap_refcount);
+       LASSERT(atomic_read(&peer->rap_refcount) > 0);
+       atomic_inc(&peer->rap_refcount);
 }
 
 static inline void
 kranal_peer_decref(kra_peer_t *peer)
 {
         CDEBUG(D_NET, "%p->%s\n", peer, libcfs_nid2str(peer->rap_nid));
-        LASSERT(cfs_atomic_read(&peer->rap_refcount) > 0);
-        if (cfs_atomic_dec_and_test(&peer->rap_refcount))
+       LASSERT(atomic_read(&peer->rap_refcount) > 0);
+       if (atomic_dec_and_test(&peer->rap_refcount))
                 kranal_destroy_peer(peer);
 }
 
@@ -379,8 +379,8 @@ kranal_conn_addref(kra_conn_t *conn)
 {
         CDEBUG(D_NET, "%p->%s\n", conn, 
                libcfs_nid2str(conn->rac_peer->rap_nid));
-        LASSERT(cfs_atomic_read(&conn->rac_refcount) > 0);
-        cfs_atomic_inc(&conn->rac_refcount);
+       LASSERT(atomic_read(&conn->rac_refcount) > 0);
+       atomic_inc(&conn->rac_refcount);
 }
 
 static inline void
@@ -388,8 +388,8 @@ kranal_conn_decref(kra_conn_t *conn)
 {
         CDEBUG(D_NET, "%p->%s\n", conn,
                libcfs_nid2str(conn->rac_peer->rap_nid));
-        LASSERT(cfs_atomic_read(&conn->rac_refcount) > 0);
-        if (cfs_atomic_dec_and_test(&conn->rac_refcount))
+       LASSERT(atomic_read(&conn->rac_refcount) > 0);
+       if (atomic_dec_and_test(&conn->rac_refcount))
                 kranal_destroy_conn(conn);
 }
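
The ralnd.h inlines above all share one shape: addref may only be called by a
thread that already holds a reference (hence the LASSERT that the count is
positive), and decref uses atomic_dec_and_test() so the 1 -> 0 transition is
observed by exactly one thread, which then runs the destructor.  A generic
sketch of the pair, reusing the illustrative obj_t from earlier with a
hypothetical obj_destroy() destructor:

static inline void
obj_addref(obj_t *obj)
{
        /* caller must already hold a ref; a count of 0 would mean the
         * object may already be under destruction on another CPU */
        LASSERT(atomic_read(&obj->o_refcount) > 0);
        atomic_inc(&obj->o_refcount);
}

static inline void
obj_decref(obj_t *obj)
{
        LASSERT(atomic_read(&obj->o_refcount) > 0);
        /* atomic_dec_and_test() returns true only for the thread that
         * takes the count to zero, so obj_destroy() runs exactly once */
        if (atomic_dec_and_test(&obj->o_refcount))
                obj_destroy(obj);
}
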
 
diff --git a/lnet/klnds/ralnd/ralnd_cb.c b/lnet/klnds/ralnd/ralnd_cb.c
index be64cd9..ff437f6 100644 (file)
@@ -896,14 +896,14 @@ kranal_thread_start(int(*fn)(void *arg), void *arg, char *name)
        struct task_struct *task = cfs_thread_run(fn, arg, name);
 
        if (!IS_ERR(task))
-               cfs_atomic_inc(&kranal_data.kra_nthreads);
+               atomic_inc(&kranal_data.kra_nthreads);
        return PTR_ERR(task);
 }
 
 void
 kranal_thread_fini (void)
 {
-        cfs_atomic_dec(&kranal_data.kra_nthreads);
+       atomic_dec(&kranal_data.kra_nthreads);
 }
 
 int
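
kranal_thread_start() and kranal_thread_fini() bracket each daemon's lifetime
with an inc/dec of kra_nthreads, which is what lets kranal_shutdown() above
poll that counter down to zero before freeing shared state.  A sketch of the
bookkeeping, assuming only the stock kthread API (names and the 0-on-success
return are illustrative, not the patch's exact code):

#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/kthread.h>

static atomic_t nthreads = ATOMIC_INIT(0);

static int
thread_start(int (*fn)(void *), void *arg, char *name)
{
        struct task_struct *task = kthread_run(fn, arg, "%s", name);

        if (IS_ERR(task))
                return PTR_ERR(task);
        atomic_inc(&nthreads);          /* counted only on success */
        return 0;
}

static void
thread_fini(void)
{
        atomic_dec(&nthreads);          /* each daemon's last act on exit */
}
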
diff --git a/lnet/klnds/socklnd/socklnd.c b/lnet/klnds/socklnd/socklnd.c
index 54e1b27..ff6be40 100644 (file)
@@ -67,16 +67,16 @@ ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
 ksock_route_t *
 ksocknal_create_route (__u32 ipaddr, int port)
 {
-        ksock_route_t *route;
+       ksock_route_t *route;
 
-        LIBCFS_ALLOC (route, sizeof (*route));
-        if (route == NULL)
-                return (NULL);
+       LIBCFS_ALLOC (route, sizeof (*route));
+       if (route == NULL)
+               return (NULL);
 
-        cfs_atomic_set (&route->ksnr_refcount, 1);
-        route->ksnr_peer = NULL;
-        route->ksnr_retry_interval = 0;         /* OK to connect at any time */
-        route->ksnr_ipaddr = ipaddr;
+       atomic_set (&route->ksnr_refcount, 1);
+       route->ksnr_peer = NULL;
+       route->ksnr_retry_interval = 0;         /* OK to connect at any time */
+       route->ksnr_ipaddr = ipaddr;
         route->ksnr_port = port;
         route->ksnr_scheduled = 0;
         route->ksnr_connecting = 0;
@@ -91,12 +91,12 @@ ksocknal_create_route (__u32 ipaddr, int port)
 void
 ksocknal_destroy_route (ksock_route_t *route)
 {
-        LASSERT (cfs_atomic_read(&route->ksnr_refcount) == 0);
+       LASSERT (atomic_read(&route->ksnr_refcount) == 0);
 
-        if (route->ksnr_peer != NULL)
-                ksocknal_peer_decref(route->ksnr_peer);
+       if (route->ksnr_peer != NULL)
+               ksocknal_peer_decref(route->ksnr_peer);
 
-        LIBCFS_FREE (route, sizeof (*route));
+       LIBCFS_FREE (route, sizeof (*route));
 }
 
 int
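
ksocknal_create_route() hands the new object back with its refcount already
set to 1, owned by the caller, and ksocknal_destroy_route() is only reached
from the final decref, so it can assert the count is exactly 0.  Together the
two invariants guarantee the object is freed exactly once and never while
still referenced.  The skeleton, again with the hypothetical obj_t:

obj_t *
obj_create(void)
{
        obj_t *obj;

        LIBCFS_ALLOC(obj, sizeof(*obj));
        if (obj == NULL)
                return NULL;

        atomic_set(&obj->o_refcount, 1);        /* 1 ref for the caller */
        return obj;
}

void
obj_destroy(obj_t *obj)         /* reached only from the final decref */
{
        LASSERT(atomic_read(&obj->o_refcount) == 0);
        LIBCFS_FREE(obj, sizeof(*obj));
}
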
@@ -115,14 +115,14 @@ ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
 
        memset (peer, 0, sizeof (*peer));       /* NULL pointers/clear flags etc */
 
-        peer->ksnp_ni = ni;
-        peer->ksnp_id = id;
-        cfs_atomic_set (&peer->ksnp_refcount, 1);   /* 1 ref for caller */
-        peer->ksnp_closing = 0;
-        peer->ksnp_accepting = 0;
-        peer->ksnp_proto = NULL;
-        peer->ksnp_last_alive = 0;
-        peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
+       peer->ksnp_ni = ni;
+       peer->ksnp_id = id;
+       atomic_set (&peer->ksnp_refcount, 1);   /* 1 ref for caller */
+       peer->ksnp_closing = 0;
+       peer->ksnp_accepting = 0;
+       peer->ksnp_proto = NULL;
+       peer->ksnp_last_alive = 0;
+       peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
 
         CFS_INIT_LIST_HEAD (&peer->ksnp_conns);
         CFS_INIT_LIST_HEAD (&peer->ksnp_routes);
@@ -151,17 +151,17 @@ ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
 void
 ksocknal_destroy_peer (ksock_peer_t *peer)
 {
-        ksock_net_t    *net = peer->ksnp_ni->ni_data;
+       ksock_net_t    *net = peer->ksnp_ni->ni_data;
 
-        CDEBUG (D_NET, "peer %s %p deleted\n",
-                libcfs_id2str(peer->ksnp_id), peer);
+       CDEBUG (D_NET, "peer %s %p deleted\n",
+               libcfs_id2str(peer->ksnp_id), peer);
 
-        LASSERT (cfs_atomic_read (&peer->ksnp_refcount) == 0);
-        LASSERT (peer->ksnp_accepting == 0);
-        LASSERT (cfs_list_empty (&peer->ksnp_conns));
-        LASSERT (cfs_list_empty (&peer->ksnp_routes));
-        LASSERT (cfs_list_empty (&peer->ksnp_tx_queue));
-        LASSERT (cfs_list_empty (&peer->ksnp_zc_req_list));
+       LASSERT (atomic_read (&peer->ksnp_refcount) == 0);
+       LASSERT (peer->ksnp_accepting == 0);
+       LASSERT (cfs_list_empty (&peer->ksnp_conns));
+       LASSERT (cfs_list_empty (&peer->ksnp_routes));
+       LASSERT (cfs_list_empty (&peer->ksnp_tx_queue));
+       LASSERT (cfs_list_empty (&peer->ksnp_zc_req_list));
 
         LIBCFS_FREE (peer, sizeof (*peer));
 
@@ -194,12 +194,12 @@ ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
                     peer->ksnp_id.pid != id.pid)
                         continue;
 
-                CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
-                       peer, libcfs_id2str(id),
-                       cfs_atomic_read(&peer->ksnp_refcount));
-                return (peer);
-        }
-        return (NULL);
+               CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
+                      peer, libcfs_id2str(id),
+                      atomic_read(&peer->ksnp_refcount));
+               return (peer);
+       }
+       return (NULL);
 }
 
 ksock_peer_t *
@@ -1051,24 +1051,24 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         conn->ksnc_peer = NULL;
         conn->ksnc_route = NULL;
         conn->ksnc_sock = sock;
-        /* 2 ref, 1 for conn, another extra ref prevents socket
-         * being closed before establishment of connection */
-        cfs_atomic_set (&conn->ksnc_sock_refcount, 2);
-        conn->ksnc_type = type;
-        ksocknal_lib_save_callback(sock, conn);
-        cfs_atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
-
-        conn->ksnc_rx_ready = 0;
-        conn->ksnc_rx_scheduled = 0;
-
-        CFS_INIT_LIST_HEAD (&conn->ksnc_tx_queue);
-        conn->ksnc_tx_ready = 0;
-        conn->ksnc_tx_scheduled = 0;
-        conn->ksnc_tx_carrier = NULL;
-        cfs_atomic_set (&conn->ksnc_tx_nob, 0);
-
-        LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
-                                     kshm_ips[LNET_MAX_INTERFACES]));
+       /* 2 ref, 1 for conn, another extra ref prevents socket
+        * being closed before establishment of connection */
+       atomic_set (&conn->ksnc_sock_refcount, 2);
+       conn->ksnc_type = type;
+       ksocknal_lib_save_callback(sock, conn);
+       atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
+
+       conn->ksnc_rx_ready = 0;
+       conn->ksnc_rx_scheduled = 0;
+
+       CFS_INIT_LIST_HEAD (&conn->ksnc_tx_queue);
+       conn->ksnc_tx_ready = 0;
+       conn->ksnc_tx_scheduled = 0;
+       conn->ksnc_tx_carrier = NULL;
+       atomic_set (&conn->ksnc_tx_nob, 0);
+
+       LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
+                                    kshm_ips[LNET_MAX_INTERFACES]));
         if (hello == NULL) {
                 rc = -ENOMEM;
                 goto failed_1;
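
A socknal conn carries two independent counts: ksnc_conn_refcount guards the
conn structure itself, while ksnc_sock_refcount pins the underlying socket.
Starting the socket count at 2 gives the handshake path its own reference, so
per the comment above the socket cannot be closed before the connection is
established; the extra pin is presumably dropped later in
ksocknal_create_conn(), outside this hunk.  In miniature:

/* two counts, two lifetimes */
atomic_set(&conn->ksnc_conn_refcount, 1);  /* 1 ref for the creator */
atomic_set(&conn->ksnc_sock_refcount, 2);  /* conn's ref + handshake pin */
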
@@ -1619,7 +1619,7 @@ ksocknal_queue_zombie_conn (ksock_conn_t *conn)
 {
        /* Queue the conn for the reaper to destroy */
 
-       LASSERT(cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
+       LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
        cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
@@ -1631,18 +1631,18 @@ ksocknal_queue_zombie_conn (ksock_conn_t *conn)
 void
 ksocknal_destroy_conn (ksock_conn_t *conn)
 {
-        cfs_time_t      last_rcv;
+       cfs_time_t      last_rcv;
 
-        /* Final coup-de-grace of the reaper */
-        CDEBUG (D_NET, "connection %p\n", conn);
+       /* Final coup-de-grace of the reaper */
+       CDEBUG (D_NET, "connection %p\n", conn);
 
-        LASSERT (cfs_atomic_read (&conn->ksnc_conn_refcount) == 0);
-        LASSERT (cfs_atomic_read (&conn->ksnc_sock_refcount) == 0);
-        LASSERT (conn->ksnc_sock == NULL);
-        LASSERT (conn->ksnc_route == NULL);
-        LASSERT (!conn->ksnc_tx_scheduled);
-        LASSERT (!conn->ksnc_rx_scheduled);
-        LASSERT (cfs_list_empty(&conn->ksnc_tx_queue));
+       LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
+       LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
+       LASSERT (conn->ksnc_sock == NULL);
+       LASSERT (conn->ksnc_route == NULL);
+       LASSERT (!conn->ksnc_tx_scheduled);
+       LASSERT (!conn->ksnc_rx_scheduled);
+       LASSERT (cfs_list_empty(&conn->ksnc_tx_queue));
 
         /* complete current receive if any */
         switch (conn->ksnc_rx_state) {
@@ -2220,7 +2220,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 void
 ksocknal_free_buffers (void)
 {
-        LASSERT (cfs_atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
+       LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
 
        if (ksocknal_data.ksnd_sched_info != NULL) {
                struct ksock_sched_info *info;
@@ -2268,9 +2268,9 @@ ksocknal_base_shutdown(void)
        int                     i;
        int                     j;
 
-        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
-               cfs_atomic_read (&libcfs_kmemory));
-        LASSERT (ksocknal_data.ksnd_nnets == 0);
+       CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
+              atomic_read (&libcfs_kmemory));
+       LASSERT (ksocknal_data.ksnd_nnets == 0);
 
         switch (ksocknal_data.ksnd_init) {
         default:
@@ -2347,7 +2347,7 @@ ksocknal_base_shutdown(void)
         }
 
        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
-              cfs_atomic_read (&libcfs_kmemory));
+              atomic_read (&libcfs_kmemory));
 
        module_put(THIS_MODULE);
 }
@@ -2521,35 +2521,35 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
                 ksock_route_t *route;
                 ksock_conn_t  *conn;
 
-                CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
-                       "closing %d, accepting %d, err %d, zcookie "LPU64", "
-                       "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
-                       cfs_atomic_read(&peer->ksnp_refcount),
-                       peer->ksnp_sharecount, peer->ksnp_closing,
-                       peer->ksnp_accepting, peer->ksnp_error,
-                       peer->ksnp_zc_next_cookie,
-                       !cfs_list_empty(&peer->ksnp_tx_queue),
-                       !cfs_list_empty(&peer->ksnp_zc_req_list));
-
-                cfs_list_for_each (tmp, &peer->ksnp_routes) {
-                        route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
-                        CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
-                               "del %d\n", cfs_atomic_read(&route->ksnr_refcount),
-                               route->ksnr_scheduled, route->ksnr_connecting,
-                               route->ksnr_connected, route->ksnr_deleted);
-                }
+               CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
+                      "closing %d, accepting %d, err %d, zcookie "LPU64", "
+                      "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
+                      atomic_read(&peer->ksnp_refcount),
+                      peer->ksnp_sharecount, peer->ksnp_closing,
+                      peer->ksnp_accepting, peer->ksnp_error,
+                      peer->ksnp_zc_next_cookie,
+                      !cfs_list_empty(&peer->ksnp_tx_queue),
+                      !cfs_list_empty(&peer->ksnp_zc_req_list));
+
+               cfs_list_for_each (tmp, &peer->ksnp_routes) {
+                       route = cfs_list_entry(tmp, ksock_route_t, ksnr_list);
+                       CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
+                              "del %d\n", atomic_read(&route->ksnr_refcount),
+                              route->ksnr_scheduled, route->ksnr_connecting,
+                              route->ksnr_connected, route->ksnr_deleted);
+               }
 
-                cfs_list_for_each (tmp, &peer->ksnp_conns) {
-                        conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
-                        CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
-                               cfs_atomic_read(&conn->ksnc_conn_refcount),
-                               cfs_atomic_read(&conn->ksnc_sock_refcount),
-                               conn->ksnc_type, conn->ksnc_closing);
-                }
-        }
+               cfs_list_for_each (tmp, &peer->ksnp_conns) {
+                       conn = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
+                       CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
+                              atomic_read(&conn->ksnc_conn_refcount),
+                              atomic_read(&conn->ksnc_sock_refcount),
+                              conn->ksnc_type, conn->ksnc_closing);
+               }
+       }
 
        read_unlock(&ksocknal_data.ksnd_global_lock);
-        return;
+       return;
 }
 
 void
diff --git a/lnet/klnds/socklnd/socklnd.h b/lnet/klnds/socklnd/socklnd.h
index 9c0c5de..81f2fcd 100644 (file)
@@ -183,7 +183,7 @@ typedef struct
        /* schedulers information */
        struct ksock_sched_info **ksnd_sched_info;
 
-       cfs_atomic_t      ksnd_nactive_txs;    /* #active txs */
+       atomic_t      ksnd_nactive_txs;    /* #active txs */
 
        cfs_list_t        ksnd_deathrow_conns; /* conns to close: reaper_lock*/
        cfs_list_t        ksnd_zombie_conns;   /* conns to free: reaper_lock */
@@ -235,11 +235,11 @@ struct ksock_proto;                             /* forward ref */
 
 typedef struct                                  /* transmit packet */
 {
-        cfs_list_t     tx_list;        /* queue on conn for transmission etc */
-        cfs_list_t     tx_zc_list;     /* queue on peer for ZC request */
-        cfs_atomic_t   tx_refcount;    /* tx reference count */
-        int            tx_nob;         /* # packet bytes */
-        int            tx_resid;       /* residual bytes */
+       cfs_list_t     tx_list;        /* queue on conn for transmission etc */
+       cfs_list_t     tx_zc_list;     /* queue on peer for ZC request */
+       atomic_t       tx_refcount;    /* tx reference count */
+       int            tx_nob;         /* # packet bytes */
+       int            tx_resid;       /* residual bytes */
         int            tx_niov;        /* # packet iovec frags */
         struct iovec  *tx_iov;         /* packet iovec frags */
         int            tx_nkiov;       /* # packet page frags */
@@ -287,13 +287,13 @@ typedef struct ksock_conn
         struct ksock_peer  *ksnc_peer;         /* owning peer */
         struct ksock_route *ksnc_route;        /* owning route */
         cfs_list_t          ksnc_list;         /* stash on peer's conn list */
-        cfs_socket_t       *ksnc_sock;         /* actual socket */
-        void               *ksnc_saved_data_ready; /* socket's original data_ready() callback */
-        void               *ksnc_saved_write_space; /* socket's original write_space() callback */
-        cfs_atomic_t        ksnc_conn_refcount; /* conn refcount */
-        cfs_atomic_t        ksnc_sock_refcount; /* sock refcount */
-        ksock_sched_t      *ksnc_scheduler;  /* who schedules this connection */
-        __u32               ksnc_myipaddr;   /* my IP */
+       cfs_socket_t        *ksnc_sock;         /* actual socket */
+       void                *ksnc_saved_data_ready; /* socket's original data_ready() callback */
+       void                *ksnc_saved_write_space; /* socket's original write_space() callback */
+       atomic_t            ksnc_conn_refcount; /* conn refcount */
+       atomic_t            ksnc_sock_refcount; /* sock refcount */
+       ksock_sched_t       *ksnc_scheduler;  /* who schedules this connection */
+       __u32               ksnc_myipaddr;   /* my IP */
         __u32               ksnc_ipaddr;     /* peer's IP */
         int                 ksnc_port;       /* peer's port */
        signed int          ksnc_type:3;     /* type of connection,
@@ -330,22 +330,22 @@ typedef struct ksock_conn
         cfs_list_t            ksnc_tx_list;     /* where I enq waiting for output space */
         cfs_list_t            ksnc_tx_queue;    /* packets waiting to be sent */
         ksock_tx_t           *ksnc_tx_carrier;  /* next TX that can carry a LNet message or ZC-ACK */
-        cfs_time_t            ksnc_tx_deadline; /* when (in jiffies) tx times out */
-        int                   ksnc_tx_bufnob;     /* send buffer marker */
-        cfs_atomic_t          ksnc_tx_nob;        /* # bytes queued */
-        int                   ksnc_tx_ready;      /* write space */
-        int                   ksnc_tx_scheduled;  /* being progressed */
-        cfs_time_t            ksnc_tx_last_post;  /* time stamp of the last posted TX */
+       cfs_time_t            ksnc_tx_deadline; /* when (in jiffies) tx times out */
+       int                   ksnc_tx_bufnob;     /* send buffer marker */
+       atomic_t              ksnc_tx_nob;        /* # bytes queued */
+       int                   ksnc_tx_ready;      /* write space */
+       int                   ksnc_tx_scheduled;  /* being progressed */
+       cfs_time_t            ksnc_tx_last_post;  /* time stamp of the last posted TX */
 } ksock_conn_t;
 
 typedef struct ksock_route
 {
-        cfs_list_t            ksnr_list;        /* chain on peer route list */
-        cfs_list_t            ksnr_connd_list;  /* chain on ksnr_connd_routes */
-        struct ksock_peer    *ksnr_peer;        /* owning peer */
-        cfs_atomic_t          ksnr_refcount;    /* # users */
-        cfs_time_t            ksnr_timeout;     /* when (in jiffies) reconnection can happen next */
-        cfs_duration_t        ksnr_retry_interval; /* how long between retries */
+       cfs_list_t            ksnr_list;        /* chain on peer route list */
+       cfs_list_t            ksnr_connd_list;  /* chain on ksnr_connd_routes */
+       struct ksock_peer    *ksnr_peer;        /* owning peer */
+       atomic_t          ksnr_refcount;    /* # users */
+       cfs_time_t            ksnr_timeout;     /* when (in jiffies) reconnection can happen next */
+       cfs_duration_t        ksnr_retry_interval; /* how long between retries */
         __u32                 ksnr_myipaddr;    /* my IP */
         __u32                 ksnr_ipaddr;      /* IP address to connect to */
         int                   ksnr_port;        /* port to connect to */
@@ -361,12 +361,12 @@ typedef struct ksock_route
 
 typedef struct ksock_peer
 {
-        cfs_list_t            ksnp_list;        /* stash on global peer list */
-        cfs_time_t            ksnp_last_alive;  /* when (in jiffies) I was last alive */
-        lnet_process_id_t     ksnp_id;       /* who's on the other end(s) */
-        cfs_atomic_t          ksnp_refcount; /* # users */
-        int                   ksnp_sharecount;  /* lconf usage counter */
-        int                   ksnp_closing;  /* being closed */
+       cfs_list_t            ksnp_list;        /* stash on global peer list */
+       cfs_time_t            ksnp_last_alive;  /* when (in jiffies) I was last alive */
+       lnet_process_id_t     ksnp_id;       /* who's on the other end(s) */
+       atomic_t              ksnp_refcount; /* # users */
+       int                   ksnp_sharecount;  /* lconf usage counter */
+       int                   ksnp_closing;  /* being closed */
         int                   ksnp_accepting;/* # passive connections pending */
         int                   ksnp_error;    /* errno on closing last conn */
         __u64                 ksnp_zc_next_cookie;/* ZC completion cookie */
@@ -449,8 +449,8 @@ ksocknal_nid2peerlist (lnet_nid_t nid)
 static inline void
 ksocknal_conn_addref (ksock_conn_t *conn)
 {
-        LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
-        cfs_atomic_inc(&conn->ksnc_conn_refcount);
+       LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+       atomic_inc(&conn->ksnc_conn_refcount);
 }
 
 extern void ksocknal_queue_zombie_conn (ksock_conn_t *conn);
@@ -459,44 +459,44 @@ extern void ksocknal_finalize_zcreq(ksock_conn_t *conn);
 static inline void
 ksocknal_conn_decref (ksock_conn_t *conn)
 {
-        LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
-        if (cfs_atomic_dec_and_test(&conn->ksnc_conn_refcount))
-                ksocknal_queue_zombie_conn(conn);
+       LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+       if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
+               ksocknal_queue_zombie_conn(conn);
 }
 
 static inline int
 ksocknal_connsock_addref (ksock_conn_t *conn)
 {
-        int   rc = -ESHUTDOWN;
+       int   rc = -ESHUTDOWN;
 
        read_lock(&ksocknal_data.ksnd_global_lock);
        if (!conn->ksnc_closing) {
-               LASSERT(cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
-               cfs_atomic_inc(&conn->ksnc_sock_refcount);
+               LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
+               atomic_inc(&conn->ksnc_sock_refcount);
                rc = 0;
        }
        read_unlock(&ksocknal_data.ksnd_global_lock);
 
-        return (rc);
+       return (rc);
 }
 
 static inline void
 ksocknal_connsock_decref (ksock_conn_t *conn)
 {
-        LASSERT (cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
-        if (cfs_atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
-                LASSERT (conn->ksnc_closing);
-                libcfs_sock_release(conn->ksnc_sock);
-                conn->ksnc_sock = NULL;
-                ksocknal_finalize_zcreq(conn);
-        }
+       LASSERT (atomic_read(&conn->ksnc_sock_refcount) > 0);
+       if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
+               LASSERT (conn->ksnc_closing);
+               libcfs_sock_release(conn->ksnc_sock);
+               conn->ksnc_sock = NULL;
+               ksocknal_finalize_zcreq(conn);
+       }
 }
 
 static inline void
 ksocknal_tx_addref (ksock_tx_t *tx)
 {
-        LASSERT (cfs_atomic_read(&tx->tx_refcount) > 0);
-        cfs_atomic_inc(&tx->tx_refcount);
+       LASSERT (atomic_read(&tx->tx_refcount) > 0);
+       atomic_inc(&tx->tx_refcount);
 }
 
 extern void ksocknal_tx_prep (ksock_conn_t *, ksock_tx_t *tx);
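
ksocknal_connsock_addref() above is the conditional variant of addref: it can
fail.  The read lock stabilizes ksnc_closing, and only while the conn is not
closing is the socket count (known to be positive) bumped; otherwise the
caller gets -ESHUTDOWN and must not touch the socket.  The shape of such a
"tryget", assuming the illustrative obj_t also carries an o_closing flag that
is only ever set under the corresponding write lock:

static inline int
obj_tryget(obj_t *obj)
{
        int rc = -ESHUTDOWN;

        read_lock(&obj_global_lock);
        if (!obj->o_closing) {
                /* still live, so at least one reference must exist */
                LASSERT(atomic_read(&obj->o_refcount) > 0);
                atomic_inc(&obj->o_refcount);
                rc = 0;
        }
        read_unlock(&obj_global_lock);

        return rc;
}
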
@@ -505,16 +505,16 @@ extern void ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx);
 static inline void
 ksocknal_tx_decref (ksock_tx_t *tx)
 {
-        LASSERT (cfs_atomic_read(&tx->tx_refcount) > 0);
-        if (cfs_atomic_dec_and_test(&tx->tx_refcount))
-                ksocknal_tx_done(NULL, tx);
+       LASSERT (atomic_read(&tx->tx_refcount) > 0);
+       if (atomic_dec_and_test(&tx->tx_refcount))
+               ksocknal_tx_done(NULL, tx);
 }
 
 static inline void
 ksocknal_route_addref (ksock_route_t *route)
 {
-        LASSERT (cfs_atomic_read(&route->ksnr_refcount) > 0);
-        cfs_atomic_inc(&route->ksnr_refcount);
+       LASSERT (atomic_read(&route->ksnr_refcount) > 0);
+       atomic_inc(&route->ksnr_refcount);
 }
 
 extern void ksocknal_destroy_route (ksock_route_t *route);
@@ -522,16 +522,16 @@ extern void ksocknal_destroy_route (ksock_route_t *route);
 static inline void
 ksocknal_route_decref (ksock_route_t *route)
 {
-        LASSERT (cfs_atomic_read (&route->ksnr_refcount) > 0);
-        if (cfs_atomic_dec_and_test(&route->ksnr_refcount))
-                ksocknal_destroy_route (route);
+       LASSERT (atomic_read (&route->ksnr_refcount) > 0);
+       if (atomic_dec_and_test(&route->ksnr_refcount))
+               ksocknal_destroy_route (route);
 }
 
 static inline void
 ksocknal_peer_addref (ksock_peer_t *peer)
 {
-        LASSERT (cfs_atomic_read (&peer->ksnp_refcount) > 0);
-        cfs_atomic_inc(&peer->ksnp_refcount);
+       LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
+       atomic_inc(&peer->ksnp_refcount);
 }
 
 extern void ksocknal_destroy_peer (ksock_peer_t *peer);
@@ -539,9 +539,9 @@ extern void ksocknal_destroy_peer (ksock_peer_t *peer);
 static inline void
 ksocknal_peer_decref (ksock_peer_t *peer)
 {
-        LASSERT (cfs_atomic_read (&peer->ksnp_refcount) > 0);
-        if (cfs_atomic_dec_and_test(&peer->ksnp_refcount))
-                ksocknal_destroy_peer (peer);
+       LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
+       if (atomic_dec_and_test(&peer->ksnp_refcount))
+               ksocknal_destroy_peer (peer);
 }
 
 int ksocknal_startup (lnet_ni_t *ni);
diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c
index a221718..e945ef2 100644 (file)
@@ -53,15 +53,15 @@ ksocknal_alloc_tx(int type, int size)
         if (tx == NULL)
                 return NULL;
 
-        cfs_atomic_set(&tx->tx_refcount, 1);
-        tx->tx_zc_aborted = 0;
-        tx->tx_zc_capable = 0;
-        tx->tx_zc_checked = 0;
-        tx->tx_desc_size  = size;
+       atomic_set(&tx->tx_refcount, 1);
+       tx->tx_zc_aborted = 0;
+       tx->tx_zc_capable = 0;
+       tx->tx_zc_checked = 0;
+       tx->tx_desc_size  = size;
 
-        cfs_atomic_inc(&ksocknal_data.ksnd_nactive_txs);
+       atomic_inc(&ksocknal_data.ksnd_nactive_txs);
 
-        return tx;
+       return tx;
 }
 
 ksock_tx_t *
@@ -93,7 +93,7 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
 void
 ksocknal_free_tx (ksock_tx_t *tx)
 {
-       cfs_atomic_dec(&ksocknal_data.ksnd_nactive_txs);
+       atomic_dec(&ksocknal_data.ksnd_nactive_txs);
 
        if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
                /* it's a noop tx */
@@ -238,7 +238,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                 }
 
                 /* socket's wmem_queued now includes 'rc' bytes */
-                cfs_atomic_sub (rc, &conn->ksnc_tx_nob);
+               atomic_sub (rc, &conn->ksnc_tx_nob);
                 rc = 0;
 
         } while (tx->tx_resid != 0);
@@ -426,7 +426,7 @@ ksocknal_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist, int error)
 
                 cfs_list_del (&tx->tx_list);
 
-                LASSERT (cfs_atomic_read(&tx->tx_refcount) == 1);
+               LASSERT (atomic_read(&tx->tx_refcount) == 1);
                 ksocknal_tx_done (ni, tx);
         }
 }
@@ -529,7 +529,7 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                 counter++;   /* exponential backoff warnings */
                 if ((counter & (-counter)) == counter)
                         CWARN("%u ENOMEM tx %p (%u allocated)\n",
-                              counter, conn, cfs_atomic_read(&libcfs_kmemory));
+                             counter, conn, atomic_read(&libcfs_kmemory));
 
                 /* Queue on ksnd_enomem_conns for retry after a timeout */
                spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -631,7 +631,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
 
         cfs_list_for_each (tmp, &peer->ksnp_conns) {
                 ksock_conn_t *c  = cfs_list_entry(tmp, ksock_conn_t, ksnc_list);
-                int           nob = cfs_atomic_read(&c->ksnc_tx_nob) +
+               int           nob = atomic_read(&c->ksnc_tx_nob) +
                                     libcfs_sock_wmem_queued(c->ksnc_sock);
                 int           rc;
 
@@ -681,7 +681,7 @@ ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
 {
         conn->ksnc_proto->pro_pack(tx);
 
-        cfs_atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+       atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
         ksocknal_conn_addref(conn); /* +1 ref for tx */
         tx->tx_conn = conn;
 }
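
ksnc_tx_nob is a byte counter kept coherent with atomics instead of a lock:
ksocknal_tx_prep() above charges a tx's bytes to its conn, ksocknal_transmit()
subtracts them as the socket accepts data, and ksocknal_find_conn_locked()
takes a lock-free atomic_read() snapshot (plus the socket's own write-memory
backlog) to pick the least-loaded connection.  The three sites, condensed
from the hunks around this point:

/* enqueue: charge the connection with the message's bytes */
atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);

/* transmit: 'rc' bytes were accepted by the socket, uncharge them */
atomic_sub(rc, &conn->ksnc_tx_nob);

/* scheduling: cheap estimate of per-conn backlog, no lock taken */
nob = atomic_read(&c->ksnc_tx_nob) + libcfs_sock_wmem_queued(c->ksnc_sock);
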
@@ -761,7 +761,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
         }
 
         if (ztx != NULL) {
-                cfs_atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+               atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
                 cfs_list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
         }
 
@@ -1117,7 +1117,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
         lnet_process_id_t *id;
         int                rc;
 
-        LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
+       LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
 
         /* NB: sched lock NOT held */
         /* SOCKNAL_RX_LNET_HEADER is here for backward compatability */
diff --git a/lnet/selftest/brw_test.c b/lnet/selftest/brw_test.c
index 5ba70d7..25d94cb 100644 (file)
@@ -332,7 +332,7 @@ brw_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
                 CERROR ("BRW RPC to %s failed with %d\n",
                         libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
                 if (!tsi->tsi_stopping) /* rpc could have been aborted */
-                        cfs_atomic_inc(&sn->sn_brw_errors);
+                       atomic_inc(&sn->sn_brw_errors);
                 goto out;
         }
 
@@ -346,7 +346,7 @@ brw_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
                 libcfs_id2str(rpc->crpc_dest), reply->brw_status);
 
         if (reply->brw_status != 0) {
-                cfs_atomic_inc(&sn->sn_brw_errors);
+               atomic_inc(&sn->sn_brw_errors);
                 rpc->crpc_status = -(int)reply->brw_status;
                 goto out;
         }
@@ -356,7 +356,7 @@ brw_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
         if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic) != 0) {
                 CERROR ("Bulk data from %s is corrupted!\n",
                         libcfs_id2str(rpc->crpc_dest));
-                cfs_atomic_inc(&sn->sn_brw_errors);
+               atomic_inc(&sn->sn_brw_errors);
                 rpc->crpc_status = -EBADMSG;
         }
 
diff --git a/lnet/selftest/conrpc.c b/lnet/selftest/conrpc.c
index 72d6add..37455a2 100644 (file)
@@ -83,7 +83,7 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
        }
 
        /* wakeup (transaction)thread if I'm the last RPC in the transaction */
-       if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
+       if (atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
                wake_up(&crpc->crp_trans->tas_waitq);
 
        spin_unlock(&rpc->crpc_lock);
@@ -109,7 +109,7 @@ lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
        crpc->crp_embedded = embedded;
         CFS_INIT_LIST_HEAD(&crpc->crp_link);
 
-        cfs_atomic_inc(&console_session.ses_rpc_counter);
+       atomic_inc(&console_session.ses_rpc_counter);
 
         return 0;
 }
@@ -180,7 +180,7 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
        }
 
        /* RPC is not alive now */
-       cfs_atomic_dec(&console_session.ses_rpc_counter);
+       atomic_dec(&console_session.ses_rpc_counter);
 }
 
 void
@@ -190,7 +190,7 @@ lstcon_rpc_post(lstcon_rpc_t *crpc)
 
         LASSERT (trans != NULL);
 
-        cfs_atomic_inc(&trans->tas_remaining);
+       atomic_inc(&trans->tas_remaining);
         crpc->crp_posted = 1;
 
         sfw_post_rpc(crpc->crp_rpc);
@@ -266,7 +266,7 @@ lstcon_rpc_trans_prep(cfs_list_t *translist,
         cfs_list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
 
        CFS_INIT_LIST_HEAD(&trans->tas_rpcs_list);
-       cfs_atomic_set(&trans->tas_remaining, 0);
+       atomic_set(&trans->tas_remaining, 0);
        init_waitqueue_head(&trans->tas_waitq);
 
        spin_lock(&console_session.ses_rpc_lock);
@@ -333,7 +333,7 @@ lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
             !cfs_list_empty(&trans->tas_olink)) /* Not an end session RPC */
                 return 1;
 
-        return (cfs_atomic_read(&trans->tas_remaining) == 0) ? 1: 0;
+       return (atomic_read(&trans->tas_remaining) == 0) ? 1: 0;
 }
 
 int
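
An lstcon transaction is a scatter/gather barrier: lstcon_rpc_post() bumps
tas_remaining for every RPC it launches, lstcon_rpc_done() drops it, and the
RPC that takes the count to zero wakes tas_waitq; lstcon_rpc_trans_check()
above tests the same condition.  The barrier in isolation, with a plain
wait_event() standing in for the codebase's own waiter:

#include <linux/atomic.h>
#include <linux/wait.h>

/* setup (lstcon_rpc_trans_prep) */
atomic_set(&trans->tas_remaining, 0);
init_waitqueue_head(&trans->tas_waitq);

/* posting side, once per RPC launched */
atomic_inc(&trans->tas_remaining);

/* completion callback, once per RPC finished */
if (atomic_dec_and_test(&trans->tas_remaining))
        wake_up(&trans->tas_waitq);

/* gathering side: sleep until the last completion */
wait_event(trans->tas_waitq, atomic_read(&trans->tas_remaining) == 0);
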
@@ -586,10 +586,10 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
 
                spin_unlock(&rpc->crpc_lock);
 
-                cfs_atomic_dec(&trans->tas_remaining);
+               atomic_dec(&trans->tas_remaining);
         }
 
-        LASSERT (cfs_atomic_read(&trans->tas_remaining) == 0);
+       LASSERT (atomic_read(&trans->tas_remaining) == 0);
 
         cfs_list_del(&trans->tas_link);
         if (!cfs_list_empty(&trans->tas_olink))
@@ -1304,7 +1304,7 @@ lstcon_rpc_pinger_start(void)
         int             rc;
 
         LASSERT (cfs_list_empty(&console_session.ses_rpc_freelist));
-        LASSERT (cfs_atomic_read(&console_session.ses_rpc_counter) == 0);
+       LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
 
         rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING,
                                    &console_session.ses_ping);
@@ -1371,11 +1371,11 @@ lstcon_rpc_cleanup_wait(void)
 
        spin_lock(&console_session.ses_rpc_lock);
 
-        lst_wait_until((cfs_atomic_read(&console_session.ses_rpc_counter) == 0),
+       lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0),
                        console_session.ses_rpc_lock,
                        "Network is not accessable or target is down, "
                        "waiting for %d console RPCs to being recycled\n",
-                       cfs_atomic_read(&console_session.ses_rpc_counter));
+                      atomic_read(&console_session.ses_rpc_counter));
 
         cfs_list_add(&zlist, &console_session.ses_rpc_freelist);
         cfs_list_del_init(&console_session.ses_rpc_freelist);
@@ -1400,7 +1400,7 @@ lstcon_rpc_module_init(void)
         console_session.ses_ping = NULL;
 
        spin_lock_init(&console_session.ses_rpc_lock);
-       cfs_atomic_set(&console_session.ses_rpc_counter, 0);
+       atomic_set(&console_session.ses_rpc_counter, 0);
        CFS_INIT_LIST_HEAD(&console_session.ses_rpc_freelist);
 
        return 0;
@@ -1410,7 +1410,7 @@ void
 lstcon_rpc_module_fini(void)
 {
         LASSERT (cfs_list_empty(&console_session.ses_rpc_freelist));
-        LASSERT (cfs_atomic_read(&console_session.ses_rpc_counter) == 0);
+       LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
 }
 
 #endif
diff --git a/lnet/selftest/conrpc.h b/lnet/selftest/conrpc.h
index 59aead3..7e3ddb5 100644 (file)
@@ -88,7 +88,7 @@ typedef struct lstcon_rpc_trans {
        /* test features mask */
        unsigned              tas_features;
        wait_queue_head_t     tas_waitq;     /* wait queue head */
-       cfs_atomic_t          tas_remaining; /* # of un-scheduled rpcs */
+       atomic_t          tas_remaining; /* # of un-scheduled rpcs */
        cfs_list_t            tas_rpcs_list; /* queued requests */
 } lstcon_rpc_trans_t;
 
diff --git a/lnet/selftest/console.h b/lnet/selftest/console.h
index 0ddcc4d..71efca5 100644 (file)
@@ -163,8 +163,8 @@ typedef struct {
         cfs_list_t             *ses_ndl_hash;   /* hash table of nodes */
 
        spinlock_t          ses_rpc_lock;   /* serialize */
-        cfs_atomic_t            ses_rpc_counter;/* # of initialized RPCs */
-        cfs_list_t              ses_rpc_freelist; /* idle console rpc */
+       atomic_t            ses_rpc_counter;/* # of initialized RPCs */
+       cfs_list_t              ses_rpc_freelist; /* idle console rpc */
 } lstcon_session_t;                             /*** session descriptor */
 
 extern lstcon_session_t         console_session;
diff --git a/lnet/selftest/framework.c b/lnet/selftest/framework.c
index 2e32c3d..6959a5b 100644 (file)
@@ -100,14 +100,14 @@ do {                                    \
         __swab64s(&(lc).route_length);  \
 } while (0)
 
-#define sfw_test_active(t)      (cfs_atomic_read(&(t)->tsi_nactive) != 0)
-#define sfw_batch_active(b)     (cfs_atomic_read(&(b)->bat_nactive) != 0)
+#define sfw_test_active(t)      (atomic_read(&(t)->tsi_nactive) != 0)
+#define sfw_batch_active(b)     (atomic_read(&(b)->bat_nactive) != 0)
 
 struct smoketest_framework {
-        cfs_list_t         fw_zombie_rpcs;     /* RPCs to be recycled */
-        cfs_list_t         fw_zombie_sessions; /* stopping sessions */
-        cfs_list_t         fw_tests;           /* registered test cases */
-        cfs_atomic_t       fw_nzombies;        /* # zombie sessions */
+       cfs_list_t         fw_zombie_rpcs;     /* RPCs to be recycled */
+       cfs_list_t         fw_zombie_sessions; /* stopping sessions */
+       cfs_list_t         fw_tests;           /* registered test cases */
+       atomic_t       fw_nzombies;        /* # zombie sessions */
        spinlock_t         fw_lock;             /* serialise */
        sfw_session_t     *fw_session;          /* _the_ session */
        int                fw_shuttingdown;     /* shutdown in progress */
@@ -214,7 +214,7 @@ sfw_deactivate_session (void)
         LASSERT (!sn->sn_timer_active);
 
         sfw_data.fw_session = NULL;
-        cfs_atomic_inc(&sfw_data.fw_nzombies);
+       atomic_inc(&sfw_data.fw_nzombies);
         cfs_list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
 
        spin_unlock(&sfw_data.fw_lock);
@@ -284,9 +284,9 @@ sfw_init_session(sfw_session_t *sn, lst_sid_t sid,
         memset(sn, 0, sizeof(sfw_session_t));
         CFS_INIT_LIST_HEAD(&sn->sn_list);
         CFS_INIT_LIST_HEAD(&sn->sn_batches);
-        cfs_atomic_set(&sn->sn_refcount, 1);        /* +1 for caller */
-        cfs_atomic_set(&sn->sn_brw_errors, 0);
-        cfs_atomic_set(&sn->sn_ping_errors, 0);
+       atomic_set(&sn->sn_refcount, 1);        /* +1 for caller */
+       atomic_set(&sn->sn_brw_errors, 0);
+       atomic_set(&sn->sn_ping_errors, 0);
        strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name));
 
         sn->sn_timer_active = 0;
@@ -324,7 +324,7 @@ sfw_client_rpc_fini (srpc_client_rpc_t *rpc)
 {
         LASSERT (rpc->crpc_bulk.bk_niov == 0);
         LASSERT (cfs_list_empty(&rpc->crpc_list));
-        LASSERT (cfs_atomic_read(&rpc->crpc_refcount) == 0);
+       LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
 #ifndef __KERNEL__
         LASSERT (rpc->crpc_bulk.bk_pages == NULL);
 #endif
@@ -381,7 +381,7 @@ sfw_bid2batch (lst_bid_t bid)
         bat->bat_error    = 0;
         bat->bat_session  = sn;
         bat->bat_id       = bid;
-        cfs_atomic_set(&bat->bat_nactive, 0);
+       atomic_set(&bat->bat_nactive, 0);
         CFS_INIT_LIST_HEAD(&bat->bat_tests);
 
         cfs_list_add_tail(&bat->bat_list, &sn->sn_batches);
@@ -417,14 +417,14 @@ sfw_get_stats (srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
                                        sn->sn_started), &tv);
 
         cnt->running_ms      = (__u32)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
-        cnt->brw_errors      = cfs_atomic_read(&sn->sn_brw_errors);
-        cnt->ping_errors     = cfs_atomic_read(&sn->sn_ping_errors);
-        cnt->zombie_sessions = cfs_atomic_read(&sfw_data.fw_nzombies);
+       cnt->brw_errors      = atomic_read(&sn->sn_brw_errors);
+       cnt->ping_errors     = atomic_read(&sn->sn_ping_errors);
+       cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
 
         cnt->active_batches = 0;
         cfs_list_for_each_entry_typed (bat, &sn->sn_batches,
                                        sfw_batch_t, bat_list) {
-                if (cfs_atomic_read(&bat->bat_nactive) > 0)
+               if (atomic_read(&bat->bat_nactive) > 0)
                         cnt->active_batches++;
         }
 
@@ -452,7 +452,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
                 reply->mksn_timeout = sn->sn_timeout;
 
                 if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) {
-                        cfs_atomic_inc(&sn->sn_refcount);
+                       atomic_inc(&sn->sn_refcount);
                         return 0;
                 }
 
@@ -518,7 +518,7 @@ sfw_remove_session (srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
                 return 0;
         }
 
-        if (!cfs_atomic_dec_and_test(&sn->sn_refcount)) {
+       if (!atomic_dec_and_test(&sn->sn_refcount)) {
                 reply->rmsn_status = 0;
                 return 0;
         }
@@ -700,7 +700,7 @@ sfw_destroy_session (sfw_session_t *sn)
         }
 
         LIBCFS_FREE(sn, sizeof(*sn));
-        cfs_atomic_dec(&sfw_data.fw_nzombies);
+       atomic_dec(&sfw_data.fw_nzombies);
         return;
 }
 
@@ -770,7 +770,7 @@ sfw_add_test_instance (sfw_batch_t *tsb, srpc_server_rpc_t *rpc)
 
         memset(tsi, 0, sizeof(*tsi));
        spin_lock_init(&tsi->tsi_lock);
-        cfs_atomic_set(&tsi->tsi_nactive, 0);
+       atomic_set(&tsi->tsi_nactive, 0);
         CFS_INIT_LIST_HEAD(&tsi->tsi_units);
         CFS_INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
         CFS_INIT_LIST_HEAD(&tsi->tsi_active_rpcs);
@@ -861,7 +861,7 @@ sfw_test_unit_done (sfw_test_unit_t *tsu)
 
         LASSERT (sfw_test_active(tsi));
 
-        if (!cfs_atomic_dec_and_test(&tsi->tsi_nactive))
+       if (!atomic_dec_and_test(&tsi->tsi_nactive))
                 return;
 
         /* the test instance is done */
@@ -873,7 +873,7 @@ sfw_test_unit_done (sfw_test_unit_t *tsu)
 
        spin_lock(&sfw_data.fw_lock);
 
-       if (!cfs_atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
+       if (!atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
            sn == sfw_data.fw_session) {                  /* sn also active */
                spin_unlock(&sfw_data.fw_lock);
                 return;
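
sfw_test_unit_done() above chains two of these countdowns: the last unit of a
test instance (tsi_nactive reaching zero) retires the instance, and the last
instance of a batch (bat_nactive) retires the batch.  Because
atomic_dec_and_test() elects exactly one "last" thread at each level, each
teardown path runs once no matter how many units finish concurrently.
Condensed, with hypothetical teardown hooks:

if (!atomic_dec_and_test(&tsi->tsi_nactive))
        return;                         /* other test units still running */

sfw_test_instance_done(tsi);            /* hypothetical hook */

if (atomic_dec_and_test(&tsb->bat_nactive))
        sfw_batch_done(tsb);            /* hypothetical hook */
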
@@ -1033,7 +1033,7 @@ sfw_run_batch (sfw_batch_t *tsb)
 
         if (sfw_batch_active(tsb)) {
                 CDEBUG(D_NET, "Batch already active: "LPU64" (%d)\n",
-                       tsb->bat_id.bat_id, cfs_atomic_read(&tsb->bat_nactive));
+                      tsb->bat_id.bat_id, atomic_read(&tsb->bat_nactive));
                 return 0;
         }
 
@@ -1045,11 +1045,11 @@ sfw_run_batch (sfw_batch_t *tsb)
                 LASSERT (!tsi->tsi_stopping);
                 LASSERT (!sfw_test_active(tsi));
 
-                cfs_atomic_inc(&tsb->bat_nactive);
+               atomic_inc(&tsb->bat_nactive);
 
                 cfs_list_for_each_entry_typed (tsu, &tsi->tsi_units,
                                                sfw_test_unit_t, tsu_list) {
-                        cfs_atomic_inc(&tsi->tsi_nactive);
+                       atomic_inc(&tsi->tsi_nactive);
                         tsu->tsu_loop = tsi->tsi_loop;
                         wi = &tsu->tsu_worker;
                        swi_init_workitem(wi, tsu, sfw_run_test,
@@ -1115,7 +1115,7 @@ sfw_query_batch (sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply)
                 return -EINVAL;
 
         if (testidx == 0) {
-                reply->bar_active = cfs_atomic_read(&tsb->bat_nactive);
+               reply->bar_active = atomic_read(&tsb->bat_nactive);
                 return 0;
         }
 
@@ -1124,7 +1124,7 @@ sfw_query_batch (sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply)
                 if (testidx-- > 1)
                         continue;
 
-                reply->bar_active = cfs_atomic_read(&tsi->tsi_nactive);
+               reply->bar_active = atomic_read(&tsi->tsi_nactive);
                 return 0;
         }
 
@@ -1611,7 +1611,7 @@ sfw_unpack_message (srpc_msg_t *msg)
 void
 sfw_abort_rpc (srpc_client_rpc_t *rpc)
 {
-       LASSERT(cfs_atomic_read(&rpc->crpc_refcount) > 0);
+       LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
        LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
 
        spin_lock(&rpc->crpc_lock);
@@ -1731,7 +1731,7 @@ sfw_startup (void)
         sfw_data.fw_session     = NULL;
         sfw_data.fw_active_srpc = NULL;
        spin_lock_init(&sfw_data.fw_lock);
-        cfs_atomic_set(&sfw_data.fw_nzombies, 0);
+       atomic_set(&sfw_data.fw_nzombies, 0);
         CFS_INIT_LIST_HEAD(&sfw_data.fw_tests);
         CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
         CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
@@ -1817,10 +1817,10 @@ sfw_shutdown (void)
                                "waiting for session timer to explode.\n");
 
         sfw_deactivate_session();
-        lst_wait_until(cfs_atomic_read(&sfw_data.fw_nzombies) == 0,
+       lst_wait_until(atomic_read(&sfw_data.fw_nzombies) == 0,
                        sfw_data.fw_lock,
                        "waiting for %d zombie sessions to die.\n",
-                       cfs_atomic_read(&sfw_data.fw_nzombies));
+                      atomic_read(&sfw_data.fw_nzombies));
 
        spin_unlock(&sfw_data.fw_lock);
 
diff --git a/lnet/selftest/ping_test.c b/lnet/selftest/ping_test.c
index 41282f4..f512036 100644 (file)
@@ -77,7 +77,7 @@ ping_client_fini (sfw_test_instance_t *tsi)
         LASSERT (sn != NULL);
         LASSERT (tsi->tsi_is_client);
 
-        errors = cfs_atomic_read(&sn->sn_ping_errors);
+       errors = atomic_read(&sn->sn_ping_errors);
         if (errors)
                 CWARN ("%d pings have failed.\n", errors);
         else
@@ -129,7 +129,7 @@ ping_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
 
         if (rpc->crpc_status != 0) {
                 if (!tsi->tsi_stopping) /* rpc could have been aborted */
-                        cfs_atomic_inc(&sn->sn_ping_errors);
+                       atomic_inc(&sn->sn_ping_errors);
                 CERROR ("Unable to ping %s (%d): %d\n",
                         libcfs_id2str(rpc->crpc_dest),
                         reqst->pnr_seq, rpc->crpc_status);
@@ -144,7 +144,7 @@ ping_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
 
         if (reply->pnr_magic != LST_PING_TEST_MAGIC) {
                 rpc->crpc_status = -EBADMSG;
-                cfs_atomic_inc(&sn->sn_ping_errors);
+               atomic_inc(&sn->sn_ping_errors);
                 CERROR ("Bad magic %u from %s, %u expected.\n",
                         reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
                         LST_PING_TEST_MAGIC);
@@ -153,7 +153,7 @@ ping_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
 
         if (reply->pnr_seq != reqst->pnr_seq) {
                 rpc->crpc_status = -EBADMSG;
-                cfs_atomic_inc(&sn->sn_ping_errors);
+               atomic_inc(&sn->sn_ping_errors);
                 CERROR ("Bad seq %u from %s, %u expected.\n",
                         reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
                         reqst->pnr_seq);
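
sn_ping_errors shows the simplest legitimate use of atomic_t: a statistics
counter bumped from RPC completion context, possibly on several CPUs at once.
A plain int would need a lock because ++ is a read-modify-write that can lose
updates under concurrency; atomic_inc() makes the increment indivisible, and
ping_client_fini() only ever needs a single atomic_read() snapshot for its
summary.  In outline:

/* completion context, any CPU: count the failure and carry on */
if (reply->pnr_magic != LST_PING_TEST_MAGIC)
        atomic_inc(&sn->sn_ping_errors);

/* teardown: one snapshot is all the report needs */
errors = atomic_read(&sn->sn_ping_errors);
if (errors != 0)
        CWARN("%d pings have failed.\n", errors);
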
diff --git a/lnet/selftest/selftest.h b/lnet/selftest/selftest.h
index d1ecabd..ab5ea8f 100644 (file)
@@ -218,7 +218,7 @@ typedef struct srpc_client_rpc {
        cfs_list_t              crpc_list;      /* chain on user's lists */
        spinlock_t              crpc_lock;      /* serialize */
         int                  crpc_service;
-        cfs_atomic_t         crpc_refcount;
+       atomic_t         crpc_refcount;
         int                  crpc_timeout; /* # seconds to wait for reply */
         stt_timer_t          crpc_timer;
         swi_workitem_t       crpc_wi;
@@ -253,18 +253,18 @@ offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
 do {                                                                    \
         CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n",                         \
                (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
-               cfs_atomic_read(&(rpc)->crpc_refcount));                 \
-        LASSERT(cfs_atomic_read(&(rpc)->crpc_refcount) > 0);            \
-        cfs_atomic_inc(&(rpc)->crpc_refcount);                          \
+              atomic_read(&(rpc)->crpc_refcount));                 \
+       LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);            \
+       atomic_inc(&(rpc)->crpc_refcount);                          \
 } while (0)
 
 #define srpc_client_rpc_decref(rpc)                                     \
 do {                                                                    \
         CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n",                         \
                (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
-               cfs_atomic_read(&(rpc)->crpc_refcount));                 \
-        LASSERT(cfs_atomic_read(&(rpc)->crpc_refcount) > 0);            \
-        if (cfs_atomic_dec_and_test(&(rpc)->crpc_refcount))             \
+              atomic_read(&(rpc)->crpc_refcount));                 \
+       LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);            \
+       if (atomic_dec_and_test(&(rpc)->crpc_refcount))             \
                 srpc_destroy_client_rpc(rpc);                           \
 } while (0)
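
srpc_client_rpc_addref()/decref() stay statement macros here, and the
do { ... } while (0) wrapper is the standard idiom that lets a multi-statement
macro sit safely in an unbraced if/else.  Stripped of the debug trace, the
decref macro is functionally equivalent to this inline sketch:

static inline void
srpc_client_rpc_decref_sketch(srpc_client_rpc_t *rpc)
{
        LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
        if (atomic_dec_and_test(&rpc->crpc_refcount))
                srpc_destroy_client_rpc(rpc);
}
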
 
@@ -344,9 +344,9 @@ typedef struct {
         stt_timer_t       sn_timer;
         cfs_list_t        sn_batches; /* list of batches */
         char              sn_name[LST_NAME_SIZE];
-        cfs_atomic_t      sn_refcount;
-        cfs_atomic_t      sn_brw_errors;
-        cfs_atomic_t      sn_ping_errors;
+       atomic_t      sn_refcount;
+       atomic_t      sn_brw_errors;
+       atomic_t      sn_ping_errors;
         cfs_time_t        sn_started;
 } sfw_session_t;
 
@@ -358,7 +358,7 @@ typedef struct {
         lst_bid_t         bat_id;        /* batch id */
         int               bat_error;     /* error code of batch */
         sfw_session_t    *bat_session;   /* batch's session */
-        cfs_atomic_t      bat_nactive;   /* # of active tests */
+       atomic_t      bat_nactive;   /* # of active tests */
         cfs_list_t        bat_tests;     /* test instances */
 } sfw_batch_t;
 
@@ -387,10 +387,10 @@ typedef struct sfw_test_instance {
        /* status of test instance */
        spinlock_t              tsi_lock;         /* serialize */
        unsigned int            tsi_stopping:1;   /* test is stopping */
-        cfs_atomic_t            tsi_nactive;      /* # of active test unit */
-        cfs_list_t              tsi_units;        /* test units */
-        cfs_list_t              tsi_free_rpcs;    /* free rpcs */
-        cfs_list_t              tsi_active_rpcs;  /* active rpcs */
+       atomic_t            tsi_nactive;      /* # of active test unit */
+       cfs_list_t              tsi_units;        /* test units */
+       cfs_list_t              tsi_free_rpcs;    /* free rpcs */
+       cfs_list_t              tsi_active_rpcs;  /* active rpcs */
 
        union {
                test_ping_req_t         ping;     /* ping parameter */
@@ -521,20 +521,20 @@ void srpc_shutdown(void);
 static inline void
 srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
 {
-        LASSERT (rpc != NULL);
-        LASSERT (!srpc_event_pending(rpc));
-        LASSERT (cfs_atomic_read(&rpc->crpc_refcount) == 0);
+       LASSERT (rpc != NULL);
+       LASSERT (!srpc_event_pending(rpc));
+       LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
 #ifndef __KERNEL__
-        LASSERT (rpc->crpc_bulk.bk_pages == NULL);
+       LASSERT (rpc->crpc_bulk.bk_pages == NULL);
 #endif
 
-        if (rpc->crpc_fini == NULL) {
-                LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
-        } else {
-                (*rpc->crpc_fini) (rpc);
-        }
+       if (rpc->crpc_fini == NULL) {
+               LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
+       } else {
+               (*rpc->crpc_fini) (rpc);
+       }
 
-        return;
+       return;
 }
 
 static inline void
@@ -552,10 +552,10 @@ srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
        swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc,
                          lst_sched_test[lnet_cpt_of_nid(peer.nid)]);
        spin_lock_init(&rpc->crpc_lock);
-        cfs_atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
+       atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
 
-        rpc->crpc_dest         = peer;
-        rpc->crpc_priv         = priv;
+       rpc->crpc_dest         = peer;
+       rpc->crpc_priv         = priv;
         rpc->crpc_service      = service;
         rpc->crpc_bulk.bk_len  = bulklen;
         rpc->crpc_bulk.bk_niov = nbulkiov;