LU-6068 misc: update Intel copyright messages 2014
diff --git a/lnet/klnds/ralnd/ralnd.c b/lnet/klnds/ralnd/ralnd.c
index 91ff1b1..a4b06ca 100644
--- a/lnet/klnds/ralnd/ralnd.c
+++ b/lnet/klnds/ralnd/ralnd.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -294,17 +294,17 @@ kranal_set_conn_uniqueness (kra_conn_t *conn)
 int
 kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
 {
-        kra_conn_t    *conn;
-        RAP_RETURN     rrc;
+       kra_conn_t    *conn;
+       RAP_RETURN     rrc;
 
-        LASSERT (!cfs_in_interrupt());
-        LIBCFS_ALLOC(conn, sizeof(*conn));
+       LASSERT (!in_interrupt());
+       LIBCFS_ALLOC(conn, sizeof(*conn));
 
-        if (conn == NULL)
-                return -ENOMEM;
+       if (conn == NULL)
+               return -ENOMEM;
 
         memset(conn, 0, sizeof(*conn));
-        cfs_atomic_set(&conn->rac_refcount, 1);
+       atomic_set(&conn->rac_refcount, 1);
         CFS_INIT_LIST_HEAD(&conn->rac_list);
         CFS_INIT_LIST_HEAD(&conn->rac_hashlist);
         CFS_INIT_LIST_HEAD(&conn->rac_schedlist);
@@ -327,7 +327,7 @@ kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
                 return -ENETDOWN;
         }
 
-        cfs_atomic_inc(&kranal_data.kra_nconns);
+       atomic_inc(&kranal_data.kra_nconns);
         *connp = conn;
         return 0;
 }
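
This and the following hunks replace the libcfs compatibility wrappers (cfs_atomic_*, cfs_in_interrupt) with the kernel's native primitives. A minimal sketch of the same allocate-and-refcount pattern using the native API; the structure and function names below are illustrative, not from ralnd.c:

	#include <linux/atomic.h>
	#include <linux/slab.h>

	struct my_conn {
		atomic_t refcount;
	};

	static struct my_conn *my_conn_create(void)
	{
		struct my_conn *conn = kzalloc(sizeof(*conn), GFP_KERNEL);

		if (conn == NULL)
			return NULL;
		atomic_set(&conn->refcount, 1);	/* one ref for the caller */
		return conn;
	}

	static void my_conn_decref(struct my_conn *conn)
	{
		/* free on the last reference; kranal_destroy_conn likewise
		 * asserts the refcount has already dropped to zero */
		if (atomic_dec_and_test(&conn->refcount))
			kfree(conn);
	}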
@@ -335,81 +335,81 @@ kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
 void
 kranal_destroy_conn(kra_conn_t *conn)
 {
-        RAP_RETURN         rrc;
-
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (!conn->rac_scheduled);
-        LASSERT (cfs_list_empty(&conn->rac_list));
-        LASSERT (cfs_list_empty(&conn->rac_hashlist));
-        LASSERT (cfs_list_empty(&conn->rac_schedlist));
-        LASSERT (cfs_atomic_read(&conn->rac_refcount) == 0);
-        LASSERT (cfs_list_empty(&conn->rac_fmaq));
-        LASSERT (cfs_list_empty(&conn->rac_rdmaq));
-        LASSERT (cfs_list_empty(&conn->rac_replyq));
-
-        rrc = RapkDestroyRi(conn->rac_device->rad_handle,
-                            conn->rac_rihandle);
-        LASSERT (rrc == RAP_SUCCESS);
-
-        if (conn->rac_peer != NULL)
-                kranal_peer_decref(conn->rac_peer);
-
-        LIBCFS_FREE(conn, sizeof(*conn));
-        cfs_atomic_dec(&kranal_data.kra_nconns);
+       RAP_RETURN         rrc;
+
+       LASSERT (!in_interrupt());
+       LASSERT (!conn->rac_scheduled);
+       LASSERT (cfs_list_empty(&conn->rac_list));
+       LASSERT (cfs_list_empty(&conn->rac_hashlist));
+       LASSERT (cfs_list_empty(&conn->rac_schedlist));
+       LASSERT (atomic_read(&conn->rac_refcount) == 0);
+       LASSERT (cfs_list_empty(&conn->rac_fmaq));
+       LASSERT (cfs_list_empty(&conn->rac_rdmaq));
+       LASSERT (cfs_list_empty(&conn->rac_replyq));
+
+       rrc = RapkDestroyRi(conn->rac_device->rad_handle,
+                           conn->rac_rihandle);
+       LASSERT (rrc == RAP_SUCCESS);
+
+       if (conn->rac_peer != NULL)
+               kranal_peer_decref(conn->rac_peer);
+
+       LIBCFS_FREE(conn, sizeof(*conn));
+       atomic_dec(&kranal_data.kra_nconns);
 }
 
 void
 kranal_terminate_conn_locked (kra_conn_t *conn)
 {
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
-        LASSERT (!cfs_list_empty(&conn->rac_hashlist));
-        LASSERT (cfs_list_empty(&conn->rac_list));
+       LASSERT (!in_interrupt());
+       LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
+       LASSERT (!cfs_list_empty(&conn->rac_hashlist));
+       LASSERT (cfs_list_empty(&conn->rac_list));
 
-        /* Remove from conn hash table: no new callbacks */
-        cfs_list_del_init(&conn->rac_hashlist);
-        kranal_conn_decref(conn);
+       /* Remove from conn hash table: no new callbacks */
+       cfs_list_del_init(&conn->rac_hashlist);
+       kranal_conn_decref(conn);
 
-        conn->rac_state = RANAL_CONN_CLOSED;
+       conn->rac_state = RANAL_CONN_CLOSED;
 
-        /* schedule to clear out all uncompleted comms in context of dev's
-         * scheduler */
-        kranal_schedule_conn(conn);
+       /* schedule to clear out all uncompleted comms in context of dev's
+        * scheduler */
+       kranal_schedule_conn(conn);
 }
 
 void
 kranal_close_conn_locked (kra_conn_t *conn, int error)
 {
-        kra_peer_t        *peer = conn->rac_peer;
+       kra_peer_t        *peer = conn->rac_peer;
 
-        CDEBUG_LIMIT(error == 0 ? D_NET : D_NETERROR,
-                     "closing conn to %s: error %d\n",
-                     libcfs_nid2str(peer->rap_nid), error);
+       CDEBUG_LIMIT(error == 0 ? D_NET : D_NETERROR,
+                    "closing conn to %s: error %d\n",
+                    libcfs_nid2str(peer->rap_nid), error);
 
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
-        LASSERT (!cfs_list_empty(&conn->rac_hashlist));
-        LASSERT (!cfs_list_empty(&conn->rac_list));
+       LASSERT (!in_interrupt());
+       LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
+       LASSERT (!cfs_list_empty(&conn->rac_hashlist));
+       LASSERT (!cfs_list_empty(&conn->rac_list));
 
-        cfs_list_del_init(&conn->rac_list);
+       cfs_list_del_init(&conn->rac_list);
 
-        if (cfs_list_empty(&peer->rap_conns) &&
-            peer->rap_persistence == 0) {
-                /* Non-persistent peer with no more conns... */
-                kranal_unlink_peer_locked(peer);
-        }
+       if (cfs_list_empty(&peer->rap_conns) &&
+           peer->rap_persistence == 0) {
+               /* Non-persistent peer with no more conns... */
+               kranal_unlink_peer_locked(peer);
+       }
 
-        /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
-         * full timeout.  If we get a CLOSE we know the peer has stopped all
-         * RDMA.  Otherwise if we wait for the full timeout we can also be sure
-         * all RDMA has stopped. */
-        conn->rac_last_rx = jiffies;
-        cfs_mb();
+       /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
+        * full timeout.  If we get a CLOSE we know the peer has stopped all
+        * RDMA.  Otherwise if we wait for the full timeout we can also be sure
+        * all RDMA has stopped. */
+       conn->rac_last_rx = jiffies;
+       smp_mb();
 
-        conn->rac_state = RANAL_CONN_CLOSING;
-        kranal_schedule_conn(conn);             /* schedule sending CLOSE */
+       conn->rac_state = RANAL_CONN_CLOSING;
+       kranal_schedule_conn(conn);             /* schedule sending CLOSE */
 
-        kranal_conn_decref(conn);               /* lose peer's ref */
+       kranal_conn_decref(conn);               /* lose peer's ref */
 }
 
 void
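
Note the cfs_mb() to smp_mb() change in kranal_close_conn_locked() above: the full barrier orders the rac_last_rx timestamp store before the store that publishes RANAL_CONN_CLOSING, so any thread that observes the CLOSING state also sees a timestamp at least as new as the state change. A hedged sketch of the pairing, with illustrative names rather than the ralnd types, and assuming the reader checks the state first:

	#include <linux/jiffies.h>
	#include <asm/barrier.h>

	struct conn_sketch {
		unsigned long	last_rx;	/* jiffies of last receive */
		int		state;		/* 1 stands in for CLOSING */
	};

	/* writer: publish the timestamp before the new state */
	static void close_conn_sketch(struct conn_sketch *c)
	{
		c->last_rx = jiffies;
		smp_mb();		/* order last_rx before state */
		c->state = 1;
	}

	/* reader: anyone who sees CLOSING also sees the fresh timestamp */
	static int deadline_sketch(struct conn_sketch *c, unsigned long *dl)
	{
		if (c->state != 1)
			return 0;
		smp_rmb();		/* pairs with the writer's smp_mb() */
		*dl = c->last_rx + 10 * HZ;
		return 1;
	}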
@@ -838,7 +838,8 @@ kranal_connect (kra_peer_t *peer)
                 MIN(peer->rap_reconnect_interval,
                     *kranal_tunables.kra_max_reconnect_interval);
 
-       peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval * HZ;
+       peer->rap_reconnect_time = jiffies +
+               msecs_to_jiffies(peer->rap_reconnect_interval * MSEC_PER_SEC);
 
         /* Grab all blocked packets while we have the global lock */
         cfs_list_add(&zombies, &peer->rap_tx_queue);
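
The reconnect-time hunk above swaps a bare seconds-times-HZ multiply for msecs_to_jiffies(), keeping the HZ scaling inside one well-defined helper. For whole seconds the two forms compute the same deadline; a standalone comparison, where interval stands in for peer->rap_reconnect_interval:

	#include <linux/jiffies.h>
	#include <linux/time.h>		/* MSEC_PER_SEC */

	/* interval is in whole seconds; equivalent to jiffies + interval * HZ */
	static unsigned long reconnect_deadline(unsigned long interval)
	{
		return jiffies + msecs_to_jiffies(interval * MSEC_PER_SEC);
	}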
@@ -913,7 +914,7 @@ kranal_create_peer (kra_peer_t **peerp, lnet_nid_t nid)
         memset(peer, 0, sizeof(*peer));         /* zero flags etc */
 
         peer->rap_nid = nid;
-        cfs_atomic_set(&peer->rap_refcount, 1);     /* 1 ref for caller */
+       atomic_set(&peer->rap_refcount, 1);     /* 1 ref for caller */
 
         CFS_INIT_LIST_HEAD(&peer->rap_list);
         CFS_INIT_LIST_HEAD(&peer->rap_connd_list);
@@ -934,7 +935,7 @@ kranal_create_peer (kra_peer_t **peerp, lnet_nid_t nid)
                 return -ESHUTDOWN;
         }
 
-        cfs_atomic_inc(&kranal_data.kra_npeers);
+       atomic_inc(&kranal_data.kra_npeers);
 
        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
@@ -948,7 +949,7 @@ kranal_destroy_peer (kra_peer_t *peer)
         CDEBUG(D_NET, "peer %s %p deleted\n", 
                libcfs_nid2str(peer->rap_nid), peer);
 
-        LASSERT (cfs_atomic_read(&peer->rap_refcount) == 0);
+       LASSERT (atomic_read(&peer->rap_refcount) == 0);
         LASSERT (peer->rap_persistence == 0);
         LASSERT (!kranal_peer_active(peer));
         LASSERT (!peer->rap_connecting);
@@ -962,7 +963,7 @@ kranal_destroy_peer (kra_peer_t *peer)
          * they are destroyed, so we can be assured that _all_ state to do
          * with this peer has been cleaned up when its refcount drops to
          * zero. */
-        cfs_atomic_dec(&kranal_data.kra_npeers);
+       atomic_dec(&kranal_data.kra_npeers);
 }
 
 kra_peer_t *
@@ -984,7 +985,7 @@ kranal_find_peer_locked (lnet_nid_t nid)
 
                 CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
                        peer, libcfs_nid2str(nid), 
-                       cfs_atomic_read(&peer->rap_refcount));
+                      atomic_read(&peer->rap_refcount));
                 return peer;
         }
         return NULL;
@@ -1174,8 +1175,8 @@ kranal_get_conn_by_idx (int index)
                                                       rac_list);
                                 CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn,
                                        libcfs_nid2str(conn->rac_peer->rap_nid),
-                                       cfs_atomic_read(&conn->rac_refcount));
-                                cfs_atomic_inc(&conn->rac_refcount);
+                                      atomic_read(&conn->rac_refcount));
+                               atomic_inc(&conn->rac_refcount);
                                read_unlock(&kranal_data.kra_global_lock);
                                 return conn;
                         }
@@ -1437,7 +1438,7 @@ kranal_shutdown (lnet_ni_t *ni)
         unsigned long flags;
 
         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
-               cfs_atomic_read(&libcfs_kmemory));
+              atomic_read(&libcfs_kmemory));
 
         LASSERT (ni == kranal_data.kra_ni);
         LASSERT (ni->ni_data == &kranal_data);
@@ -1475,11 +1476,11 @@ kranal_shutdown (lnet_ni_t *ni)
 
                 /* Wait for all peers to be freed */
                 i = 2;
-                while (cfs_atomic_read(&kranal_data.kra_npeers) != 0) {
+               while (atomic_read(&kranal_data.kra_npeers) != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
                                "waiting for %d peers to close down\n",
-                               cfs_atomic_read(&kranal_data.kra_npeers));
+                              atomic_read(&kranal_data.kra_npeers));
                         cfs_pause(cfs_time_seconds(1));
                 }
                 /* fall through */
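
The wait loops in the shutdown path throttle their logging with the test ((i & (-i)) == i): (i & -i) isolates the lowest set bit of i, so the test holds only when i is a power of two, and the "waiting for ..." message is logged at the louder D_WARNING level at exponentially growing intervals (i = 4, 8, 16, ...) rather than on every one-second pass. A quick illustration:

	/* true when i has a single set bit (and for i == 0);
	 * the shutdown loops start at i = 2, so only powers of two qualify */
	static inline int is_pow2(int i)
	{
		return (i & (-i)) == i;
	}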
@@ -1493,7 +1494,7 @@ kranal_shutdown (lnet_ni_t *ni)
          * while there are still active connds, but these will be temporary
          * since peer creation always fails after the listener has started to
          * shut down. */
-        LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
+       LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
         
         /* Flag threads to terminate */
         kranal_data.kra_shutdown = 1;
@@ -1517,15 +1518,15 @@ kranal_shutdown (lnet_ni_t *ni)
 
         /* Wait for threads to exit */
         i = 2;
-        while (cfs_atomic_read(&kranal_data.kra_nthreads) != 0) {
+       while (atomic_read(&kranal_data.kra_nthreads) != 0) {
                 i++;
                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                        "Waiting for %d threads to terminate\n",
-                       cfs_atomic_read(&kranal_data.kra_nthreads));
+                      atomic_read(&kranal_data.kra_nthreads));
                 cfs_pause(cfs_time_seconds(1));
         }
 
-        LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
+       LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
         if (kranal_data.kra_peers != NULL) {
                 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
                         LASSERT (cfs_list_empty(&kranal_data.kra_peers[i]));
@@ -1535,7 +1536,7 @@ kranal_shutdown (lnet_ni_t *ni)
                             kranal_data.kra_peer_hash_size);
         }
 
-        LASSERT (cfs_atomic_read(&kranal_data.kra_nconns) == 0);
+       LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
         if (kranal_data.kra_conns != NULL) {
                 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
                         LASSERT (cfs_list_empty(&kranal_data.kra_conns[i]));
@@ -1551,7 +1552,7 @@ kranal_shutdown (lnet_ni_t *ni)
         kranal_free_txdescs(&kranal_data.kra_idle_txs);
 
         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
-               cfs_atomic_read(&libcfs_kmemory));
+              atomic_read(&libcfs_kmemory));
 
        kranal_data.kra_init = RANAL_INIT_NOTHING;
        module_put(THIS_MODULE);
@@ -1561,7 +1562,7 @@ int
 kranal_startup (lnet_ni_t *ni)
 {
         struct timeval    tv;
-        int               pkmem = cfs_atomic_read(&libcfs_kmemory);
+       int               pkmem = atomic_read(&libcfs_kmemory);
         int               rc;
         int               i;
         kra_device_t     *dev;
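
The pkmem snapshot taken at the top of kranal_startup() closes the loop with the D_MALLOC messages in kranal_shutdown(): the libcfs-global allocation counter libcfs_kmemory is read before the NAL sets anything up and again after it tears everything down, so a nonzero difference points at leaked LIBCFS_ALLOC memory. The bracketing pattern, in outline:

	int pkmem = atomic_read(&libcfs_kmemory);	/* snapshot at startup */
	/* ... allocate, run, then free everything ... */
	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
	       atomic_read(&libcfs_kmemory));		/* compare at shutdown */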