Whamcloud - gitweb
b=17087
author    maxim <maxim>
Sat, 29 Nov 2008 13:12:50 +0000 (13:12 +0000)
committer maxim <maxim>
Sat, 29 Nov 2008 13:12:50 +0000 (13:12 +0000)
i=rread
i=isaac
Landing the patch making ksocklnd more OS-neutral (normalizing the names of
lock primitives and moving Linux-specific code to socklnd_lib-linux.c); a
usage sketch of the new naming convention follows the file list below.

libcfs/include/libcfs/linux/linux-lock.h
libcfs/include/libcfs/linux/linux-prim.h
libcfs/include/libcfs/linux/linux-tcpip.h
lnet/klnds/socklnd/socklnd.c
lnet/klnds/socklnd/socklnd.h
lnet/klnds/socklnd/socklnd_cb.c
lnet/klnds/socklnd/socklnd_lib-linux.c
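
The renaming follows the usual libcfs convention: portable LNET code calls
cfs_-prefixed primitives, and each platform header maps them onto its native
API. A minimal sketch of what portable callers look like after this patch
(the lock name is illustrative only):

/* portable socklnd-style code: only cfs_ names appear */
static cfs_spinlock_t demo_lock;

static void
demo_critical_section (void)
{
        cfs_spin_lock_init(&demo_lock);         /* once, at setup */

        cfs_spin_lock_bh(&demo_lock);           /* BH-safe variant */
        /* ... touch state shared with socket callbacks ... */
        cfs_spin_unlock_bh(&demo_lock);
}

/* On Linux these expand to the native spin_lock_init()/spin_lock_bh()/
 * spin_unlock_bh() (see linux-lock.h below); a non-Linux port supplies
 * its own definitions instead. */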

libcfs/include/libcfs/linux/linux-lock.h
index ddc6790..67a65cb 100644
@@ -193,4 +193,28 @@ static inline void lockdep_on(void)
 #endif /* CONFIG_DEBUG_LOCK_ALLOC */
 
 
+/*
+ * spinlock "implementation"
+ */
+
+typedef spinlock_t cfs_spinlock_t;
+
+#define cfs_spin_lock_init(lock) spin_lock_init(lock)
+#define cfs_spin_lock(lock)      spin_lock(lock)
+#define cfs_spin_lock_bh(lock)   spin_lock_bh(lock)
+#define cfs_spin_unlock(lock)    spin_unlock(lock)
+#define cfs_spin_unlock_bh(lock) spin_unlock_bh(lock)
+
+/*
+ * rwlock "implementation"
+ */
+
+typedef rwlock_t cfs_rwlock_t;
+
+#define cfs_rwlock_init(lock)      rwlock_init(lock)
+#define cfs_read_lock(lock)        read_lock(lock)
+#define cfs_read_unlock(lock)      read_unlock(lock)
+#define cfs_write_lock_bh(lock)    write_lock_bh(lock)
+#define cfs_write_unlock_bh(lock)  write_unlock_bh(lock)
+
 #endif /* __LIBCFS_LINUX_CFS_LOCK_H__ */
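
Only the rwlock variants socklnd actually needs are wrapped here: plain
read-side locking and BH-disabling write-side locking. A hedged usage sketch
mirroring how ksnd_global_lock is taken in socklnd.c below (all names are
illustrative):

static cfs_rwlock_t demo_global_lock;   /* cfs_rwlock_init() once at setup */

static void
demo_lookup (void)
{
        cfs_read_lock(&demo_global_lock);
        /* ... walk a peer hash, take refcounts ... */
        cfs_read_unlock(&demo_global_lock);
}

static void
demo_update (void)
{
        cfs_write_lock_bh(&demo_global_lock);
        /* BHs disabled: socket callbacks take this lock too */
        /* ... add or remove peers ... */
        cfs_write_unlock_bh(&demo_global_lock);
}
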
libcfs/include/libcfs/linux/linux-prim.h
index 1f3cbee..23cc250 100644
@@ -135,6 +135,8 @@ typedef struct proc_dir_entry           cfs_proc_dir_entry_t;
 #define CFS_TASK_UNINT                  TASK_UNINTERRUPTIBLE
 #define CFS_TASK_RUNNING                TASK_RUNNING
 
+#define cfs_set_current_state(state) set_current_state(state)
+
 typedef wait_queue_t                   cfs_waitlink_t;
 typedef wait_queue_head_t              cfs_waitq_t;
 typedef long                            cfs_task_state_t;
@@ -275,4 +277,30 @@ do {                                                              \
         ret = wait_event_interruptible_timeout(wq, c, timeout)
 #endif
 
+/*
+ * atomic
+ */
+
+typedef atomic_t cfs_atomic_t;
+
+#define cfs_atomic_read(atom)         atomic_read(atom)
+#define cfs_atomic_inc(atom)          atomic_inc(atom)
+#define cfs_atomic_dec(atom)          atomic_dec(atom)
+#define cfs_atomic_dec_and_test(atom) atomic_dec_and_test(atom)
+#define cfs_atomic_set(atom, value)   atomic_set(atom, value)
+#define cfs_atomic_add(value, atom)   atomic_add(value, atom)
+#define cfs_atomic_sub(value, atom)   atomic_sub(value, atom)
+
+/*
+ * membar
+ */
+
+#define cfs_mb() mb()
+
+/*
+ * interrupt
+ */
+
+#define cfs_in_interrupt() in_interrupt()
+
 #endif
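
The atomic wrappers map one-to-one onto the Linux API, and cfs_mb() likewise
wraps mb(). socklnd uses the atomics almost exclusively for reference
counting; a sketch in the style of the ksocknal_*_addref()/decref() helpers
later in this patch (demo_destroy() is a hypothetical destructor):

typedef struct {
        cfs_atomic_t do_refcount;       /* cfs_atomic_set(.., 1) at create */
} demo_obj_t;

static void demo_destroy (demo_obj_t *obj);     /* hypothetical */

static void
demo_addref (demo_obj_t *obj)
{
        LASSERT (cfs_atomic_read(&obj->do_refcount) > 0);
        cfs_atomic_inc(&obj->do_refcount);
}

static void
demo_decref (demo_obj_t *obj)
{
        LASSERT (cfs_atomic_read(&obj->do_refcount) > 0);
        if (cfs_atomic_dec_and_test(&obj->do_refcount))
                demo_destroy(obj);
}
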
libcfs/include/libcfs/linux/linux-tcpip.h
index f675173..d74b4ad 100644
@@ -72,8 +72,18 @@ typedef struct socket   cfs_socket_t;
 #endif
 
 #define SOCK_SNDBUF(so)         ((so)->sk->sk_sndbuf)
-#define SOCK_WMEM_QUEUED(so)    ((so)->sk->sk_wmem_queued)
-#define SOCK_ERROR(so)          ((so)->sk->sk_err)
 #define SOCK_TEST_NOSPACE(so)   test_bit(SOCK_NOSPACE, &(so)->flags)
 
+static inline int
+libcfs_sock_error(struct socket *sock)
+{
+        return sock->sk->sk_err;
+}
+
+static inline int
+libcfs_sock_wmem_queued(struct socket *sock)
+{
+        return sock->sk->sk_wmem_queued;
+}
+
 #endif
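
Note the shift in style here: SOCK_WMEM_QUEUED() and SOCK_ERROR() were
macros, while their replacements are static inline functions, so each port
can implement them as it likes and callers get real type checking on the
struct socket pointer. A caller-side sketch (the 64 KiB threshold is
arbitrary):

static int
demo_sock_healthy (struct socket *sock)
{
        if (libcfs_sock_error(sock) != 0)
                return 0;               /* socket has a pending error */

        /* bytes still queued in the socket's send buffer */
        return libcfs_sock_wmem_queued(sock) < 64 * 1024;
}
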
lnet/klnds/socklnd/socklnd.c
index 8190d3d..5201846 100644
@@ -73,7 +73,7 @@ ksocknal_create_route (__u32 ipaddr, int port)
         if (route == NULL)
                 return (NULL);
 
-        atomic_set (&route->ksnr_refcount, 1);
+        cfs_atomic_set (&route->ksnr_refcount, 1);
         route->ksnr_peer = NULL;
         route->ksnr_retry_interval = 0;         /* OK to connect at any time */
         route->ksnr_ipaddr = ipaddr;
@@ -91,7 +91,7 @@ ksocknal_create_route (__u32 ipaddr, int port)
 void
 ksocknal_destroy_route (ksock_route_t *route)
 {
-        LASSERT (atomic_read(&route->ksnr_refcount) == 0);
+        LASSERT (cfs_atomic_read(&route->ksnr_refcount) == 0);
 
         if (route->ksnr_peer != NULL)
                 ksocknal_peer_decref(route->ksnr_peer);
@@ -107,7 +107,7 @@ ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
 
         LASSERT (id.nid != LNET_NID_ANY);
         LASSERT (id.pid != LNET_PID_ANY);
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
 
         LIBCFS_ALLOC (peer, sizeof (*peer));
         if (peer == NULL)
@@ -117,7 +117,7 @@ ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
 
         peer->ksnp_ni = ni;
         peer->ksnp_id = id;
-        atomic_set (&peer->ksnp_refcount, 1);   /* 1 ref for caller */
+        cfs_atomic_set (&peer->ksnp_refcount, 1);   /* 1 ref for caller */
         peer->ksnp_closing = 0;
         peer->ksnp_accepting = 0;
         peer->ksnp_zc_next_cookie = 1;
@@ -126,12 +126,12 @@ ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
         CFS_INIT_LIST_HEAD (&peer->ksnp_routes);
         CFS_INIT_LIST_HEAD (&peer->ksnp_tx_queue);
         CFS_INIT_LIST_HEAD (&peer->ksnp_zc_req_list);
-        spin_lock_init(&peer->ksnp_lock);
+        cfs_spin_lock_init(&peer->ksnp_lock);
 
-        spin_lock_bh (&net->ksnn_lock);
+        cfs_spin_lock_bh (&net->ksnn_lock);
 
         if (net->ksnn_shutdown) {
-                spin_unlock_bh (&net->ksnn_lock);
+                cfs_spin_unlock_bh (&net->ksnn_lock);
 
                 LIBCFS_FREE(peer, sizeof(*peer));
                 CERROR("Can't create peer: network shutdown\n");
@@ -140,7 +140,7 @@ ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
 
         net->ksnn_npeers++;
 
-        spin_unlock_bh (&net->ksnn_lock);
+        cfs_spin_unlock_bh (&net->ksnn_lock);
 
         *peerp = peer;
         return 0;
@@ -154,7 +154,7 @@ ksocknal_destroy_peer (ksock_peer_t *peer)
         CDEBUG (D_NET, "peer %s %p deleted\n",
                 libcfs_id2str(peer->ksnp_id), peer);
 
-        LASSERT (atomic_read (&peer->ksnp_refcount) == 0);
+        LASSERT (cfs_atomic_read (&peer->ksnp_refcount) == 0);
         LASSERT (peer->ksnp_accepting == 0);
         LASSERT (list_empty (&peer->ksnp_conns));
         LASSERT (list_empty (&peer->ksnp_routes));
@@ -167,9 +167,9 @@ ksocknal_destroy_peer (ksock_peer_t *peer)
          * until they are destroyed, so we can be assured that _all_ state to
          * do with this peer has been cleaned up when its refcount drops to
          * zero. */
-        spin_lock_bh (&net->ksnn_lock);
+        cfs_spin_lock_bh (&net->ksnn_lock);
         net->ksnn_npeers--;
-        spin_unlock_bh (&net->ksnn_lock);
+        cfs_spin_unlock_bh (&net->ksnn_lock);
 }
 
 ksock_peer_t *
@@ -194,7 +194,7 @@ ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
 
                 CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
                        peer, libcfs_id2str(id),
-                       atomic_read(&peer->ksnp_refcount));
+                       cfs_atomic_read(&peer->ksnp_refcount));
                 return (peer);
         }
         return (NULL);
@@ -205,11 +205,11 @@ ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id)
 {
         ksock_peer_t     *peer;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
         peer = ksocknal_find_peer_locked (ni, id);
         if (peer != NULL)                       /* +1 ref for caller? */
                 ksocknal_peer_addref(peer);
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
         return (peer);
 }
@@ -257,7 +257,7 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index,
         int                j;
         int                rc = -ENOENT;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
 
@@ -315,7 +315,7 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index,
                 }
         }
  out:
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
         return (rc);
 }
 
@@ -470,7 +470,7 @@ ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
                 return (-ENOMEM);
         }
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         /* always called with a ref on ni, so shutdown can't have started */
         LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
@@ -502,7 +502,7 @@ ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
                 route2->ksnr_share_count++;
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         return (0);
 }
@@ -574,7 +574,7 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
         int                i;
         int                rc = -ENOENT;
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         if (id.nid != LNET_NID_ANY)
                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
@@ -611,7 +611,7 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
                 }
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         ksocknal_txlist_done(ni, &zombies, 1);
 
@@ -627,7 +627,7 @@ ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
         struct list_head  *ctmp;
         int                i;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                 list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
@@ -644,13 +644,13 @@ ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
 
                                 conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
                                 ksocknal_conn_addref(conn);
-                                read_unlock (&ksocknal_data.ksnd_global_lock);
+                                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                                 return (conn);
                         }
                 }
         }
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
         return (NULL);
 }
 
@@ -695,7 +695,7 @@ ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
         int                i;
         int                nip;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         nip = net->ksnn_ninterfaces;
         LASSERT (nip <= LNET_MAX_INTERFACES);
@@ -703,7 +703,7 @@ ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
         /* Only offer interfaces for additional connections if I have 
          * more than one. */
         if (nip < 2) {
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                 return 0;
         }
 
@@ -712,7 +712,7 @@ ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
                 LASSERT (ipaddrs[i] != 0);
         }
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
         return (nip);
 }
 
@@ -751,7 +751,7 @@ ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
 int
 ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
 {
-        rwlock_t           *global_lock = &ksocknal_data.ksnd_global_lock;
+        cfs_rwlock_t       *global_lock = &ksocknal_data.ksnd_global_lock;
         ksock_net_t        *net = peer->ksnp_ni->ni_data;
         ksock_interface_t  *iface;
         ksock_interface_t  *best_iface;
@@ -773,7 +773,7 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
         /* Also note that I'm not going to return more than n_peerips
          * interfaces, even if I have more myself */
 
-        write_lock_bh (global_lock);
+        cfs_write_lock_bh (global_lock);
 
         LASSERT (n_peerips <= LNET_MAX_INTERFACES);
         LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
@@ -849,7 +849,7 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
         /* Overwrite input peer IP addresses */
         memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
 
-        write_unlock_bh (global_lock);
+        cfs_write_unlock_bh (global_lock);
 
         return (n_ips);
 }
@@ -859,7 +859,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                        __u32 *peer_ipaddrs, int npeer_ipaddrs)
 {
         ksock_route_t      *newroute = NULL;
-        rwlock_t           *global_lock = &ksocknal_data.ksnd_global_lock;
+        cfs_rwlock_t       *global_lock = &ksocknal_data.ksnd_global_lock;
         lnet_ni_t          *ni = peer->ksnp_ni;
         ksock_net_t        *net = ni->ni_data;
         struct list_head   *rtmp;
@@ -877,12 +877,12 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
          * expecting to be dealing with small numbers of interfaces, so the
          * O(n**3)-ness here shouldn't matter */
 
-        write_lock_bh (global_lock);
+        cfs_write_lock_bh (global_lock);
 
         if (net->ksnn_ninterfaces < 2) {
                 /* Only create additional connections 
                  * if I have > 1 interface */
-                write_unlock_bh (global_lock);
+                cfs_write_unlock_bh (global_lock);
                 return;
         }
 
@@ -892,13 +892,13 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                 if (newroute != NULL) {
                         newroute->ksnr_ipaddr = peer_ipaddrs[i];
                 } else {
-                        write_unlock_bh (global_lock);
+                        cfs_write_unlock_bh (global_lock);
 
                         newroute = ksocknal_create_route(peer_ipaddrs[i], port);
                         if (newroute == NULL)
                                 return;
 
-                        write_lock_bh (global_lock);
+                        cfs_write_lock_bh (global_lock);
                 }
 
                 if (peer->ksnp_closing) {
@@ -966,7 +966,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                 newroute = NULL;
         }
 
-        write_unlock_bh (global_lock);
+        cfs_write_unlock_bh (global_lock);
         if (newroute != NULL)
                 ksocknal_route_decref(newroute);
 }
@@ -994,12 +994,12 @@ ksocknal_accept (lnet_ni_t *ni, cfs_socket_t *sock)
         cr->ksncr_ni   = ni;
         cr->ksncr_sock = sock;
 
-        spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
 
         list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
         cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
 
-        spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
         return 0;
 }
 
@@ -1021,7 +1021,7 @@ int
 ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                       cfs_socket_t *sock, int type)
 {
-        rwlock_t          *global_lock = &ksocknal_data.ksnd_global_lock;
+        cfs_rwlock_t      *global_lock = &ksocknal_data.ksnd_global_lock;
         CFS_LIST_HEAD     (zombies);
         lnet_process_id_t  peerid;
         struct list_head  *tmp;
@@ -1057,10 +1057,10 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         conn->ksnc_sock = sock;
         /* 2 ref, 1 for conn, another extra ref prevents socket
          * being closed before establishment of connection */
-        atomic_set (&conn->ksnc_sock_refcount, 2);
+        cfs_atomic_set (&conn->ksnc_sock_refcount, 2);
         conn->ksnc_type = type;
         ksocknal_lib_save_callback(sock, conn);
-        atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
+        cfs_atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
 
         conn->ksnc_zc_capable = ksocknal_lib_zc_capable(sock);
         conn->ksnc_rx_ready = 0;
@@ -1070,7 +1070,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         conn->ksnc_tx_ready = 0;
         conn->ksnc_tx_scheduled = 0;
         conn->ksnc_tx_mono = NULL;
-        atomic_set (&conn->ksnc_tx_nob, 0);
+        cfs_atomic_set (&conn->ksnc_tx_nob, 0);
 
         LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
                                      kshm_ips[LNET_MAX_INTERFACES]));
@@ -1097,9 +1097,9 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
                 peerid = peer->ksnp_id;
 
-                write_lock_bh(global_lock);
+                cfs_write_lock_bh(global_lock);
                 conn->ksnc_proto = peer->ksnp_proto;
-                write_unlock_bh(global_lock);
+                cfs_write_unlock_bh(global_lock);
 
                 if (conn->ksnc_proto == NULL) {
                         conn->ksnc_proto = &ksocknal_protocol_v2x;
@@ -1130,13 +1130,13 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
 
         if (active) {
                 ksocknal_peer_addref(peer);
-                write_lock_bh (global_lock);
+                cfs_write_lock_bh (global_lock);
         } else {
                 rc = ksocknal_create_peer(&peer, ni, peerid);
                 if (rc != 0)
                         goto failed_1;
 
-                write_lock_bh (global_lock);
+                cfs_write_lock_bh (global_lock);
 
                 /* called with a ref on ni, so shutdown can't have started */
                 LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
@@ -1267,9 +1267,9 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         conn->ksnc_scheduler = sched;
 
         /* Set the deadline for the outgoing HELLO to drain */
-        conn->ksnc_tx_bufnob = SOCK_WMEM_QUEUED(sock);
+        conn->ksnc_tx_bufnob = libcfs_sock_wmem_queued(sock);
         conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-        mb();       /* order with adding to peer's conn list */
+        cfs_mb();   /* order with adding to peer's conn list */
 
         list_add (&conn->ksnc_list, &peer->ksnp_conns);
         ksocknal_conn_addref(conn);
@@ -1287,7 +1287,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                 ksocknal_queue_tx_locked (tx, conn);
         }
 
-        write_unlock_bh (global_lock);
+        cfs_write_unlock_bh (global_lock);
 
         /* We've now got a new connection.  Any errors from here on are just
          * like "normal" comms errors and we close the connection normally.
@@ -1327,7 +1327,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         if (rc == 0)
                 rc = ksocknal_lib_setup_sock(sock);
 
-        write_lock_bh(global_lock);
+        cfs_write_lock_bh(global_lock);
 
         /* NB my callbacks block while I hold ksnd_global_lock */
         ksocknal_lib_set_callback(sock, conn);
@@ -1335,12 +1335,12 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         if (!active)
                 peer->ksnp_accepting--;
 
-        write_unlock_bh(global_lock);
+        cfs_write_unlock_bh(global_lock);
 
         if (rc != 0) {
-                write_lock_bh(global_lock);
+                cfs_write_lock_bh(global_lock);
                 ksocknal_close_conn_locked(conn, rc);
-                write_unlock_bh(global_lock);
+                cfs_write_unlock_bh(global_lock);
         } else if (ksocknal_connsock_addref(conn) == 0) {
                 /* Allow I/O to proceed. */
                 ksocknal_read_callback(conn);
@@ -1361,7 +1361,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                 ksocknal_unlink_peer_locked(peer);
         }
 
-        write_unlock_bh (global_lock);
+        cfs_write_unlock_bh (global_lock);
 
         if (warn != NULL) {
                 if (rc < 0)
@@ -1381,9 +1381,9 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                         ksocknal_send_hello(ni, conn, peerid.nid, hello);
                 }
 
-                write_lock_bh(global_lock);
+                cfs_write_lock_bh(global_lock);
                 peer->ksnp_accepting--;
-                write_unlock_bh(global_lock);
+                cfs_write_unlock_bh(global_lock);
         }
 
         ksocknal_txlist_done(ni, &zombies, 1);
@@ -1460,12 +1460,12 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
                 }
         }
 
-        spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
 
         list_add_tail (&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
         cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
 
-        spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 }
 
 void
@@ -1478,7 +1478,7 @@ ksocknal_peer_failed (ksock_peer_t *peer)
          * tell LNET I think the peer is dead if it's to another kernel and
          * there are no connections or connection attempts in existance. */
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
             list_empty(&peer->ksnp_conns) &&
@@ -1490,7 +1490,7 @@ ksocknal_peer_failed (ksock_peer_t *peer)
                                          peer->ksnp_last_alive));
         }
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
         if (notify)
                 lnet_notify (peer->ksnp_ni, peer->ksnp_id.nid, 0,
@@ -1509,7 +1509,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
          * abort all buffered data */
         LASSERT (conn->ksnc_sock == NULL);
 
-        spin_lock(&peer->ksnp_lock);
+        cfs_spin_lock(&peer->ksnp_lock);
 
         cfs_list_for_each_entry_safe_typed(tx, tmp, &peer->ksnp_zc_req_list,
                                            ksock_tx_t, tx_zc_list) {
@@ -1523,7 +1523,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
                 list_add(&tx->tx_zc_list, &zlist);
         }
 
-        spin_unlock(&peer->ksnp_lock);
+        cfs_spin_unlock(&peer->ksnp_lock);
 
         while (!list_empty(&zlist)) {
                 tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
@@ -1547,7 +1547,7 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
         LASSERT(conn->ksnc_closing);
 
         /* wake up the scheduler to "send" all remaining packets to /dev/null */
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         /* a closing conn is always ready to tx */
         conn->ksnc_tx_ready = 1;
@@ -1563,10 +1563,10 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
                 cfs_waitq_signal (&sched->kss_waitq);
         }
 
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
 
         /* serialise with callbacks */
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
 
@@ -1581,7 +1581,7 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
                 peer->ksnp_error = 0;     /* avoid multiple notifications */
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         if (failed)
                 ksocknal_peer_failed(peer);
@@ -1599,13 +1599,13 @@ ksocknal_queue_zombie_conn (ksock_conn_t *conn)
 {
         /* Queue the conn for the reaper to destroy */
 
-        LASSERT (atomic_read(&conn->ksnc_conn_refcount) == 0);
-        spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+        LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
 
         list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
         cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
 
-        spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 }
 
 void
@@ -1614,8 +1614,8 @@ ksocknal_destroy_conn (ksock_conn_t *conn)
         /* Final coup-de-grace of the reaper */
         CDEBUG (D_NET, "connection %p\n", conn);
 
-        LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
-        LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
+        LASSERT (cfs_atomic_read (&conn->ksnc_conn_refcount) == 0);
+        LASSERT (cfs_atomic_read (&conn->ksnc_sock_refcount) == 0);
         LASSERT (conn->ksnc_sock == NULL);
         LASSERT (conn->ksnc_route == NULL);
         LASSERT (!conn->ksnc_tx_scheduled);
@@ -1693,11 +1693,11 @@ ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
         __u32             ipaddr = conn->ksnc_ipaddr;
         int               count;
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         count = ksocknal_close_peer_conns_locked (peer, ipaddr, why);
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         return (count);
 }
@@ -1713,7 +1713,7 @@ ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
         int                 i;
         int                 count = 0;
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         if (id.nid != LNET_NID_ANY)
                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
@@ -1735,7 +1735,7 @@ ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
                 }
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         /* wildcards always succeed */
         if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
@@ -1776,7 +1776,7 @@ ksocknal_push_peer (ksock_peer_t *peer)
         ksock_conn_t     *conn;
 
         for (index = 0; ; index++) {
-                read_lock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
                 i = 0;
                 conn = NULL;
@@ -1789,7 +1789,7 @@ ksocknal_push_peer (ksock_peer_t *peer)
                         }
                 }
 
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
                 if (conn == NULL)
                         break;
@@ -1811,7 +1811,7 @@ ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                 for (j = 0; ; j++) {
-                        read_lock (&ksocknal_data.ksnd_global_lock);
+                        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
                         index = 0;
                         peer = NULL;
@@ -1834,7 +1834,7 @@ ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
                                 }
                         }
 
-                        read_unlock (&ksocknal_data.ksnd_global_lock);
+                        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
                         if (peer != NULL) {
                                 rc = 0;
@@ -1865,7 +1865,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
             netmask == 0)
                 return (-EINVAL);
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         iface = ksocknal_ip2iface(ni, ipaddress);
         if (iface != NULL) {
@@ -1902,7 +1902,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
                 /* NB only new connections will pay attention to the new interface! */
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         return (rc);
 }
@@ -1960,7 +1960,7 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
         int                i;
         int                j;
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < net->ksnn_ninterfaces; i++) {
                 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
@@ -1989,7 +1989,7 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
                 }
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         return (rc);
 }
@@ -2006,7 +2006,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
                 ksock_net_t       *net = ni->ni_data;
                 ksock_interface_t *iface;
 
-                read_lock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
                 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
                         rc = -ENOENT;
@@ -2020,7 +2020,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
                         data->ioc_u32[3] = iface->ksni_nroutes;
                 }
 
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                 return rc;
         }
 
@@ -2125,7 +2125,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 void
 ksocknal_free_buffers (void)
 {
-        LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
+        LASSERT (cfs_atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
 
         if (ksocknal_data.ksnd_schedulers != NULL)
                 LIBCFS_FREE (ksocknal_data.ksnd_schedulers,
@@ -2135,7 +2135,7 @@ ksocknal_free_buffers (void)
                      sizeof (struct list_head) *
                      ksocknal_data.ksnd_peer_hash_size);
 
-        spin_lock(&ksocknal_data.ksnd_tx_lock);
+        cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
 
         if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
                 struct list_head  zlist;
@@ -2143,7 +2143,7 @@ ksocknal_free_buffers (void)
 
                 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
                 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
-                spin_unlock(&ksocknal_data.ksnd_tx_lock);
+                cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
 
                 while(!list_empty(&zlist)) {
                         tx = list_entry(zlist.next, ksock_tx_t, tx_list);
@@ -2151,7 +2151,7 @@ ksocknal_free_buffers (void)
                         LIBCFS_FREE(tx, tx->tx_desc_size);
                 }
         } else {
-                spin_unlock(&ksocknal_data.ksnd_tx_lock);
+                cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
         }
 }
 
@@ -2162,7 +2162,7 @@ ksocknal_base_shutdown (void)
         int            i;
 
         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
-               atomic_read (&libcfs_kmemory));
+               cfs_atomic_read (&libcfs_kmemory));
         LASSERT (ksocknal_data.ksnd_nnets == 0);
 
         switch (ksocknal_data.ksnd_init) {
@@ -2203,17 +2203,17 @@ ksocknal_base_shutdown (void)
                         }
 
                 i = 4;
-                read_lock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_lock (&ksocknal_data.ksnd_global_lock);
                 while (ksocknal_data.ksnd_nthreads != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                                "waiting for %d threads to terminate\n",
                                 ksocknal_data.ksnd_nthreads);
-                        read_unlock (&ksocknal_data.ksnd_global_lock);
+                        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                         cfs_pause(cfs_time_seconds(1));
-                        read_lock (&ksocknal_data.ksnd_global_lock);
+                        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
                 }
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
                 ksocknal_free_buffers();
 
@@ -2222,27 +2222,11 @@ ksocknal_base_shutdown (void)
         }
 
         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
-               atomic_read (&libcfs_kmemory));
+               cfs_atomic_read (&libcfs_kmemory));
 
         PORTAL_MODULE_UNUSE;
 }
 
-
-__u64
-ksocknal_new_incarnation (void)
-{
-        struct timeval tv;
-
-        /* The incarnation number is the time this module loaded and it
-         * identifies this particular instance of the socknal.  Hopefully
-         * we won't be able to reboot more frequently than 1MHz for the
-         * forseeable future :) */
-
-        do_gettimeofday(&tv);
-
-        return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
-}
-
 int
 ksocknal_base_startup (void)
 {
@@ -2263,20 +2247,20 @@ ksocknal_base_startup (void)
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
                 CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
 
-        rwlock_init(&ksocknal_data.ksnd_global_lock);
+        cfs_rwlock_init(&ksocknal_data.ksnd_global_lock);
 
-        spin_lock_init (&ksocknal_data.ksnd_reaper_lock);
+        cfs_spin_lock_init (&ksocknal_data.ksnd_reaper_lock);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
         cfs_waitq_init(&ksocknal_data.ksnd_reaper_waitq);
 
-        spin_lock_init (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_lock_init (&ksocknal_data.ksnd_connd_lock);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
         cfs_waitq_init(&ksocknal_data.ksnd_connd_waitq);
 
-        spin_lock_init (&ksocknal_data.ksnd_tx_lock);
+        cfs_spin_lock_init (&ksocknal_data.ksnd_tx_lock);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
 
         /* NB memset above zeros whole of ksocknal_data, including
@@ -2295,7 +2279,7 @@ ksocknal_base_startup (void)
         for (i = 0; i < ksocknal_data.ksnd_nschedulers; i++) {
                 ksock_sched_t *kss = &ksocknal_data.ksnd_schedulers[i];
 
-                spin_lock_init (&kss->kss_lock);
+                cfs_spin_lock_init (&kss->kss_lock);
                 CFS_INIT_LIST_HEAD (&kss->kss_rx_conns);
                 CFS_INIT_LIST_HEAD (&kss->kss_tx_conns);
                 CFS_INIT_LIST_HEAD (&kss->kss_zombie_noop_txs);
@@ -2349,7 +2333,7 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
         struct list_head *tmp;
         int               i;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                 list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
@@ -2368,7 +2352,7 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
                 CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
                        "closing %d, accepting %d, err %d, zcookie "LPU64", "
                        "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
-                       atomic_read(&peer->ksnp_refcount),
+                       cfs_atomic_read(&peer->ksnp_refcount),
                        peer->ksnp_sharecount, peer->ksnp_closing,
                        peer->ksnp_accepting, peer->ksnp_error,
                        peer->ksnp_zc_next_cookie,
@@ -2378,7 +2362,7 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
                 list_for_each (tmp, &peer->ksnp_routes) {
                         route = list_entry(tmp, ksock_route_t, ksnr_list);
                         CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
-                               "del %d\n", atomic_read(&route->ksnr_refcount),
+                               "del %d\n", cfs_atomic_read(&route->ksnr_refcount),
                                route->ksnr_scheduled, route->ksnr_connecting,
                                route->ksnr_connected, route->ksnr_deleted);
                 }
@@ -2386,13 +2370,13 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
                 list_for_each (tmp, &peer->ksnp_conns) {
                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
                         CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
-                               atomic_read(&conn->ksnc_conn_refcount),
-                               atomic_read(&conn->ksnc_sock_refcount),
+                               cfs_atomic_read(&conn->ksnc_conn_refcount),
+                               cfs_atomic_read(&conn->ksnc_sock_refcount),
                                conn->ksnc_type, conn->ksnc_closing);
                 }
         }
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
         return;
 }
 
@@ -2409,18 +2393,18 @@ ksocknal_shutdown (lnet_ni_t *ni)
         LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
         LASSERT(ksocknal_data.ksnd_nnets > 0);
 
-        spin_lock_bh (&net->ksnn_lock);
+        cfs_spin_lock_bh (&net->ksnn_lock);
         net->ksnn_shutdown = 1;                 /* prevent new peers */
-        spin_unlock_bh (&net->ksnn_lock);
+        cfs_spin_unlock_bh (&net->ksnn_lock);
 
         /* Delete all peers */
         ksocknal_del_peer(ni, anyid, 0);
 
         /* Wait for all peer state to clean up */
         i = 2;
-        spin_lock_bh (&net->ksnn_lock);
+        cfs_spin_lock_bh (&net->ksnn_lock);
         while (net->ksnn_npeers != 0) {
-                spin_unlock_bh (&net->ksnn_lock);
+                cfs_spin_unlock_bh (&net->ksnn_lock);
 
                 i++;
                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
@@ -2430,9 +2414,9 @@ ksocknal_shutdown (lnet_ni_t *ni)
 
                 ksocknal_debug_peerhash(ni);
 
-                spin_lock_bh (&net->ksnn_lock);
+                cfs_spin_lock_bh (&net->ksnn_lock);
         }
-        spin_unlock_bh (&net->ksnn_lock);
+        cfs_spin_unlock_bh (&net->ksnn_lock);
 
         for (i = 0; i < net->ksnn_ninterfaces; i++) {
                 LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
@@ -2521,8 +2505,8 @@ ksocknal_startup (lnet_ni_t *ni)
                 goto fail_0;
 
         memset(net, 0, sizeof(*net));
-        spin_lock_init(&net->ksnn_lock);
-        net->ksnn_incarnation = ksocknal_new_incarnation();
+        cfs_spin_lock_init(&net->ksnn_lock);
+        net->ksnn_incarnation = ksocknal_lib_new_incarnation();
         ni->ni_data = net;
         ni->ni_maxtxcredits = *ksocknal_tunables.ksnd_credits;
         ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peercredits;
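
ksocknal_new_incarnation() is deleted above and its only caller now uses
ksocknal_lib_new_incarnation(), declared in socklnd.h below; the Linux body
presumably moves into socklnd_lib-linux.c (not shown in this excerpt). A
sketch reconstructed from the deleted function:

__u64
ksocknal_lib_new_incarnation (void)
{
        struct timeval tv;

        /* The incarnation number is the time this module loaded and it
         * identifies this particular instance of the socknal. */
        do_gettimeofday(&tv);

        return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
}
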
lnet/klnds/socklnd/socklnd.h
index 32fd06e..b4ee146 100644
@@ -69,7 +69,7 @@
 
 typedef struct                                  /* per scheduler state */
 {
-        spinlock_t        kss_lock;             /* serialise */
+        cfs_spinlock_t    kss_lock;             /* serialise */
         struct list_head  kss_rx_conns;         /* conn waiting to be read */
         struct list_head  kss_tx_conns;         /* conn waiting to be written */
         struct list_head  kss_zombie_noop_txs;  /* zombie noop tx list */
@@ -140,7 +140,7 @@ typedef struct
 typedef struct
 {
         __u64             ksnn_incarnation;     /* my epoch */
-        spinlock_t        ksnn_lock;            /* serialise */
+        cfs_spinlock_t    ksnn_lock;            /* serialise */
         int               ksnn_npeers;          /* # peers */
         int               ksnn_shutdown;        /* shutting down? */
         int               ksnn_ninterfaces;     /* IP interfaces */
@@ -152,7 +152,7 @@ typedef struct
         int               ksnd_init;            /* initialisation state */
         int               ksnd_nnets;           /* # networks set up */
 
-        rwlock_t          ksnd_global_lock;     /* stabilize peer/conn ops */
+        cfs_rwlock_t      ksnd_global_lock;     /* stabilize peer/conn ops */
         struct list_head *ksnd_peers;           /* hash table of all my known peers */
         int               ksnd_peer_hash_size;  /* size of ksnd_peers */
 
@@ -161,14 +161,14 @@ typedef struct
         int               ksnd_nschedulers;     /* # schedulers */
         ksock_sched_t    *ksnd_schedulers;      /* their state */
 
-        atomic_t          ksnd_nactive_txs;     /* #active txs */
+        cfs_atomic_t      ksnd_nactive_txs;     /* #active txs */
 
         struct list_head  ksnd_deathrow_conns;  /* conns to close: reaper_lock*/
         struct list_head  ksnd_zombie_conns;    /* conns to free: reaper_lock */
         struct list_head  ksnd_enomem_conns;    /* conns to retry: reaper_lock*/
         cfs_waitq_t       ksnd_reaper_waitq;    /* reaper sleeps here */
         cfs_time_t        ksnd_reaper_waketime; /* when reaper will wake */
-        spinlock_t        ksnd_reaper_lock;     /* serialise */
+        cfs_spinlock_t    ksnd_reaper_lock;     /* serialise */
 
         int               ksnd_enomem_tx;       /* test ENOMEM sender */
         int               ksnd_stall_tx;        /* test sluggish sender */
@@ -178,10 +178,10 @@ typedef struct
         struct list_head  ksnd_connd_routes;    /* routes waiting to be connected */
         cfs_waitq_t       ksnd_connd_waitq;     /* connds sleep here */
         int               ksnd_connd_connecting;/* # connds connecting */
-        spinlock_t        ksnd_connd_lock;      /* serialise */
+        cfs_spinlock_t    ksnd_connd_lock;      /* serialise */
 
         struct list_head  ksnd_idle_noop_txs;   /* list head for freed noop tx */
-        spinlock_t        ksnd_tx_lock;         /* serialise, NOT safe in g_lock */
+        cfs_spinlock_t    ksnd_tx_lock;         /* serialise, NOT safe in g_lock */
 
         ksock_irqinfo_t   ksnd_irqinfo[NR_IRQS];/* irq->scheduler lookup */
 
@@ -209,7 +209,7 @@ typedef struct                                  /* transmit packet */
 {
         struct list_head        tx_list;        /* queue on conn for transmission etc */
         struct list_head        tx_zc_list;     /* queue on peer for ZC request */
-        atomic_t                tx_refcount;    /* tx reference count */
+        cfs_atomic_t            tx_refcount;    /* tx reference count */
         int                     tx_nob;         /* # packet bytes */
         int                     tx_resid;       /* residual bytes */
         int                     tx_niov;        /* # packet iovec frags */
@@ -259,8 +259,8 @@ typedef struct ksock_conn
         cfs_socket_t       *ksnc_sock;          /* actual socket */
         void               *ksnc_saved_data_ready; /* socket's original data_ready() callback */
         void               *ksnc_saved_write_space; /* socket's original write_space() callback */
-        atomic_t            ksnc_conn_refcount; /* conn refcount */
-        atomic_t            ksnc_sock_refcount; /* sock refcount */
+        cfs_atomic_t        ksnc_conn_refcount; /* conn refcount */
+        cfs_atomic_t        ksnc_sock_refcount; /* sock refcount */
         ksock_sched_t      *ksnc_scheduler;     /* who schedules this connection */
         __u32               ksnc_myipaddr;      /* my IP */
         __u32               ksnc_ipaddr;        /* peer's IP */
@@ -300,7 +300,7 @@ typedef struct ksock_conn
                                                  * b. noop ZC-ACK packet */
         cfs_time_t          ksnc_tx_deadline;   /* when (in jiffies) tx times out */
         int                 ksnc_tx_bufnob;     /* send buffer marker */
-        atomic_t            ksnc_tx_nob;        /* # bytes queued */
+        cfs_atomic_t        ksnc_tx_nob;        /* # bytes queued */
         int                 ksnc_tx_ready;      /* write space */
         int                 ksnc_tx_scheduled;  /* being progressed */
 } ksock_conn_t;
@@ -310,7 +310,7 @@ typedef struct ksock_route
         struct list_head    ksnr_list;          /* chain on peer route list */
         struct list_head    ksnr_connd_list;    /* chain on ksnr_connd_routes */
         struct ksock_peer  *ksnr_peer;          /* owning peer */
-        atomic_t            ksnr_refcount;      /* # users */
+        cfs_atomic_t        ksnr_refcount;      /* # users */
         cfs_time_t          ksnr_timeout;       /* when (in jiffies) reconnection can happen next */
         cfs_duration_t      ksnr_retry_interval; /* how long between retries */
         __u32               ksnr_myipaddr;      /* my IP */
@@ -328,7 +328,7 @@ typedef struct ksock_peer
 {
         struct list_head    ksnp_list;          /* stash on global peer list */
         lnet_process_id_t   ksnp_id;            /* who's on the other end(s) */
-        atomic_t            ksnp_refcount;      /* # users */
+        cfs_atomic_t        ksnp_refcount;      /* # users */
         int                 ksnp_sharecount;    /* lconf usage counter */
         int                 ksnp_closing;       /* being closed */
         int                 ksnp_accepting;     /* # passive connections pending */
@@ -339,7 +339,7 @@ typedef struct ksock_peer
         struct list_head    ksnp_conns;         /* all active connections */
         struct list_head    ksnp_routes;        /* routes */
         struct list_head    ksnp_tx_queue;      /* waiting packets */
-        spinlock_t          ksnp_lock;          /* serialize, NOT safe in g_lock */
+        cfs_spinlock_t      ksnp_lock;          /* serialize, NOT safe in g_lock */
         struct list_head    ksnp_zc_req_list;   /* zero copy requests wait for ACK  */
         cfs_time_t          ksnp_last_alive;    /* when (in jiffies) I was last alive */
         lnet_ni_t          *ksnp_ni;            /* which network */
@@ -399,8 +399,8 @@ ksocknal_nid2peerlist (lnet_nid_t nid)
 static inline void
 ksocknal_conn_addref (ksock_conn_t *conn)
 {
-        LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
-        atomic_inc(&conn->ksnc_conn_refcount);
+        LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
+        cfs_atomic_inc(&conn->ksnc_conn_refcount);
 }
 
 extern void ksocknal_queue_zombie_conn (ksock_conn_t *conn);
@@ -409,8 +409,8 @@ extern void ksocknal_finalize_zcreq(ksock_conn_t *conn);
 static inline void
 ksocknal_conn_decref (ksock_conn_t *conn)
 {
-        LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
-        if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
+        LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&conn->ksnc_conn_refcount))
                 ksocknal_queue_zombie_conn(conn);
 }
 
@@ -419,13 +419,13 @@ ksocknal_connsock_addref (ksock_conn_t *conn)
 {
         int   rc = -ESHUTDOWN;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
         if (!conn->ksnc_closing) {
-                LASSERT (atomic_read(&conn->ksnc_sock_refcount) > 0);
-                atomic_inc(&conn->ksnc_sock_refcount);
+                LASSERT (cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
+                cfs_atomic_inc(&conn->ksnc_sock_refcount);
                 rc = 0;
         }
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
         return (rc);
 }
@@ -433,8 +433,8 @@ ksocknal_connsock_addref (ksock_conn_t *conn)
 static inline void
 ksocknal_connsock_decref (ksock_conn_t *conn)
 {
-        LASSERT (atomic_read(&conn->ksnc_sock_refcount) > 0);
-        if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
+        LASSERT (cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
                 LASSERT (conn->ksnc_closing);
                 libcfs_sock_release(conn->ksnc_sock);
                 conn->ksnc_sock = NULL;
@@ -445,8 +445,8 @@ ksocknal_connsock_decref (ksock_conn_t *conn)
 static inline void
 ksocknal_tx_addref (ksock_tx_t *tx)
 {
-        LASSERT (atomic_read(&tx->tx_refcount) > 0);
-        atomic_inc(&tx->tx_refcount);
+        LASSERT (cfs_atomic_read(&tx->tx_refcount) > 0);
+        cfs_atomic_inc(&tx->tx_refcount);
 }
 
 extern void ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx);
@@ -454,16 +454,16 @@ extern void ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx);
 static inline void
 ksocknal_tx_decref (ksock_tx_t *tx)
 {
-        LASSERT (atomic_read(&tx->tx_refcount) > 0);
-        if (atomic_dec_and_test(&tx->tx_refcount))
+        LASSERT (cfs_atomic_read(&tx->tx_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&tx->tx_refcount))
                 ksocknal_tx_done(NULL, tx);
 }
 
 static inline void
 ksocknal_route_addref (ksock_route_t *route)
 {
-        LASSERT (atomic_read(&route->ksnr_refcount) > 0);
-        atomic_inc(&route->ksnr_refcount);
+        LASSERT (cfs_atomic_read(&route->ksnr_refcount) > 0);
+        cfs_atomic_inc(&route->ksnr_refcount);
 }
 
 extern void ksocknal_destroy_route (ksock_route_t *route);
@@ -471,16 +471,16 @@ extern void ksocknal_destroy_route (ksock_route_t *route);
 static inline void
 ksocknal_route_decref (ksock_route_t *route)
 {
-        LASSERT (atomic_read (&route->ksnr_refcount) > 0);
-        if (atomic_dec_and_test(&route->ksnr_refcount))
+        LASSERT (cfs_atomic_read (&route->ksnr_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&route->ksnr_refcount))
                 ksocknal_destroy_route (route);
 }
 
 static inline void
 ksocknal_peer_addref (ksock_peer_t *peer)
 {
-        LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
-        atomic_inc(&peer->ksnp_refcount);
+        LASSERT (cfs_atomic_read (&peer->ksnp_refcount) > 0);
+        cfs_atomic_inc(&peer->ksnp_refcount);
 }
 
 extern void ksocknal_destroy_peer (ksock_peer_t *peer);
@@ -488,8 +488,8 @@ extern void ksocknal_destroy_peer (ksock_peer_t *peer);
 static inline void
 ksocknal_peer_decref (ksock_peer_t *peer)
 {
-        LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
-        if (atomic_dec_and_test(&peer->ksnp_refcount))
+        LASSERT (cfs_atomic_read (&peer->ksnp_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&peer->ksnp_refcount))
                 ksocknal_destroy_peer (peer);
 }
 
@@ -558,3 +558,7 @@ extern int ksocknal_lib_tunables_init(void);
 extern void ksocknal_lib_tunables_fini(void);
 
 extern void ksocknal_lib_csum_tx(ksock_tx_t *tx);
+
+extern int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
+extern __u64 ksocknal_lib_new_incarnation(void);
+extern int ksocknal_lib_bind_thread_to_cpu(int id);
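
The new ksocknal_lib_memory_pressure() hook absorbs the SOCK_NOSPACE test
that ksocknal_transmit() used to open-code (see the block deleted from
socklnd_cb.c below). A plausible Linux implementation for
socklnd_lib-linux.c, reconstructed from that deleted code; it returns
nonzero when -EAGAIN should be treated as -ENOMEM:

int
ksocknal_lib_memory_pressure (ksock_conn_t *conn)
{
        int            rc = 0;
        ksock_sched_t *sched = conn->ksnc_scheduler;

        cfs_spin_lock_bh (&sched->kss_lock);

        if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
            !conn->ksnc_tx_ready) {
                /* SOCK_NOSPACE is set when the socket fills and cleared in
                 * the write_space callback (which also sets ksnc_tx_ready).
                 * If BOTH are zero, the sender didn't fill the socket and
                 * write_space won't reschedule it, so report pressure and
                 * let the caller retry after a timeout. */
                rc = 1;
        }

        cfs_spin_unlock_bh (&sched->kss_lock);

        return rc;
}
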
lnet/klnds/socklnd/socklnd_cb.c
index 8c2c956..024c306 100644
@@ -33,7 +33,7 @@ ksocknal_alloc_tx (int size)
 
         if (size == KSOCK_NOOP_TX_SIZE) {
                 /* searching for a noop tx in free list */
-                spin_lock(&ksocknal_data.ksnd_tx_lock);
+                cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
 
                 if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
                         tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
@@ -42,7 +42,7 @@ ksocknal_alloc_tx (int size)
                         list_del(&tx->tx_list);
                 }
 
-                spin_unlock(&ksocknal_data.ksnd_tx_lock);
+                cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
         }
 
         if (tx == NULL)
@@ -51,9 +51,9 @@ ksocknal_alloc_tx (int size)
         if (tx == NULL)
                 return NULL;
 
-        atomic_set(&tx->tx_refcount, 1);
+        cfs_atomic_set(&tx->tx_refcount, 1);
         tx->tx_desc_size = size;
-        atomic_inc(&ksocknal_data.ksnd_nactive_txs);
+        cfs_atomic_inc(&ksocknal_data.ksnd_nactive_txs);
 
         return tx;
 }
@@ -61,15 +61,15 @@ ksocknal_alloc_tx (int size)
 void
 ksocknal_free_tx (ksock_tx_t *tx)
 {
-        atomic_dec(&ksocknal_data.ksnd_nactive_txs);
+        cfs_atomic_dec(&ksocknal_data.ksnd_nactive_txs);
 
         if (tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
                 /* it's a noop tx */
-                spin_lock(&ksocknal_data.ksnd_tx_lock);
+                cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
 
                 list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
 
-                spin_unlock(&ksocknal_data.ksnd_tx_lock);
+                cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
         } else {
                 LIBCFS_FREE(tx, tx->tx_desc_size);
         }
@@ -188,7 +188,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                         rc = ksocknal_send_kiov (conn, tx);
                 }
 
-                bufnob = SOCK_WMEM_QUEUED(conn->ksnc_sock);
+                bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
                 if (rc > 0)                     /* sent something? */
                         conn->ksnc_tx_bufnob += rc; /* account it */
 
@@ -199,42 +199,23 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                                 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
                         conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
                         conn->ksnc_tx_bufnob = bufnob;
-                        mb();
+                        cfs_mb();
                 }
 
                 if (rc <= 0) { /* Didn't write anything? */
-                        ksock_sched_t *sched;
 
                         if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
                                 rc = -EAGAIN;
 
-                        if (rc != -EAGAIN)
-                                break;
-
                         /* Check if EAGAIN is due to memory pressure */
-
-                        sched = conn->ksnc_scheduler;
-                        spin_lock_bh (&sched->kss_lock);
-
-                        if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
-                            !conn->ksnc_tx_ready) {
-                                /* SOCK_NOSPACE is set when the socket fills
-                                 * and cleared in the write_space callback
-                                 * (which also sets ksnc_tx_ready).  If
-                                 * SOCK_NOSPACE and ksnc_tx_ready are BOTH
-                                 * zero, I didn't fill the socket and
-                                 * write_space won't reschedule me, so I
-                                 * return -ENOMEM to get my caller to retry
-                                 * after a timeout */
+                        if(rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
                                 rc = -ENOMEM;
-                        }
 
-                        spin_unlock_bh (&sched->kss_lock);
                         break;
                 }
 
                 /* socket's wmem_queued now includes 'rc' bytes */
-                atomic_sub (rc, &conn->ksnc_tx_nob);
+                cfs_atomic_sub (rc, &conn->ksnc_tx_nob);
                 rc = 0;
 
         } while (tx->tx_resid != 0);
@@ -265,7 +246,7 @@ ksocknal_recv_iov (ksock_conn_t *conn)
         conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
         conn->ksnc_rx_deadline =
                 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-        mb();                           /* order with setting rx_started */
+        cfs_mb();                       /* order with setting rx_started */
         conn->ksnc_rx_started = 1;
 
         conn->ksnc_rx_nob_wanted -= nob;
@@ -309,7 +290,7 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
         conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
         conn->ksnc_rx_deadline =
                 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-        mb();                           /* order with setting rx_started */
+        cfs_mb();                       /* order with setting rx_started */
         conn->ksnc_rx_started = 1;
 
         conn->ksnc_rx_nob_wanted -= nob;
@@ -422,7 +403,7 @@ ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
 
                 list_del (&tx->tx_list);
 
-                LASSERT (atomic_read(&tx->tx_refcount) == 1);
+                LASSERT (cfs_atomic_read(&tx->tx_refcount) == 1);
                 ksocknal_tx_done (ni, tx);
         }
 }
@@ -461,7 +442,7 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
 
         ksocknal_tx_addref(tx);
 
-        spin_lock(&peer->ksnp_lock);
+        cfs_spin_lock(&peer->ksnp_lock);
 
         /* ZC_REQ is going to be pinned to the peer */
         tx->tx_deadline =
@@ -471,7 +452,7 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
         tx->tx_msg.ksm_zc_req_cookie = peer->ksnp_zc_next_cookie++;
         list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
 
-        spin_unlock(&peer->ksnp_lock);
+        cfs_spin_unlock(&peer->ksnp_lock);
 }
 
 static void
@@ -479,18 +460,18 @@ ksocknal_unzc_req(ksock_tx_t *tx)
 {
         ksock_peer_t   *peer = tx->tx_conn->ksnc_peer;
 
-        spin_lock(&peer->ksnp_lock);
+        cfs_spin_lock(&peer->ksnp_lock);
 
         if (tx->tx_msg.ksm_zc_req_cookie == 0) {
                 /* Not waiting for an ACK */
-                spin_unlock(&peer->ksnp_lock);
+                cfs_spin_unlock(&peer->ksnp_lock);
                 return;
         }
 
         tx->tx_msg.ksm_zc_req_cookie = 0;
         list_del(&tx->tx_zc_list);
 
-        spin_unlock(&peer->ksnp_lock);
+        cfs_spin_unlock(&peer->ksnp_lock);
 
         ksocknal_tx_decref(tx);
 }
@@ -525,10 +506,10 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                 counter++;   /* exponential backoff warnings */
                 if ((counter & (-counter)) == counter)
                         CWARN("%u ENOMEM tx %p (%u allocated)\n",
-                              counter, conn, atomic_read(&libcfs_kmemory));
+                              counter, conn, cfs_atomic_read(&libcfs_kmemory));
 
                 /* Queue on ksnd_enomem_conns for retry after a timeout */
-                spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+                cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
 
                 /* enomem list takes over scheduler's ref... */
                 LASSERT (conn->ksnc_tx_scheduled);
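
The warning throttle above fires only when counter is a power of two: (n & -n) isolates the lowest set bit of n and equals n exactly when one bit is set, so the ENOMEM warnings back off exponentially (1st, 2nd, 4th, 8th occurrence, ...). A standalone illustration:

        #include <stdio.h>

        /* prints 1 2 4 8 16 32 64: (n & -n) == n only for powers of two */
        int main(void)
        {
                unsigned int n;

                for (n = 1; n <= 64; n++)
                        if ((n & (0U - n)) == n)
                                printf("%u ", n);
                printf("\n");
                return 0;
        }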
@@ -539,7 +520,7 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                                    ksocknal_data.ksnd_reaper_waketime))
                         cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
 
-                spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+                cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
                 return (rc);
         }
 
@@ -589,13 +570,13 @@ ksocknal_launch_connection_locked (ksock_route_t *route)
         route->ksnr_scheduled = 1;              /* scheduling conn for connd */
         ksocknal_route_addref(route);           /* extra ref for connd */
 
-        spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
 
         list_add_tail (&route->ksnr_connd_list,
                        &ksocknal_data.ksnd_connd_routes);
         cfs_waitq_signal (&ksocknal_data.ksnd_connd_waitq);
 
-        spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
 }
 
 ksock_conn_t *
@@ -614,8 +595,8 @@ ksocknal_find_conn_locked (int payload_nob, ksock_peer_t *peer)
 #if SOCKNAL_ROUND_ROBIN
                 const int     nob = 0;
 #else
-                int           nob = atomic_read(&c->ksnc_tx_nob) +
-                                        SOCK_WMEM_QUEUED(c->ksnc_sock);
+                int           nob = cfs_atomic_read(&c->ksnc_tx_nob) +
+                                        libcfs_sock_wmem_queued(c->ksnc_sock);
 #endif
                 LASSERT (!c->ksnc_closing);
                 LASSERT (c->ksnc_proto != NULL);
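
With SOCKNAL_ROUND_ROBIN disabled, ksocknal_find_conn_locked() favours the connection with the least data left to push: bytes still queued in the LND (ksnc_tx_nob) plus bytes already accepted into the kernel socket's send buffer, now read via the portable libcfs_sock_wmem_queued(). A condensed sketch of the selection loop (the ksnp_conns/ksnc_list member names and the INT_MAX sentinel are assumed here for illustration):

        struct list_head *ctmp;
        ksock_conn_t     *c;
        ksock_conn_t     *best = NULL;
        int               best_nob = INT_MAX;   /* any real conn beats this */

        list_for_each (ctmp, &peer->ksnp_conns) {
                int nob;

                c   = list_entry (ctmp, ksock_conn_t, ksnc_list);
                nob = cfs_atomic_read(&c->ksnc_tx_nob) +
                      libcfs_sock_wmem_queued(c->ksnc_sock);

                if (nob < best_nob) {           /* least backlog wins */
                        best_nob = nob;
                        best     = c;
                }
        }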
@@ -757,7 +738,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
                                               KSOCK_MSG_NOOP,
                 tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
 
-        atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+        cfs_atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
         tx->tx_conn = conn;
         ksocknal_conn_addref(conn); /* +1 ref for tx */
 
@@ -765,15 +746,15 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
          * FIXME: libcfs_sock_wmem_queued and libcfs_sock_error could block
          * in __DARWIN8__ but they're used inside spinlocks a lot.
          */
-        bufnob = SOCK_WMEM_QUEUED(conn->ksnc_sock);
-        spin_lock_bh (&sched->kss_lock);
+        bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
                 /* First packet starts the timeout */
                 conn->ksnc_tx_deadline =
                         cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
                 conn->ksnc_tx_bufnob = 0;
-                mb();    /* order with adding to tx_queue */
+                cfs_mb(); /* order with adding to tx_queue */
         }
 
         ztx = NULL;
@@ -787,7 +768,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
                 if (conn->ksnc_tx_mono != NULL) {
                         if (ksocknal_piggyback_zcack(conn, msg->ksm_zc_ack_cookie)) {
                                 /* zc-ack cookie is piggybacked */
-                                atomic_sub (tx->tx_nob, &conn->ksnc_tx_nob);
+                                cfs_atomic_sub (tx->tx_nob, &conn->ksnc_tx_nob);
                                 ztx = tx;       /* Put to freelist later */
                         } else {
                                 /* no packet can piggyback zc-ack cookie */
@@ -818,7 +799,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
                                 list_add(&tx->tx_list, &ztx->tx_list);
                                 list_del(&ztx->tx_list);
 
-                                atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+                                cfs_atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
                         } else {
                                 /* no noop zc-ack packet, just enqueue it */
                                 LASSERT(conn->ksnc_tx_mono->tx_msg.ksm_type == KSOCK_MSG_LNET);
@@ -848,7 +829,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
                 cfs_waitq_signal (&sched->kss_waitq);
         }
 
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
 }
 
 ksock_route_t *
@@ -905,7 +886,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
         ksock_peer_t     *peer;
         ksock_conn_t     *conn;
         ksock_route_t    *route;
-        rwlock_t         *g_lock;
+        cfs_rwlock_t     *g_lock;
         int               retry;
         int               rc;
 
@@ -916,7 +897,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 
         for (retry = 0;; retry = 1) {
 #if !SOCKNAL_ROUND_ROBIN
-                read_lock (g_lock);
+                cfs_read_lock (g_lock);
                 peer = ksocknal_find_peer_locked(ni, id);
                 if (peer != NULL) {
                         if (ksocknal_find_connectable_route_locked(peer) == NULL) {
@@ -926,22 +907,22 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
                                          * connecting and I do have an actual
                                          * connection... */
                                         ksocknal_queue_tx_locked (tx, conn);
-                                        read_unlock (g_lock);
+                                        cfs_read_unlock (g_lock);
                                         return (0);
                                 }
                         }
                 }
 
                 /* I'll need a write lock... */
-                read_unlock (g_lock);
+                cfs_read_unlock (g_lock);
 #endif
-                write_lock_bh (g_lock);
+                cfs_write_lock_bh (g_lock);
 
                 peer = ksocknal_find_peer_locked(ni, id);
                 if (peer != NULL)
                         break;
 
-                write_unlock_bh (g_lock);
+                cfs_write_unlock_bh (g_lock);
 
                 if ((id.pid & LNET_PID_USERFLAG) != 0) {
                         CERROR("Refusing to create a connection to "
@@ -977,7 +958,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
         if (conn != NULL) {
                 /* Connection exists; queue message on it */
                 ksocknal_queue_tx_locked (tx, conn);
-                write_unlock_bh (g_lock);
+                cfs_write_unlock_bh (g_lock);
                 return (0);
         }
 
@@ -989,11 +970,11 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
                 
                 /* Queue the message until a connection is established */
                 list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
-                write_unlock_bh (g_lock);
+                cfs_write_unlock_bh (g_lock);
                 return 0;
         }
 
-        write_unlock_bh (g_lock);
+        cfs_write_unlock_bh (g_lock);
 
         /* NB Routes may be ignored if connections to them failed recently */
         CDEBUG(D_NETERROR, "No usable routes to %s\n", libcfs_id2str(id));
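
ksocknal_launch_packet() also illustrates the locking discipline the new cfs_ wrappers must preserve: probe for a usable connection under the shared lock, and take the exclusive, BH-disabling lock only when the peer may need to be created. Reduced to a skeleton (a condensed sketch; the retry loop and peer creation are elided):

        cfs_read_lock (g_lock);                 /* fast path: shared */
        peer = ksocknal_find_peer_locked(ni, id);
        conn = (peer == NULL) ? NULL :
               ksocknal_find_conn_locked(tx->tx_nob, peer);
        if (conn != NULL) {
                ksocknal_queue_tx_locked(tx, conn);
                cfs_read_unlock (g_lock);
                return 0;
        }
        cfs_read_unlock (g_lock);

        cfs_write_lock_bh (g_lock);             /* slow path: exclusive */
        /* ... create the peer and routes, queue or fail, as above ... */
        cfs_write_unlock_bh (g_lock);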
@@ -1024,7 +1005,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         LASSERT (payload_niov <= LNET_MAX_IOV);
         /* payload is either all vaddrs or all pages */
         LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
-        LASSERT (!in_interrupt ());
+        LASSERT (!cfs_in_interrupt ());
 
         if (payload_iov != NULL)
                 desc_size = offsetof(ksock_tx_t,
@@ -1079,18 +1060,18 @@ ksocknal_thread_start (int (*fn)(void *arg), void *arg)
         if (pid < 0)
                 return ((int)pid);
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
         ksocknal_data.ksnd_nthreads++;
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
         return (0);
 }
 
 void
 ksocknal_thread_fini (void)
 {
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
         ksocknal_data.ksnd_nthreads--;
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 }
 
 int
@@ -1111,7 +1092,7 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 
         if (nob_to_skip == 0) {         /* right at next packet boundary now */
                 conn->ksnc_rx_started = 0;
-                mb ();                          /* racing with timeout thread */
+                cfs_mb();                       /* racing with timeout thread */
 
                 switch (conn->ksnc_proto->pro_version) {
                 case  KSOCK_PROTO_V2:
@@ -1183,22 +1164,22 @@ ksocknal_handle_zc_req(ksock_peer_t *peer, __u64 cookie)
         ksock_sched_t  *sched;
         int             rc;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         conn = ksocknal_find_conn_locked (0, peer);
         if (conn == NULL) {
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                 CERROR("Can't find connection to send zcack.\n");
                 return -ECONNRESET;
         }
 
         sched = conn->ksnc_scheduler;
 
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
         rc = ksocknal_piggyback_zcack(conn, cookie);
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
         if (rc) {
                 /* Ack cookie is piggybacked */
                 return 0;
@@ -1220,18 +1201,18 @@ ksocknal_handle_zc_req(ksock_peer_t *peer, __u64 cookie)
         ksocknal_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
         tx->tx_msg.ksm_zc_ack_cookie = cookie; /* incoming cookie */
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         conn = ksocknal_find_conn_locked (0, peer);
         if (conn == NULL) {
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                 ksocknal_free_tx(tx);
                 CERROR("Can't find connection to send zcack.\n");
                 return -ECONNRESET;
         }
         ksocknal_queue_tx_locked(tx, conn);
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
         return 0;
 }
@@ -1243,7 +1224,7 @@ ksocknal_handle_zc_ack(ksock_peer_t *peer, __u64 cookie)
         ksock_tx_t             *tx;
         struct list_head       *ctmp;
 
-        spin_lock(&peer->ksnp_lock);
+        cfs_spin_lock(&peer->ksnp_lock);
 
         list_for_each(ctmp, &peer->ksnp_zc_req_list) {
                 tx = list_entry (ctmp, ksock_tx_t, tx_zc_list);
@@ -1253,12 +1234,12 @@ ksocknal_handle_zc_ack(ksock_peer_t *peer, __u64 cookie)
                 tx->tx_msg.ksm_zc_req_cookie = 0;
                 list_del(&tx->tx_zc_list);
 
-                spin_unlock(&peer->ksnp_lock);
+                cfs_spin_unlock(&peer->ksnp_lock);
 
                 ksocknal_tx_decref(tx);
                 return 0;
         }
-        spin_unlock(&peer->ksnp_lock);
+        cfs_spin_unlock(&peer->ksnp_lock);
 
         return -EPROTO;
 }
@@ -1268,7 +1249,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
 {
         int           rc;
 
-        LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+        LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
 
         /* NB: sched lock NOT held */
         /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
@@ -1491,7 +1472,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 
         LASSERT (conn->ksnc_rx_scheduled);
 
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         switch (conn->ksnc_rx_state) {
         case SOCKNAL_RX_PARSE_WAIT:
@@ -1507,7 +1488,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 
         conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
 
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
         ksocknal_conn_decref(conn);
         return (0);
 }
@@ -1517,13 +1498,13 @@ ksocknal_sched_cansleep(ksock_sched_t *sched)
 {
         int           rc;
 
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         rc = (!ksocknal_data.ksnd_shuttingdown &&
               list_empty(&sched->kss_rx_conns) &&
               list_empty(&sched->kss_tx_conns));
 
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
         return (rc);
 }
 
@@ -1541,18 +1522,10 @@ int ksocknal_scheduler (void *arg)
         cfs_daemonize (name);
         cfs_block_allsigs ();
 
-#if defined(CONFIG_SMP) && defined(CPU_AFFINITY)
-        id = ksocknal_sched2cpu(id);
-        if (cpu_online(id)) {
-                cpumask_t m = CPU_MASK_NONE;
-                cpu_set(id, m);
-                set_cpus_allowed(current, m);
-        } else {
+        if (ksocknal_lib_bind_thread_to_cpu(id))
                 CERROR ("Can't set CPU affinity for %s to %d\n", name, id);
-        }
-#endif /* CONFIG_SMP && CPU_AFFINITY */
 
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         while (!ksocknal_data.ksnd_shuttingdown) {
                 int did_something = 0;
@@ -1572,11 +1545,11 @@ int ksocknal_scheduler (void *arg)
                          * data_ready can set it any time after we release
                          * kss_lock. */
                         conn->ksnc_rx_ready = 0;
-                        spin_unlock_bh (&sched->kss_lock);
+                        cfs_spin_unlock_bh (&sched->kss_lock);
 
                         rc = ksocknal_process_receive(conn);
 
-                        spin_lock_bh (&sched->kss_lock);
+                        cfs_spin_lock_bh (&sched->kss_lock);
 
                         /* I'm the only one that can clear this flag */
                         LASSERT(conn->ksnc_rx_scheduled);
@@ -1633,7 +1606,7 @@ int ksocknal_scheduler (void *arg)
                          * write_space can set it any time after we release
                          * kss_lock. */
                         conn->ksnc_tx_ready = 0;
-                        spin_unlock_bh (&sched->kss_lock);
+                        cfs_spin_unlock_bh (&sched->kss_lock);
 
                         if (!list_empty(&zlist)) {
                                 /* free zombie noop txs, it's fast because 
@@ -1645,13 +1618,13 @@ int ksocknal_scheduler (void *arg)
 
                         if (rc == -ENOMEM || rc == -EAGAIN) {
                                 /* Incomplete send: replace tx on HEAD of tx_queue */
-                                spin_lock_bh (&sched->kss_lock);
+                                cfs_spin_lock_bh (&sched->kss_lock);
                                 list_add (&tx->tx_list, &conn->ksnc_tx_queue);
                         } else {
                                 /* Complete send; tx -ref */
                                 ksocknal_tx_decref (tx);
 
-                                spin_lock_bh (&sched->kss_lock);
+                                cfs_spin_lock_bh (&sched->kss_lock);
                                 /* assume space for more */
                                 conn->ksnc_tx_ready = 1;
                         }
@@ -1674,7 +1647,7 @@ int ksocknal_scheduler (void *arg)
                 }
                 if (!did_something ||           /* nothing to do */
                     ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
-                        spin_unlock_bh (&sched->kss_lock);
+                        cfs_spin_unlock_bh (&sched->kss_lock);
 
                         nloops = 0;
 
@@ -1687,11 +1660,11 @@ int ksocknal_scheduler (void *arg)
                                 our_cond_resched();
                         }
 
-                        spin_lock_bh (&sched->kss_lock);
+                        cfs_spin_lock_bh (&sched->kss_lock);
                 }
         }
 
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
         ksocknal_thread_fini ();
         return (0);
 }
@@ -1707,7 +1680,7 @@ void ksocknal_read_callback (ksock_conn_t *conn)
 
         sched = conn->ksnc_scheduler;
 
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         conn->ksnc_rx_ready = 1;
 
@@ -1720,7 +1693,7 @@ void ksocknal_read_callback (ksock_conn_t *conn)
 
                 cfs_waitq_signal (&sched->kss_waitq);
         }
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
 
         EXIT;
 }
@@ -1736,7 +1709,7 @@ void ksocknal_write_callback (ksock_conn_t *conn)
 
         sched = conn->ksnc_scheduler;
 
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         conn->ksnc_tx_ready = 1;
 
@@ -1751,7 +1724,7 @@ void ksocknal_write_callback (ksock_conn_t *conn)
                 cfs_waitq_signal (&sched->kss_waitq);
         }
 
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
 
         EXIT;
 }
@@ -2310,7 +2283,7 @@ ksocknal_connect (ksock_route_t *route)
         deadline = cfs_time_add(cfs_time_current(),
                                 cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         LASSERT (route->ksnr_scheduled);
         LASSERT (!route->ksnr_connecting);
@@ -2350,7 +2323,7 @@ ksocknal_connect (ksock_route_t *route)
                         type = SOCKLND_CONN_BULK_OUT;
                 }
 
-                write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+                cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
                 if (cfs_time_aftereq(cfs_time_current(), deadline)) {
                         rc = -ETIMEDOUT;
@@ -2381,7 +2354,7 @@ ksocknal_connect (ksock_route_t *route)
                         CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
                                libcfs_nid2str(peer->ksnp_id.nid));
 
-                write_lock_bh (&ksocknal_data.ksnd_global_lock);
+                cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
         }
 
         route->ksnr_scheduled = 0;
@@ -2393,11 +2366,11 @@ ksocknal_connect (ksock_route_t *route)
                 ksocknal_launch_connection_locked(route);
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
         return;
 
  failed:
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         route->ksnr_scheduled = 0;
         route->ksnr_connecting = 0;
@@ -2435,7 +2408,7 @@ ksocknal_connect (ksock_route_t *route)
                 list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
         }
 #endif
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         ksocknal_peer_failed(peer);
         ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
@@ -2456,13 +2429,13 @@ ksocknal_connd_ready(void)
 {
         int            rc;
 
-        spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
 
         rc = ksocknal_data.ksnd_shuttingdown ||
              !list_empty(&ksocknal_data.ksnd_connd_connreqs) ||
              ksocknal_connd_connect_route_locked();
 
-        spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
 
         return rc;
 }
@@ -2480,7 +2453,7 @@ ksocknal_connd (void *arg)
         cfs_daemonize (name);
         cfs_block_allsigs ();
 
-        spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
 
         while (!ksocknal_data.ksnd_shuttingdown) {
 
@@ -2490,14 +2463,14 @@ ksocknal_connd (void *arg)
                                         ksock_connreq_t, ksncr_list);
 
                         list_del(&cr->ksncr_list);
-                        spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+                        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
 
                         ksocknal_create_conn(cr->ksncr_ni, NULL,
                                              cr->ksncr_sock, SOCKLND_CONN_NONE);
                         lnet_ni_decref(cr->ksncr_ni);
                         LIBCFS_FREE(cr, sizeof(*cr));
 
-                        spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+                        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
                 }
 
                 if (ksocknal_connd_connect_route_locked()) {
@@ -2507,25 +2480,25 @@ ksocknal_connd (void *arg)
 
                         list_del (&route->ksnr_connd_list);
                         ksocknal_data.ksnd_connd_connecting++;
-                        spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+                        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
 
                         ksocknal_connect (route);
                         ksocknal_route_decref(route);
 
-                        spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+                        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
                         ksocknal_data.ksnd_connd_connecting--;
                 }
 
-                spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+                cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
 
                 cfs_wait_event_interruptible_exclusive(
                         ksocknal_data.ksnd_connd_waitq,
                         ksocknal_connd_ready(), rc);
 
-                spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+                cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
         }
 
-        spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
 
         ksocknal_thread_fini ();
         return (0);
@@ -2547,7 +2520,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 
                 /* libcfs_sock_error() will reset the error code of the
                  * socket on some platforms (like Darwin8.x) */
-                error = SOCK_ERROR(conn->ksnc_sock);
+                error = libcfs_sock_error(conn->ksnc_sock);
                 if (error != 0) {
                         ksocknal_conn_addref(conn);
 
@@ -2598,7 +2571,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
                 }
 
                 if ((!list_empty(&conn->ksnc_tx_queue) ||
-                     SOCK_WMEM_QUEUED(conn->ksnc_sock) != 0) &&
+                     libcfs_sock_wmem_queued(conn->ksnc_sock) != 0) &&
                     cfs_time_aftereq(cfs_time_current(),
                                      conn->ksnc_tx_deadline)) {
                         /* Timed out messages queued for sending or
@@ -2623,7 +2596,7 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
         ksock_tx_t        *tx;
         CFS_LIST_HEAD      (stale_txs);
         
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         while (!list_empty (&peer->ksnp_tx_queue)) {
                 tx = list_entry (peer->ksnp_tx_queue.next,
@@ -2637,7 +2610,7 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
                 list_add_tail (&tx->tx_list, &stale_txs);
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
 }
@@ -2654,14 +2627,14 @@ ksocknal_check_peer_timeouts (int idx)
         /* NB. We expect to have a look at all the peers and not find any
          * connections to time out, so we just use a shared lock while we
          * take a look... */
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         list_for_each (ptmp, peers) {
                 peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
                 conn = ksocknal_find_timed_out_conn (peer);
 
                 if (conn != NULL) {
-                        read_unlock (&ksocknal_data.ksnd_global_lock);
+                        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
                         ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
 
@@ -2682,7 +2655,7 @@ ksocknal_check_peer_timeouts (int idx)
                                              tx->tx_deadline)) {
 
                                 ksocknal_peer_addref(peer);
-                                read_unlock (&ksocknal_data.ksnd_global_lock);
+                                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                                 
                                 ksocknal_flush_stale_txs(peer);
 
@@ -2716,7 +2689,7 @@ ksocknal_check_peer_timeouts (int idx)
                 }
         }
         
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 }
 
 int
@@ -2738,7 +2711,7 @@ ksocknal_reaper (void *arg)
         CFS_INIT_LIST_HEAD(&enomem_conns);
         cfs_waitlink_init (&wait);
 
-        spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
 
         while (!ksocknal_data.ksnd_shuttingdown) {
 
@@ -2747,12 +2720,12 @@ ksocknal_reaper (void *arg)
                                            ksock_conn_t, ksnc_list);
                         list_del (&conn->ksnc_list);
 
-                        spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+                        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
                         ksocknal_terminate_conn (conn);
                         ksocknal_conn_decref(conn);
 
-                        spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+                        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
                         continue;
                 }
 
@@ -2761,11 +2734,11 @@ ksocknal_reaper (void *arg)
                                            ksock_conn_t, ksnc_list);
                         list_del (&conn->ksnc_list);
 
-                        spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+                        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
                         ksocknal_destroy_conn (conn);
 
-                        spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+                        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
                         continue;
                 }
 
@@ -2774,7 +2747,7 @@ ksocknal_reaper (void *arg)
                         list_del_init(&ksocknal_data.ksnd_enomem_conns);
                 }
 
-                spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+                cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
                 /* reschedule all the connections that stalled with ENOMEM... */
                 nenomem_conns = 0;
@@ -2785,14 +2758,14 @@ ksocknal_reaper (void *arg)
 
                         sched = conn->ksnc_scheduler;
 
-                        spin_lock_bh (&sched->kss_lock);
+                        cfs_spin_lock_bh (&sched->kss_lock);
 
                         LASSERT (conn->ksnc_tx_scheduled);
                         conn->ksnc_tx_ready = 1;
                         list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
                         cfs_waitq_signal (&sched->kss_waitq);
 
-                        spin_unlock_bh (&sched->kss_lock);
+                        cfs_spin_unlock_bh (&sched->kss_lock);
                         nenomem_conns++;
                 }
 
@@ -2834,7 +2807,7 @@ ksocknal_reaper (void *arg)
                 ksocknal_data.ksnd_reaper_waketime =
                         cfs_time_add(cfs_time_current(), timeout);
 
-                set_current_state (TASK_INTERRUPTIBLE);
+                cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
                 cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
 
                 if (!ksocknal_data.ksnd_shuttingdown &&
@@ -2842,13 +2815,13 @@ ksocknal_reaper (void *arg)
                     list_empty (&ksocknal_data.ksnd_zombie_conns))
                         cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE, timeout);
 
-                set_current_state (TASK_RUNNING);
+                cfs_set_current_state (CFS_TASK_RUNNING);
                 cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
 
-                spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+                cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
         }
 
-        spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
         ksocknal_thread_fini ();
         return (0);
index 70c9b39..5b0a9e9 100644 (file)
@@ -370,13 +370,13 @@ ksocknal_lib_bind_irq (unsigned int irq)
 
         info = &ksocknal_data.ksnd_irqinfo[irq];
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         LASSERT (info->ksni_valid);
         bind = !info->ksni_bound;
         info->ksni_bound = 1;
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         if (!bind)                              /* bound already */
                 return;
@@ -1096,7 +1096,7 @@ ksocknal_data_ready (struct sock *sk, int n)
 
         /* interleave correctly with closing sockets... */
         LASSERT(!in_irq());
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         conn = sk->sk_user_data;
         if (conn == NULL) {             /* raced with ksocknal_terminate_conn */
@@ -1105,7 +1105,7 @@ ksocknal_data_ready (struct sock *sk, int n)
         } else
                 ksocknal_read_callback(conn);
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
         EXIT;
 }
@@ -1119,7 +1119,7 @@ ksocknal_write_space (struct sock *sk)
 
         /* interleave correctly with closing sockets... */
         LASSERT(!in_irq());
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         conn = sk->sk_user_data;
         wspace = SOCKNAL_WSPACE(sk);
@@ -1138,7 +1138,7 @@ ksocknal_write_space (struct sock *sk)
                 LASSERT (sk->sk_write_space != &ksocknal_write_space);
                 sk->sk_write_space (sk);
 
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                 return;
         }
 
@@ -1152,7 +1152,7 @@ ksocknal_write_space (struct sock *sk)
                 clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
         }
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 }
 
 void
@@ -1187,3 +1187,64 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
 
         return ;
 }
+
+int
+ksocknal_lib_memory_pressure(ksock_conn_t *conn)
+{
+        int            rc = 0;
+        ksock_sched_t *sched;
+
+        sched = conn->ksnc_scheduler;
+        cfs_spin_lock_bh (&sched->kss_lock);
+
+        if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
+            !conn->ksnc_tx_ready) {
+                /* SOCK_NOSPACE is set when the socket fills
+                 * and cleared in the write_space callback
+                 * (which also sets ksnc_tx_ready).  If
+                 * SOCK_NOSPACE and ksnc_tx_ready are BOTH
+                 * zero, I didn't fill the socket and
+                 * write_space won't reschedule me, so I
+                 * return -ENOMEM to get my caller to retry
+                 * after a timeout */
+                rc = -ENOMEM;
+        }
+
+        cfs_spin_unlock_bh (&sched->kss_lock);
+
+        return rc;
+}
+
+__u64
+ksocknal_lib_new_incarnation(void)
+{
+        struct timeval tv;
+
+        /* The incarnation number is the time this module was loaded and it
+         * identifies this particular instance of the socknal.  Hopefully
+         * we won't be able to reboot more frequently than 1MHz for the
+         * foreseeable future :) */
+
+        do_gettimeofday(&tv);
+
+        return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+}
+
+int
+ksocknal_lib_bind_thread_to_cpu(int id)
+{
+#if defined(CONFIG_SMP) && defined(CPU_AFFINITY)
+        id = ksocknal_sched2cpu(id);
+        if (cpu_online(id)) {
+                cpumask_t m = CPU_MASK_NONE;
+                cpu_set(id, m);
+                set_cpus_allowed(current, m);
+                return 0;
+        }
+
+        return -1;
+
+#else
+        return 0;
+#endif
+}
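
Taken together, socklnd_lib-linux.c now exports three per-platform hooks called from the portable socklnd code; their prototypes (the header declaring them is not part of this commit) are:

        int    ksocknal_lib_memory_pressure(ksock_conn_t *conn);
        __u64  ksocknal_lib_new_incarnation(void);
        int    ksocknal_lib_bind_thread_to_cpu(int id);

A port to another OS would presumably supply its own socklnd_lib implementation of these entry points alongside the existing callback plumbing.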