Whamcloud - gitweb
b=17087
author maxim <maxim>
Sat, 29 Nov 2008 21:15:42 +0000 (21:15 +0000)
committer maxim <maxim>
Sat, 29 Nov 2008 21:15:42 +0000 (21:15 +0000)
i=rread
i=isaac
Landing the patch making ksocklnd more OS-neutral (normalizing the names of
lock primitives and moving Linux-specific code to socklnd_lib-linux.c).

lnet/include/libcfs/linux/linux-lock.h
lnet/include/libcfs/linux/linux-prim.h
lnet/include/libcfs/linux/linux-tcpip.h
lnet/klnds/socklnd/socklnd.c
lnet/klnds/socklnd/socklnd.h
lnet/klnds/socklnd/socklnd_cb.c
lnet/klnds/socklnd/socklnd_lib-linux.c
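The hunks below follow a single pattern: the Linux lock, atomic, and scheduling primitives gain cfs_-prefixed aliases in the libcfs headers, and every ksocklnd call site switches to those aliases. A minimal, hypothetical before/after sketch (not part of the commit; every cfs_* name it uses is defined by this patch) looks like this:

/* Hypothetical caller after conversion; on Linux each alias expands
 * 1:1 to the primitive named in the trailing comment. */
static void example_after_conversion(cfs_spinlock_t *lock, cfs_atomic_t *counter)
{
        cfs_spin_lock_bh(lock);              /* was spin_lock_bh() */
        cfs_atomic_inc(counter);             /* was atomic_inc() */
        cfs_spin_unlock_bh(lock);            /* was spin_unlock_bh() */
}

static void example_reader(cfs_rwlock_t *global_lock)
{
        cfs_read_lock(global_lock);          /* was read_lock() */
        /* ... inspect shared state ... */
        cfs_read_unlock(global_lock);        /* was read_unlock() */
}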

lnet/include/libcfs/linux/linux-lock.h
index e2be9d9..67fb75b 100644 (file)
  * - wait_for_completion(c)
  */
 
+/*
+ * spinlock "implementation"
+ */
+
+typedef spinlock_t cfs_spinlock_t;
+
+#define cfs_spin_lock_init(lock) spin_lock_init(lock)
+#define cfs_spin_lock(lock)      spin_lock(lock)
+#define cfs_spin_lock_bh(lock)   spin_lock_bh(lock)
+#define cfs_spin_unlock(lock)    spin_unlock(lock)
+#define cfs_spin_unlock_bh(lock) spin_unlock_bh(lock)
+
+/*
+ * rwlock "implementation"
+ */
+
+typedef rwlock_t cfs_rwlock_t;
+
+#define cfs_rwlock_init(lock)      rwlock_init(lock)
+#define cfs_read_lock(lock)        read_lock(lock)
+#define cfs_read_unlock(lock)      read_unlock(lock)
+#define cfs_write_lock_bh(lock)    write_lock_bh(lock)
+#define cfs_write_unlock_bh(lock)  write_unlock_bh(lock)
+
 /* __KERNEL__ */
 #else
 
lnet/include/libcfs/linux/linux-prim.h
index eb42170..33450fc 100644 (file)
@@ -129,6 +129,9 @@ typedef struct proc_dir_entry           cfs_proc_dir_entry_t;
  */
 #define CFS_TASK_INTERRUPTIBLE          TASK_INTERRUPTIBLE
 #define CFS_TASK_UNINT                  TASK_UNINTERRUPTIBLE
+#define CFS_TASK_RUNNING                TASK_RUNNING
+
+#define cfs_set_current_state(state) set_current_state(state)
 
 typedef wait_queue_t                   cfs_waitlink_t;
 typedef wait_queue_head_t              cfs_waitq_t;
@@ -328,6 +331,37 @@ do {                                                           \
 #define cfs_waitq_wait_event_interruptible_timeout wait_event_interruptible_timeout
 #endif
 
+#define cfs_wait_event_interruptible_exclusive(wq, condition, rc)       \
+({                                                                      \
+        rc = wait_event_interruptible_exclusive(wq, condition);         \
+})
+
+/*
+ * atomic
+ */
+
+typedef atomic_t cfs_atomic_t;
+
+#define cfs_atomic_read(atom)         atomic_read(atom)
+#define cfs_atomic_inc(atom)          atomic_inc(atom)
+#define cfs_atomic_dec(atom)          atomic_dec(atom)
+#define cfs_atomic_dec_and_test(atom) atomic_dec_and_test(atom)
+#define cfs_atomic_set(atom, value)   atomic_set(atom, value)
+#define cfs_atomic_add(value, atom)   atomic_add(value, atom)
+#define cfs_atomic_sub(value, atom)   atomic_sub(value, atom)
+
+/*
+ * membar
+ */
+
+#define cfs_mb() mb()
+
+/*
+ * interrupt
+ */
+
+#define cfs_in_interrupt() in_interrupt()
+
 #else   /* !__KERNEL__ */
 
 typedef struct proc_dir_entry           cfs_proc_dir_entry_t;
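Note that the new cfs_wait_event_interruptible_exclusive() wrapper reports its status through the third argument rather than as a return value. A hypothetical caller (sketch only; the waitqueue and condition are illustrative, borrowed from the socklnd data structures touched below) would be:

/* Hypothetical usage sketch of the wrapper added above. */
int rc = 0;

cfs_wait_event_interruptible_exclusive(ksocknal_data.ksnd_connd_waitq,
                                       !list_empty(&ksocknal_data.ksnd_connd_connreqs),
                                       rc);
if (rc != 0)
        return rc;       /* woken by a signal rather than by the condition */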
lnet/include/libcfs/linux/linux-tcpip.h
index 1cfe512..1f918ea 100644 (file)
@@ -69,10 +69,20 @@ typedef struct socket   cfs_socket_t;
 #endif
 
 #define SOCK_SNDBUF(so)         ((so)->sk->sk_sndbuf)
-#define SOCK_WMEM_QUEUED(so)    ((so)->sk->sk_wmem_queued)
-#define SOCK_ERROR(so)          ((so)->sk->sk_err)
 #define SOCK_TEST_NOSPACE(so)   test_bit(SOCK_NOSPACE, &(so)->flags)
 
+static inline int
+libcfs_sock_error(struct socket *sock)
+{
+        return sock->sk->sk_err;
+}
+
+static inline int
+libcfs_sock_wmem_queued(struct socket *sock)
+{
+        return sock->sk->sk_wmem_queued;
+}
+
 #else   /* !__KERNEL__ */
 
 #include "../user-tcpip.h"
lnet/klnds/socklnd/socklnd.c
index 6150792..fd916ba 100644 (file)
@@ -83,7 +83,7 @@ ksocknal_create_route (__u32 ipaddr, int port)
         if (route == NULL)
                 return (NULL);
 
-        atomic_set (&route->ksnr_refcount, 1);
+        cfs_atomic_set (&route->ksnr_refcount, 1);
         route->ksnr_peer = NULL;
         route->ksnr_retry_interval = 0;         /* OK to connect at any time */
         route->ksnr_ipaddr = ipaddr;
@@ -101,7 +101,7 @@ ksocknal_create_route (__u32 ipaddr, int port)
 void
 ksocknal_destroy_route (ksock_route_t *route)
 {
-        LASSERT (atomic_read(&route->ksnr_refcount) == 0);
+        LASSERT (cfs_atomic_read(&route->ksnr_refcount) == 0);
 
         if (route->ksnr_peer != NULL)
                 ksocknal_peer_decref(route->ksnr_peer);
@@ -117,7 +117,7 @@ ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
 
         LASSERT (id.nid != LNET_NID_ANY);
         LASSERT (id.pid != LNET_PID_ANY);
-        LASSERT (!in_interrupt());
+        LASSERT (!cfs_in_interrupt());
 
         LIBCFS_ALLOC (peer, sizeof (*peer));
         if (peer == NULL)
@@ -127,7 +127,7 @@ ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
 
         peer->ksnp_ni = ni;
         peer->ksnp_id = id;
-        atomic_set (&peer->ksnp_refcount, 1);   /* 1 ref for caller */
+        cfs_atomic_set (&peer->ksnp_refcount, 1);   /* 1 ref for caller */
         peer->ksnp_closing = 0;
         peer->ksnp_accepting = 0;
         peer->ksnp_zc_next_cookie = 1;
@@ -136,12 +136,12 @@ ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
         CFS_INIT_LIST_HEAD (&peer->ksnp_routes);
         CFS_INIT_LIST_HEAD (&peer->ksnp_tx_queue);
         CFS_INIT_LIST_HEAD (&peer->ksnp_zc_req_list);
-        spin_lock_init(&peer->ksnp_lock);
+        cfs_spin_lock_init(&peer->ksnp_lock);
 
-        spin_lock_bh (&net->ksnn_lock);
+        cfs_spin_lock_bh (&net->ksnn_lock);
 
         if (net->ksnn_shutdown) {
-                spin_unlock_bh (&net->ksnn_lock);
+                cfs_spin_unlock_bh (&net->ksnn_lock);
 
                 LIBCFS_FREE(peer, sizeof(*peer));
                 CERROR("Can't create peer: network shutdown\n");
@@ -150,7 +150,7 @@ ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
 
         net->ksnn_npeers++;
 
-        spin_unlock_bh (&net->ksnn_lock);
+        cfs_spin_unlock_bh (&net->ksnn_lock);
 
         *peerp = peer;
         return 0;
@@ -164,7 +164,7 @@ ksocknal_destroy_peer (ksock_peer_t *peer)
         CDEBUG (D_NET, "peer %s %p deleted\n",
                 libcfs_id2str(peer->ksnp_id), peer);
 
-        LASSERT (atomic_read (&peer->ksnp_refcount) == 0);
+        LASSERT (cfs_atomic_read (&peer->ksnp_refcount) == 0);
         LASSERT (peer->ksnp_accepting == 0);
         LASSERT (list_empty (&peer->ksnp_conns));
         LASSERT (list_empty (&peer->ksnp_routes));
@@ -177,9 +177,9 @@ ksocknal_destroy_peer (ksock_peer_t *peer)
          * until they are destroyed, so we can be assured that _all_ state to
          * do with this peer has been cleaned up when its refcount drops to
          * zero. */
-        spin_lock_bh (&net->ksnn_lock);
+        cfs_spin_lock_bh (&net->ksnn_lock);
         net->ksnn_npeers--;
-        spin_unlock_bh (&net->ksnn_lock);
+        cfs_spin_unlock_bh (&net->ksnn_lock);
 }
 
 ksock_peer_t *
@@ -204,7 +204,7 @@ ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
 
                 CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
                        peer, libcfs_id2str(id),
-                       atomic_read(&peer->ksnp_refcount));
+                       cfs_atomic_read(&peer->ksnp_refcount));
                 return (peer);
         }
         return (NULL);
@@ -215,11 +215,11 @@ ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id)
 {
         ksock_peer_t     *peer;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
         peer = ksocknal_find_peer_locked (ni, id);
         if (peer != NULL)                       /* +1 ref for caller? */
                 ksocknal_peer_addref(peer);
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
         return (peer);
 }
@@ -267,7 +267,7 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index,
         int                j;
         int                rc = -ENOENT;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
 
@@ -325,7 +325,7 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index,
                 }
         }
  out:
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
         return (rc);
 }
 
@@ -480,7 +480,7 @@ ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
                 return (-ENOMEM);
         }
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         /* always called with a ref on ni, so shutdown can't have started */
         LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
@@ -512,7 +512,7 @@ ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
                 route2->ksnr_share_count++;
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         return (0);
 }
@@ -584,7 +584,7 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
         int                i;
         int                rc = -ENOENT;
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         if (id.nid != LNET_NID_ANY)
                 lo = hi = ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers;
@@ -621,7 +621,7 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
                 }
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         ksocknal_txlist_done(ni, &zombies, 1);
 
@@ -637,7 +637,7 @@ ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
         struct list_head  *ctmp;
         int                i;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                 list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
@@ -654,13 +654,13 @@ ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
 
                                 conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
                                 ksocknal_conn_addref(conn);
-                                read_unlock (&ksocknal_data.ksnd_global_lock);
+                                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                                 return (conn);
                         }
                 }
         }
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
         return (NULL);
 }
 
@@ -705,7 +705,7 @@ ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
         int                i;
         int                nip;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         nip = net->ksnn_ninterfaces;
         LASSERT (nip <= LNET_MAX_INTERFACES);
@@ -713,7 +713,7 @@ ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
         /* Only offer interfaces for additional connections if I have 
          * more than one. */
         if (nip < 2) {
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                 return 0;
         }
 
@@ -722,7 +722,7 @@ ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
                 LASSERT (ipaddrs[i] != 0);
         }
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
         return (nip);
 }
 
@@ -761,7 +761,7 @@ ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
 int
 ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
 {
-        rwlock_t           *global_lock = &ksocknal_data.ksnd_global_lock;
+        cfs_rwlock_t       *global_lock = &ksocknal_data.ksnd_global_lock;
         ksock_net_t        *net = peer->ksnp_ni->ni_data;
         ksock_interface_t  *iface;
         ksock_interface_t  *best_iface;
@@ -783,7 +783,7 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
         /* Also note that I'm not going to return more than n_peerips
          * interfaces, even if I have more myself */
 
-        write_lock_bh (global_lock);
+        cfs_write_lock_bh (global_lock);
 
         LASSERT (n_peerips <= LNET_MAX_INTERFACES);
         LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
@@ -859,7 +859,7 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
         /* Overwrite input peer IP addresses */
         memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
 
-        write_unlock_bh (global_lock);
+        cfs_write_unlock_bh (global_lock);
 
         return (n_ips);
 }
@@ -869,7 +869,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                        __u32 *peer_ipaddrs, int npeer_ipaddrs)
 {
         ksock_route_t      *newroute = NULL;
-        rwlock_t           *global_lock = &ksocknal_data.ksnd_global_lock;
+        cfs_rwlock_t       *global_lock = &ksocknal_data.ksnd_global_lock;
         lnet_ni_t          *ni = peer->ksnp_ni;
         ksock_net_t        *net = ni->ni_data;
         struct list_head   *rtmp;
@@ -887,12 +887,12 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
          * expecting to be dealing with small numbers of interfaces, so the
          * O(n**3)-ness here shouldn't matter */
 
-        write_lock_bh (global_lock);
+        cfs_write_lock_bh (global_lock);
 
         if (net->ksnn_ninterfaces < 2) {
                 /* Only create additional connections 
                  * if I have > 1 interface */
-                write_unlock_bh (global_lock);
+                cfs_write_unlock_bh (global_lock);
                 return;
         }
 
@@ -902,13 +902,13 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                 if (newroute != NULL) {
                         newroute->ksnr_ipaddr = peer_ipaddrs[i];
                 } else {
-                        write_unlock_bh (global_lock);
+                        cfs_write_unlock_bh (global_lock);
 
                         newroute = ksocknal_create_route(peer_ipaddrs[i], port);
                         if (newroute == NULL)
                                 return;
 
-                        write_lock_bh (global_lock);
+                        cfs_write_lock_bh (global_lock);
                 }
 
                 if (peer->ksnp_closing) {
@@ -976,7 +976,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                 newroute = NULL;
         }
 
-        write_unlock_bh (global_lock);
+        cfs_write_unlock_bh (global_lock);
         if (newroute != NULL)
                 ksocknal_route_decref(newroute);
 }
@@ -1004,12 +1004,12 @@ ksocknal_accept (lnet_ni_t *ni, cfs_socket_t *sock)
         cr->ksncr_ni   = ni;
         cr->ksncr_sock = sock;
 
-        spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
 
         list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
         cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
 
-        spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
         return 0;
 }
 
@@ -1030,7 +1030,7 @@ int
 ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                       cfs_socket_t *sock, int type)
 {
-        rwlock_t          *global_lock = &ksocknal_data.ksnd_global_lock;
+        cfs_rwlock_t      *global_lock = &ksocknal_data.ksnd_global_lock;
         CFS_LIST_HEAD     (zombies);
         lnet_process_id_t  peerid;
         struct list_head  *tmp;
@@ -1065,10 +1065,10 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         conn->ksnc_sock = sock;
         /* 2 ref, 1 for conn, another extra ref prevents socket
          * being closed before establishment of connection */
-        atomic_set (&conn->ksnc_sock_refcount, 2);
+        cfs_atomic_set (&conn->ksnc_sock_refcount, 2);
         conn->ksnc_type = type;
         ksocknal_lib_save_callback(sock, conn);
-        atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
+        cfs_atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
 
         conn->ksnc_zc_capable = ksocknal_lib_zc_capable(sock);
         conn->ksnc_rx_ready = 0;
@@ -1078,7 +1078,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         conn->ksnc_tx_ready = 0;
         conn->ksnc_tx_scheduled = 0;
         conn->ksnc_tx_mono = NULL;
-        atomic_set (&conn->ksnc_tx_nob, 0);
+        cfs_atomic_set (&conn->ksnc_tx_nob, 0);
 
         LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
                                      kshm_ips[LNET_MAX_INTERFACES]));
@@ -1105,9 +1105,9 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
                 peerid = peer->ksnp_id;
 
-                write_lock_bh(global_lock);
+                cfs_write_lock_bh(global_lock);
                 conn->ksnc_proto = peer->ksnp_proto;
-                write_unlock_bh(global_lock);
+                cfs_write_unlock_bh(global_lock);
 
                 if (conn->ksnc_proto == NULL) {
                         conn->ksnc_proto = &ksocknal_protocol_v2x;
@@ -1138,13 +1138,13 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
 
         if (active) {
                 ksocknal_peer_addref(peer);
-                write_lock_bh (global_lock);
+                cfs_write_lock_bh (global_lock);
         } else {
                 rc = ksocknal_create_peer(&peer, ni, peerid);
                 if (rc != 0)
                         goto failed_1;
 
-                write_lock_bh (global_lock);
+                cfs_write_lock_bh (global_lock);
 
                 /* called with a ref on ni, so shutdown can't have started */
                 LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
@@ -1275,9 +1275,9 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         conn->ksnc_scheduler = sched;
 
         /* Set the deadline for the outgoing HELLO to drain */
-        conn->ksnc_tx_bufnob = SOCK_WMEM_QUEUED(sock);
+        conn->ksnc_tx_bufnob = libcfs_sock_wmem_queued(sock);
         conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-        mb();       /* order with adding to peer's conn list */
+        cfs_mb();   /* order with adding to peer's conn list */
 
         list_add (&conn->ksnc_list, &peer->ksnp_conns);
         ksocknal_conn_addref(conn);
@@ -1295,7 +1295,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                 ksocknal_queue_tx_locked (tx, conn);
         }
 
-        write_unlock_bh (global_lock);
+        cfs_write_unlock_bh (global_lock);
 
         /* We've now got a new connection.  Any errors from here on are just
          * like "normal" comms errors and we close the connection normally.
@@ -1335,7 +1335,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         if (rc == 0)
                 rc = ksocknal_lib_setup_sock(sock);
 
-        write_lock_bh(global_lock);
+        cfs_write_lock_bh(global_lock);
 
         /* NB my callbacks block while I hold ksnd_global_lock */
         ksocknal_lib_set_callback(sock, conn);
@@ -1343,12 +1343,12 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         if (!active)
                 peer->ksnp_accepting--;
 
-        write_unlock_bh(global_lock);
+        cfs_write_unlock_bh(global_lock);
 
         if (rc != 0) {
-                write_lock_bh(global_lock);
+                cfs_write_lock_bh(global_lock);
                 ksocknal_close_conn_locked(conn, rc);
-                write_unlock_bh(global_lock);
+                cfs_write_unlock_bh(global_lock);
         } else if (ksocknal_connsock_addref(conn) == 0) {
                 /* Allow I/O to proceed. */
                 ksocknal_read_callback(conn);
@@ -1369,7 +1369,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                 ksocknal_unlink_peer_locked(peer);
         }
 
-        write_unlock_bh (global_lock);
+        cfs_write_unlock_bh (global_lock);
 
         if (warn != NULL) {
                 if (rc < 0)
@@ -1389,9 +1389,9 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                         ksocknal_send_hello(ni, conn, peerid.nid, hello);
                 }
 
-                write_lock_bh(global_lock);
+                cfs_write_lock_bh(global_lock);
                 peer->ksnp_accepting--;
-                write_unlock_bh(global_lock);
+                cfs_write_unlock_bh(global_lock);
         }
 
         ksocknal_txlist_done(ni, &zombies, 1);
@@ -1468,12 +1468,12 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
                 }
         }
 
-        spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
 
         list_add_tail (&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
         cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
 
-        spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 }
 
 void
@@ -1486,7 +1486,7 @@ ksocknal_peer_failed (ksock_peer_t *peer)
          * tell LNET I think the peer is dead if it's to another kernel and
          * there are no connections or connection attempts in existance. */
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
             list_empty(&peer->ksnp_conns) &&
@@ -1498,7 +1498,7 @@ ksocknal_peer_failed (ksock_peer_t *peer)
                                          peer->ksnp_last_alive);
         }
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
         if (notify)
                 lnet_notify (peer->ksnp_ni, peer->ksnp_id.nid, 0,
@@ -1523,7 +1523,7 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
         LASSERT(conn->ksnc_closing);
 
         /* wake up the scheduler to "send" all remaining packets to /dev/null */
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         /* a closing conn is always ready to tx */
         conn->ksnc_tx_ready = 1;
@@ -1539,9 +1539,9 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
                 cfs_waitq_signal (&sched->kss_waitq);
         }
 
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
 
-        spin_lock(&peer->ksnp_lock);
+        cfs_spin_lock(&peer->ksnp_lock);
 
         list_for_each_safe(tmp, nxt, &peer->ksnp_zc_req_list) {
                 tx = list_entry(tmp, ksock_tx_t, tx_zc_list);
@@ -1556,7 +1556,7 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
                 list_add(&tx->tx_zc_list, &zlist);
         }
 
-        spin_unlock(&peer->ksnp_lock);
+        cfs_spin_unlock(&peer->ksnp_lock);
 
         list_for_each_safe(tmp, nxt, &zlist) {
                 tx = list_entry(tmp, ksock_tx_t, tx_zc_list);
@@ -1566,7 +1566,7 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
         }
 
         /* serialise with callbacks */
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
 
@@ -1581,7 +1581,7 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
                 peer->ksnp_error = 0;     /* avoid multiple notifications */
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         if (failed)
                 ksocknal_peer_failed(peer);
@@ -1599,13 +1599,13 @@ ksocknal_queue_zombie_conn (ksock_conn_t *conn)
 {
         /* Queue the conn for the reaper to destroy */
 
-        LASSERT (atomic_read(&conn->ksnc_conn_refcount) == 0);
-        spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+        LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
 
         list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
         cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
 
-        spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 }
 
 void
@@ -1614,8 +1614,8 @@ ksocknal_destroy_conn (ksock_conn_t *conn)
         /* Final coup-de-grace of the reaper */
         CDEBUG (D_NET, "connection %p\n", conn);
 
-        LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
-        LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
+        LASSERT (cfs_atomic_read (&conn->ksnc_conn_refcount) == 0);
+        LASSERT (cfs_atomic_read (&conn->ksnc_sock_refcount) == 0);
         LASSERT (conn->ksnc_sock == NULL);
         LASSERT (conn->ksnc_route == NULL);
         LASSERT (!conn->ksnc_tx_scheduled);
@@ -1693,11 +1693,11 @@ ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
         __u32             ipaddr = conn->ksnc_ipaddr;
         int               count;
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         count = ksocknal_close_peer_conns_locked (peer, ipaddr, why);
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         return (count);
 }
@@ -1713,7 +1713,7 @@ ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
         int                 i;
         int                 count = 0;
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         if (id.nid != LNET_NID_ANY)
                 lo = hi = ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers;
@@ -1735,7 +1735,7 @@ ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
                 }
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         /* wildcards always succeed */
         if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
@@ -1773,7 +1773,7 @@ ksocknal_push_peer (ksock_peer_t *peer)
         ksock_conn_t     *conn;
 
         for (index = 0; ; index++) {
-                read_lock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
                 i = 0;
                 conn = NULL;
@@ -1786,7 +1786,7 @@ ksocknal_push_peer (ksock_peer_t *peer)
                         }
                 }
 
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
                 if (conn == NULL)
                         break;
@@ -1808,7 +1808,7 @@ ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                 for (j = 0; ; j++) {
-                        read_lock (&ksocknal_data.ksnd_global_lock);
+                        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
                         index = 0;
                         peer = NULL;
@@ -1831,7 +1831,7 @@ ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
                                 }
                         }
 
-                        read_unlock (&ksocknal_data.ksnd_global_lock);
+                        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
                         if (peer != NULL) {
                                 rc = 0;
@@ -1862,7 +1862,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
             netmask == 0)
                 return (-EINVAL);
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         iface = ksocknal_ip2iface(ni, ipaddress);
         if (iface != NULL) {
@@ -1899,7 +1899,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
                 /* NB only new connections will pay attention to the new interface! */
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         return (rc);
 }
@@ -1957,7 +1957,7 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
         int                i;
         int                j;
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < net->ksnn_ninterfaces; i++) {
                 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
@@ -1986,7 +1986,7 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
                 }
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         return (rc);
 }
@@ -2002,7 +2002,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
                 ksock_net_t       *net = ni->ni_data;
                 ksock_interface_t *iface;
 
-                read_lock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
                 if (data->ioc_count < 0 ||
                     data->ioc_count >= net->ksnn_ninterfaces) {
@@ -2017,7 +2017,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
                         data->ioc_u32[3] = iface->ksni_nroutes;
                 }
 
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                 return rc;
         }
 
@@ -2125,7 +2125,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 void
 ksocknal_free_buffers (void)
 {
-        LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
+        LASSERT (cfs_atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
 
         if (ksocknal_data.ksnd_schedulers != NULL)
                 LIBCFS_FREE (ksocknal_data.ksnd_schedulers,
@@ -2135,7 +2135,7 @@ ksocknal_free_buffers (void)
                      sizeof (struct list_head) *
                      ksocknal_data.ksnd_peer_hash_size);
 
-        spin_lock(&ksocknal_data.ksnd_tx_lock);
+        cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
 
         if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
                 struct list_head  zlist;
@@ -2143,7 +2143,7 @@ ksocknal_free_buffers (void)
 
                 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
                 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
-                spin_unlock(&ksocknal_data.ksnd_tx_lock);
+                cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
 
                 while(!list_empty(&zlist)) {
                         tx = list_entry(zlist.next, ksock_tx_t, tx_list);
@@ -2151,7 +2151,7 @@ ksocknal_free_buffers (void)
                         LIBCFS_FREE(tx, tx->tx_desc_size);
                 }
         } else {
-                spin_unlock(&ksocknal_data.ksnd_tx_lock);
+                cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
         }
 }
 
@@ -2162,7 +2162,7 @@ ksocknal_base_shutdown (void)
         int            i;
 
         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
-               atomic_read (&libcfs_kmemory));
+               cfs_atomic_read (&libcfs_kmemory));
         LASSERT (ksocknal_data.ksnd_nnets == 0);
 
         switch (ksocknal_data.ksnd_init) {
@@ -2203,17 +2203,17 @@ ksocknal_base_shutdown (void)
                         }
 
                 i = 4;
-                read_lock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_lock (&ksocknal_data.ksnd_global_lock);
                 while (ksocknal_data.ksnd_nthreads != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                                "waiting for %d threads to terminate\n",
                                 ksocknal_data.ksnd_nthreads);
-                        read_unlock (&ksocknal_data.ksnd_global_lock);
+                        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                         cfs_pause(cfs_time_seconds(1));
-                        read_lock (&ksocknal_data.ksnd_global_lock);
+                        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
                 }
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
                 ksocknal_free_buffers();
 
@@ -2222,27 +2222,11 @@ ksocknal_base_shutdown (void)
         }
 
         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
-               atomic_read (&libcfs_kmemory));
+               cfs_atomic_read (&libcfs_kmemory));
 
         PORTAL_MODULE_UNUSE;
 }
 
-
-__u64
-ksocknal_new_incarnation (void)
-{
-        struct timeval tv;
-
-        /* The incarnation number is the time this module loaded and it
-         * identifies this particular instance of the socknal.  Hopefully
-         * we won't be able to reboot more frequently than 1MHz for the
-         * forseeable future :) */
-
-        do_gettimeofday(&tv);
-
-        return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
-}
-
 int
 ksocknal_base_startup (void)
 {
@@ -2263,20 +2247,20 @@ ksocknal_base_startup (void)
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
                 CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
 
-        rwlock_init(&ksocknal_data.ksnd_global_lock);
+        cfs_rwlock_init(&ksocknal_data.ksnd_global_lock);
 
-        spin_lock_init (&ksocknal_data.ksnd_reaper_lock);
+        cfs_spin_lock_init (&ksocknal_data.ksnd_reaper_lock);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
         cfs_waitq_init(&ksocknal_data.ksnd_reaper_waitq);
 
-        spin_lock_init (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_lock_init (&ksocknal_data.ksnd_connd_lock);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
         cfs_waitq_init(&ksocknal_data.ksnd_connd_waitq);
 
-        spin_lock_init (&ksocknal_data.ksnd_tx_lock);
+        cfs_spin_lock_init (&ksocknal_data.ksnd_tx_lock);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
 
         /* NB memset above zeros whole of ksocknal_data, including
@@ -2295,7 +2279,7 @@ ksocknal_base_startup (void)
         for (i = 0; i < ksocknal_data.ksnd_nschedulers; i++) {
                 ksock_sched_t *kss = &ksocknal_data.ksnd_schedulers[i];
 
-                spin_lock_init (&kss->kss_lock);
+                cfs_spin_lock_init (&kss->kss_lock);
                 CFS_INIT_LIST_HEAD (&kss->kss_rx_conns);
                 CFS_INIT_LIST_HEAD (&kss->kss_tx_conns);
                 CFS_INIT_LIST_HEAD (&kss->kss_zombie_noop_txs);
@@ -2348,7 +2332,7 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
         struct list_head *tmp;
         int               i;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                 list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
@@ -2367,7 +2351,7 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
                 CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
                        "closing %d, accepting %d, err %d, zcookie "LPU64", "
                        "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
-                       atomic_read(&peer->ksnp_refcount),
+                       cfs_atomic_read(&peer->ksnp_refcount),
                        peer->ksnp_sharecount, peer->ksnp_closing,
                        peer->ksnp_accepting, peer->ksnp_error,
                        peer->ksnp_zc_next_cookie,
@@ -2377,7 +2361,7 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
                 list_for_each (tmp, &peer->ksnp_routes) {
                         route = list_entry(tmp, ksock_route_t, ksnr_list);
                         CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
-                               "del %d\n", atomic_read(&route->ksnr_refcount),
+                               "del %d\n", cfs_atomic_read(&route->ksnr_refcount),
                                route->ksnr_scheduled, route->ksnr_connecting,
                                route->ksnr_connected, route->ksnr_deleted);
                 }
@@ -2385,13 +2369,13 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
                 list_for_each (tmp, &peer->ksnp_conns) {
                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
                         CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
-                               atomic_read(&conn->ksnc_conn_refcount),
-                               atomic_read(&conn->ksnc_sock_refcount),
+                               cfs_atomic_read(&conn->ksnc_conn_refcount),
+                               cfs_atomic_read(&conn->ksnc_sock_refcount),
                                conn->ksnc_type, conn->ksnc_closing);
                 }
         }
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
         return;
 }
 
@@ -2406,18 +2390,18 @@ ksocknal_shutdown (lnet_ni_t *ni)
         LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
         LASSERT(ksocknal_data.ksnd_nnets > 0);
 
-        spin_lock_bh (&net->ksnn_lock);
+        cfs_spin_lock_bh (&net->ksnn_lock);
         net->ksnn_shutdown = 1;                 /* prevent new peers */
-        spin_unlock_bh (&net->ksnn_lock);
+        cfs_spin_unlock_bh (&net->ksnn_lock);
 
         /* Delete all peers */
         ksocknal_del_peer(ni, anyid, 0);
 
         /* Wait for all peer state to clean up */
         i = 2;
-        spin_lock_bh (&net->ksnn_lock);
+        cfs_spin_lock_bh (&net->ksnn_lock);
         while (net->ksnn_npeers != 0) {
-                spin_unlock_bh (&net->ksnn_lock);
+                cfs_spin_unlock_bh (&net->ksnn_lock);
 
                 i++;
                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
@@ -2427,9 +2411,9 @@ ksocknal_shutdown (lnet_ni_t *ni)
 
                 ksocknal_debug_peerhash(ni);
 
-                spin_lock_bh (&net->ksnn_lock);
+                cfs_spin_lock_bh (&net->ksnn_lock);
         }
-        spin_unlock_bh (&net->ksnn_lock);
+        cfs_spin_unlock_bh (&net->ksnn_lock);
 
         for (i = 0; i < net->ksnn_ninterfaces; i++) {
                 LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
@@ -2518,8 +2502,8 @@ ksocknal_startup (lnet_ni_t *ni)
                 goto fail_0;
 
         memset(net, 0, sizeof(*net));
-        spin_lock_init(&net->ksnn_lock);
-        net->ksnn_incarnation = ksocknal_new_incarnation();
+        cfs_spin_lock_init(&net->ksnn_lock);
+        net->ksnn_incarnation = ksocknal_lib_new_incarnation();
         ni->ni_data = net;
         ni->ni_maxtxcredits = *ksocknal_tunables.ksnd_credits;
         ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peercredits;
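With ksocknal_new_incarnation() deleted here, ksocknal_startup() now calls ksocknal_lib_new_incarnation(), which presumably lives in socklnd_lib-linux.c (that hunk is not included in this excerpt). A sketch of the Linux implementation, assuming it simply inherits the body removed above:

/* Speculative sketch of the relocated helper; mirrors the code removed
 * from socklnd.c above. */
__u64
ksocknal_lib_new_incarnation (void)
{
        struct timeval tv;

        /* The incarnation number is the time this module loaded: it
         * identifies this particular instance of the socknal. */
        do_gettimeofday(&tv);

        return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
}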
lnet/klnds/socklnd/socklnd.h
index ac93e46..43d2fcd 100644 (file)
@@ -69,7 +69,7 @@
 
 typedef struct                                  /* per scheduler state */
 {
-        spinlock_t        kss_lock;             /* serialise */
+        cfs_spinlock_t    kss_lock;             /* serialise */
         struct list_head  kss_rx_conns;         /* conn waiting to be read */
         struct list_head  kss_tx_conns;         /* conn waiting to be written */
         struct list_head  kss_zombie_noop_txs;  /* zombie noop tx list */
@@ -131,7 +131,7 @@ typedef struct
 typedef struct
 {
         __u64             ksnn_incarnation;     /* my epoch */
-        spinlock_t        ksnn_lock;            /* serialise */
+        cfs_spinlock_t    ksnn_lock;            /* serialise */
         int               ksnn_npeers;          /* # peers */
         int               ksnn_shutdown;        /* shutting down? */
         int               ksnn_ninterfaces;     /* IP interfaces */
@@ -143,7 +143,7 @@ typedef struct
         int               ksnd_init;            /* initialisation state */
         int               ksnd_nnets;           /* # networks set up */
 
-        rwlock_t          ksnd_global_lock;     /* stabilize peer/conn ops */
+        cfs_rwlock_t      ksnd_global_lock;     /* stabilize peer/conn ops */
         struct list_head *ksnd_peers;           /* hash table of all my known peers */
         int               ksnd_peer_hash_size;  /* size of ksnd_peers */
 
@@ -152,14 +152,14 @@ typedef struct
         int               ksnd_nschedulers;     /* # schedulers */
         ksock_sched_t    *ksnd_schedulers;      /* their state */
 
-        atomic_t          ksnd_nactive_txs;     /* #active txs */
+        cfs_atomic_t      ksnd_nactive_txs;     /* #active txs */
 
         struct list_head  ksnd_deathrow_conns;  /* conns to close: reaper_lock*/
         struct list_head  ksnd_zombie_conns;    /* conns to free: reaper_lock */
         struct list_head  ksnd_enomem_conns;    /* conns to retry: reaper_lock*/
         cfs_waitq_t       ksnd_reaper_waitq;    /* reaper sleeps here */
         cfs_time_t        ksnd_reaper_waketime; /* when reaper will wake */
-        spinlock_t        ksnd_reaper_lock;     /* serialise */
+        cfs_spinlock_t    ksnd_reaper_lock;     /* serialise */
 
         int               ksnd_enomem_tx;       /* test ENOMEM sender */
         int               ksnd_stall_tx;        /* test sluggish sender */
@@ -169,10 +169,10 @@ typedef struct
         struct list_head  ksnd_connd_routes;    /* routes waiting to be connected */
         cfs_waitq_t       ksnd_connd_waitq;     /* connds sleep here */
         int               ksnd_connd_connecting;/* # connds connecting */
-        spinlock_t        ksnd_connd_lock;      /* serialise */
+        cfs_spinlock_t    ksnd_connd_lock;      /* serialise */
 
         struct list_head  ksnd_idle_noop_txs;   /* list head for freed noop tx */
-        spinlock_t        ksnd_tx_lock;         /* serialise, NOT safe in g_lock */
+        cfs_spinlock_t    ksnd_tx_lock;         /* serialise, NOT safe in g_lock */
 
         ksock_irqinfo_t   ksnd_irqinfo[NR_IRQS];/* irq->scheduler lookup */
 
@@ -200,7 +200,7 @@ typedef struct                                  /* transmit packet */
 {
         struct list_head        tx_list;        /* queue on conn for transmission etc */
         struct list_head        tx_zc_list;     /* queue on peer for ZC request */
-        atomic_t                tx_refcount;    /* tx reference count */
+        cfs_atomic_t            tx_refcount;    /* tx reference count */
         int                     tx_nob;         /* # packet bytes */
         int                     tx_resid;       /* residual bytes */
         int                     tx_niov;        /* # packet iovec frags */
@@ -249,8 +249,8 @@ typedef struct ksock_conn
         cfs_socket_t       *ksnc_sock;          /* actual socket */
         void               *ksnc_saved_data_ready; /* socket's original data_ready() callback */
         void               *ksnc_saved_write_space; /* socket's original write_space() callback */
-        atomic_t            ksnc_conn_refcount; /* conn refcount */
-        atomic_t            ksnc_sock_refcount; /* sock refcount */
+        cfs_atomic_t        ksnc_conn_refcount; /* conn refcount */
+        cfs_atomic_t        ksnc_sock_refcount; /* sock refcount */
         ksock_sched_t      *ksnc_scheduler;     /* who schedules this connection */
         __u32               ksnc_myipaddr;      /* my IP */
         __u32               ksnc_ipaddr;        /* peer's IP */
@@ -290,7 +290,7 @@ typedef struct ksock_conn
                                                  * b. noop ZC-ACK packet */
         cfs_time_t          ksnc_tx_deadline;   /* when (in jiffies) tx times out */
         int                 ksnc_tx_bufnob;     /* send buffer marker */
-        atomic_t            ksnc_tx_nob;        /* # bytes queued */
+        cfs_atomic_t        ksnc_tx_nob;        /* # bytes queued */
         int                 ksnc_tx_ready;      /* write space */
         int                 ksnc_tx_scheduled;  /* being progressed */
 
@@ -307,7 +307,7 @@ typedef struct ksock_route
         struct list_head    ksnr_list;          /* chain on peer route list */
         struct list_head    ksnr_connd_list;    /* chain on ksnr_connd_routes */
         struct ksock_peer  *ksnr_peer;          /* owning peer */
-        atomic_t            ksnr_refcount;      /* # users */
+        cfs_atomic_t        ksnr_refcount;      /* # users */
         cfs_time_t          ksnr_timeout;       /* when (in jiffies) reconnection can happen next */
         cfs_duration_t      ksnr_retry_interval; /* how long between retries */
         __u32               ksnr_myipaddr;      /* my IP */
@@ -325,7 +325,7 @@ typedef struct ksock_peer
 {
         struct list_head    ksnp_list;          /* stash on global peer list */
         lnet_process_id_t   ksnp_id;            /* who's on the other end(s) */
-        atomic_t            ksnp_refcount;      /* # users */
+        cfs_atomic_t        ksnp_refcount;      /* # users */
         int                 ksnp_sharecount;    /* lconf usage counter */
         int                 ksnp_closing;       /* being closed */
         int                 ksnp_accepting;     /* # passive connections pending */
@@ -336,7 +336,7 @@ typedef struct ksock_peer
         struct list_head    ksnp_conns;         /* all active connections */
         struct list_head    ksnp_routes;        /* routes */
         struct list_head    ksnp_tx_queue;      /* waiting packets */
-        spinlock_t          ksnp_lock;          /* serialize, NOT safe in g_lock */
+        cfs_spinlock_t      ksnp_lock;          /* serialize, NOT safe in g_lock */
         struct list_head    ksnp_zc_req_list;   /* zero copy requests wait for ACK  */
         cfs_time_t          ksnp_last_alive;    /* when (in jiffies) I was last alive */
         lnet_ni_t          *ksnp_ni;            /* which network */
@@ -396,8 +396,8 @@ ksocknal_nid2peerlist (lnet_nid_t nid)
 static inline void
 ksocknal_conn_addref (ksock_conn_t *conn)
 {
-        LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
-        atomic_inc(&conn->ksnc_conn_refcount);
+        LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
+        cfs_atomic_inc(&conn->ksnc_conn_refcount);
 }
 
 extern void ksocknal_queue_zombie_conn (ksock_conn_t *conn);
@@ -405,8 +405,8 @@ extern void ksocknal_queue_zombie_conn (ksock_conn_t *conn);
 static inline void
 ksocknal_conn_decref (ksock_conn_t *conn)
 {
-        LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
-        if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
+        LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&conn->ksnc_conn_refcount))
                 ksocknal_queue_zombie_conn(conn);
 }
 
@@ -415,13 +415,13 @@ ksocknal_connsock_addref (ksock_conn_t *conn)
 {
         int   rc = -ESHUTDOWN;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
         if (!conn->ksnc_closing) {
-                LASSERT (atomic_read(&conn->ksnc_sock_refcount) > 0);
-                atomic_inc(&conn->ksnc_sock_refcount);
+                LASSERT (cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
+                cfs_atomic_inc(&conn->ksnc_sock_refcount);
                 rc = 0;
         }
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
         return (rc);
 }
@@ -429,8 +429,8 @@ ksocknal_connsock_addref (ksock_conn_t *conn)
 static inline void
 ksocknal_connsock_decref (ksock_conn_t *conn)
 {
-        LASSERT (atomic_read(&conn->ksnc_sock_refcount) > 0);
-        if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
+        LASSERT (cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
                 LASSERT (conn->ksnc_closing);
                 libcfs_sock_release(conn->ksnc_sock);
                 conn->ksnc_sock = NULL;
@@ -440,8 +440,8 @@ ksocknal_connsock_decref (ksock_conn_t *conn)
 static inline void
 ksocknal_tx_addref (ksock_tx_t *tx)
 {
-        LASSERT (atomic_read(&tx->tx_refcount) > 0);
-        atomic_inc(&tx->tx_refcount);
+        LASSERT (cfs_atomic_read(&tx->tx_refcount) > 0);
+        cfs_atomic_inc(&tx->tx_refcount);
 }
 
 extern void ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx);
@@ -449,16 +449,16 @@ extern void ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx);
 static inline void
 ksocknal_tx_decref (ksock_tx_t *tx)
 {
-        LASSERT (atomic_read(&tx->tx_refcount) > 0);
-        if (atomic_dec_and_test(&tx->tx_refcount))
+        LASSERT (cfs_atomic_read(&tx->tx_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&tx->tx_refcount))
                 ksocknal_tx_done(NULL, tx);
 }
 
 static inline void
 ksocknal_route_addref (ksock_route_t *route)
 {
-        LASSERT (atomic_read(&route->ksnr_refcount) > 0);
-        atomic_inc(&route->ksnr_refcount);
+        LASSERT (cfs_atomic_read(&route->ksnr_refcount) > 0);
+        cfs_atomic_inc(&route->ksnr_refcount);
 }
 
 extern void ksocknal_destroy_route (ksock_route_t *route);
@@ -466,16 +466,16 @@ extern void ksocknal_destroy_route (ksock_route_t *route);
 static inline void
 ksocknal_route_decref (ksock_route_t *route)
 {
-        LASSERT (atomic_read (&route->ksnr_refcount) > 0);
-        if (atomic_dec_and_test(&route->ksnr_refcount))
+        LASSERT (cfs_atomic_read (&route->ksnr_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&route->ksnr_refcount))
                 ksocknal_destroy_route (route);
 }
 
 static inline void
 ksocknal_peer_addref (ksock_peer_t *peer)
 {
-        LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
-        atomic_inc(&peer->ksnp_refcount);
+        LASSERT (cfs_atomic_read (&peer->ksnp_refcount) > 0);
+        cfs_atomic_inc(&peer->ksnp_refcount);
 }
 
 extern void ksocknal_destroy_peer (ksock_peer_t *peer);
@@ -483,8 +483,8 @@ extern void ksocknal_destroy_peer (ksock_peer_t *peer);
 static inline void
 ksocknal_peer_decref (ksock_peer_t *peer)
 {
-        LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
-        if (atomic_dec_and_test(&peer->ksnp_refcount))
+        LASSERT (cfs_atomic_read (&peer->ksnp_refcount) > 0);
+        if (cfs_atomic_dec_and_test(&peer->ksnp_refcount))
                 ksocknal_destroy_peer (peer);
 }
 
@@ -551,3 +551,7 @@ extern int ksocknal_lib_tunables_init(void);
 extern void ksocknal_lib_tunables_fini(void);
 
 extern void ksocknal_lib_csum_tx(ksock_tx_t *tx);
+
+extern int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
+extern __u64 ksocknal_lib_new_incarnation(void);
+extern int ksocknal_lib_bind_thread_to_cpu(int id);
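The three new ksocknal_lib_* externs are the hooks for the code moved out of the generic sources. For example, ksocknal_lib_memory_pressure() replaces the SOCK_NOSPACE test removed from ksocknal_transmit() in the socklnd_cb.c diff below; a speculative sketch of the Linux-side implementation (the socklnd_lib-linux.c hunk is not shown in this excerpt) is:

/* Speculative sketch, reconstructed from the logic removed from
 * ksocknal_transmit(): return non-zero when -EAGAIN is caused by
 * memory pressure rather than by a full socket buffer. */
int
ksocknal_lib_memory_pressure (ksock_conn_t *conn)
{
        int            ret = 0;
        ksock_sched_t *sched = conn->ksnc_scheduler;

        cfs_spin_lock_bh (&sched->kss_lock);

        if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) && !conn->ksnc_tx_ready) {
                /* SOCK_NOSPACE is set when the socket fills and cleared
                 * in the write_space callback (which also sets
                 * ksnc_tx_ready).  If both are clear, the socket was not
                 * filled and write_space won't reschedule the sender, so
                 * the caller should return -ENOMEM and retry after a
                 * timeout. */
                ret = 1;
        }

        cfs_spin_unlock_bh (&sched->kss_lock);

        return ret;
}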
lnet/klnds/socklnd/socklnd_cb.c
index 85e687a..6fe8111 100644 (file)
@@ -33,7 +33,7 @@ ksocknal_alloc_tx (int size)
 
         if (size == KSOCK_NOOP_TX_SIZE) {
                 /* searching for a noop tx in free list */
-                spin_lock(&ksocknal_data.ksnd_tx_lock);
+                cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
 
                 if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
                         tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
@@ -42,7 +42,7 @@ ksocknal_alloc_tx (int size)
                         list_del(&tx->tx_list);
                 }
 
-                spin_unlock(&ksocknal_data.ksnd_tx_lock);
+                cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
         }
 
         if (tx == NULL)
@@ -51,9 +51,9 @@ ksocknal_alloc_tx (int size)
         if (tx == NULL)
                 return NULL;
 
-        atomic_set(&tx->tx_refcount, 1);
+        cfs_atomic_set(&tx->tx_refcount, 1);
         tx->tx_desc_size = size;
-        atomic_inc(&ksocknal_data.ksnd_nactive_txs);
+        cfs_atomic_inc(&ksocknal_data.ksnd_nactive_txs);
 
         return tx;
 }
@@ -61,15 +61,15 @@ ksocknal_alloc_tx (int size)
 void
 ksocknal_free_tx (ksock_tx_t *tx)
 {
-        atomic_dec(&ksocknal_data.ksnd_nactive_txs);
+        cfs_atomic_dec(&ksocknal_data.ksnd_nactive_txs);
 
         if (tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
                 /* it's a noop tx */
-                spin_lock(&ksocknal_data.ksnd_tx_lock);
+                cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
 
                 list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
 
-                spin_unlock(&ksocknal_data.ksnd_tx_lock);
+                cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
         } else {
                 LIBCFS_FREE(tx, tx->tx_desc_size);
         }
@@ -188,7 +188,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                         rc = ksocknal_send_kiov (conn, tx);
                 }
 
-                bufnob = SOCK_WMEM_QUEUED(conn->ksnc_sock);
+                bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
                 if (rc > 0)                     /* sent something? */
                         conn->ksnc_tx_bufnob += rc; /* account it */
 
@@ -199,42 +199,23 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                                 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
                         conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
                         conn->ksnc_tx_bufnob = bufnob;
-                        mb();
+                        cfs_mb();
                 }
 
                 if (rc <= 0) { /* Didn't write anything? */
-                        ksock_sched_t *sched;
 
                         if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
                                 rc = -EAGAIN;
 
-                        if (rc != -EAGAIN)
-                                break;
-
                         /* Check if EAGAIN is due to memory pressure */
-
-                        sched = conn->ksnc_scheduler;
-                        spin_lock_bh (&sched->kss_lock);
-
-                        if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
-                            !conn->ksnc_tx_ready) {
-                                /* SOCK_NOSPACE is set when the socket fills
-                                 * and cleared in the write_space callback
-                                 * (which also sets ksnc_tx_ready).  If
-                                 * SOCK_NOSPACE and ksnc_tx_ready are BOTH
-                                 * zero, I didn't fill the socket and
-                                 * write_space won't reschedule me, so I
-                                 * return -ENOMEM to get my caller to retry
-                                 * after a timeout */
+                        if(rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
                                 rc = -ENOMEM;
-                        }
 
-                        spin_unlock_bh (&sched->kss_lock);
                         break;
                 }
 
                 /* socket's wmem_queued now includes 'rc' bytes */
-                atomic_sub (rc, &conn->ksnc_tx_nob);
+                cfs_atomic_sub (rc, &conn->ksnc_tx_nob);
                 rc = 0;
 
         } while (tx->tx_resid != 0);
@@ -265,7 +246,7 @@ ksocknal_recv_iov (ksock_conn_t *conn)
         conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
         conn->ksnc_rx_deadline =
                 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-        mb();                           /* order with setting rx_started */
+        cfs_mb();                       /* order with setting rx_started */
         conn->ksnc_rx_started = 1;
 
         conn->ksnc_rx_nob_wanted -= nob;
@@ -309,7 +290,7 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
         conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
         conn->ksnc_rx_deadline =
                 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-        mb();                           /* order with setting rx_started */
+        cfs_mb();                       /* order with setting rx_started */
         conn->ksnc_rx_started = 1;
 
         conn->ksnc_rx_nob_wanted -= nob;
@@ -422,7 +403,7 @@ ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
 
                 list_del (&tx->tx_list);
 
-                LASSERT (atomic_read(&tx->tx_refcount) == 1);
+                LASSERT (cfs_atomic_read(&tx->tx_refcount) == 1);
                 ksocknal_tx_done (ni, tx);
         }
 }
@@ -461,13 +442,13 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
 
         ksocknal_tx_addref(tx);
 
-        spin_lock(&peer->ksnp_lock);
+        cfs_spin_lock(&peer->ksnp_lock);
 
         LASSERT (tx->tx_msg.ksm_zc_req_cookie == 0);
         tx->tx_msg.ksm_zc_req_cookie = peer->ksnp_zc_next_cookie++;
         list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
 
-        spin_unlock(&peer->ksnp_lock);
+        cfs_spin_unlock(&peer->ksnp_lock);
 }
 
 static void
@@ -475,18 +456,18 @@ ksocknal_unzc_req(ksock_tx_t *tx)
 {
         ksock_peer_t   *peer = tx->tx_conn->ksnc_peer;
 
-        spin_lock(&peer->ksnp_lock);
+        cfs_spin_lock(&peer->ksnp_lock);
 
         if (tx->tx_msg.ksm_zc_req_cookie == 0) {
                 /* Not waiting for an ACK */
-                spin_unlock(&peer->ksnp_lock);
+                cfs_spin_unlock(&peer->ksnp_lock);
                 return;
         }
 
         tx->tx_msg.ksm_zc_req_cookie = 0;
         list_del(&tx->tx_zc_list);
 
-        spin_unlock(&peer->ksnp_lock);
+        cfs_spin_unlock(&peer->ksnp_lock);
 
         ksocknal_tx_decref(tx);
 }
@@ -521,10 +502,10 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                 counter++;   /* exponential backoff warnings */
                 if ((counter & (-counter)) == counter)
                         CWARN("%u ENOMEM tx %p (%u allocated)\n",
-                              counter, conn, atomic_read(&libcfs_kmemory));
+                              counter, conn, cfs_atomic_read(&libcfs_kmemory));
 
                 /* Queue on ksnd_enomem_conns for retry after a timeout */
-                spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+                cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
 
                 /* enomem list takes over scheduler's ref... */
                 LASSERT (conn->ksnc_tx_scheduled);
@@ -535,7 +516,7 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                                    ksocknal_data.ksnd_reaper_waketime))
                         cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
 
-                spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+                cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
                 return (rc);
         }
 
@@ -585,13 +566,13 @@ ksocknal_launch_connection_locked (ksock_route_t *route)
         route->ksnr_scheduled = 1;              /* scheduling conn for connd */
         ksocknal_route_addref(route);           /* extra ref for connd */
 
-        spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
 
         list_add_tail (&route->ksnr_connd_list,
                        &ksocknal_data.ksnd_connd_routes);
         cfs_waitq_signal (&ksocknal_data.ksnd_connd_waitq);
 
-        spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
 }
 
 ksock_conn_t *
@@ -610,8 +591,8 @@ ksocknal_find_conn_locked (int payload_nob, ksock_peer_t *peer)
 #if SOCKNAL_ROUND_ROBIN
                 const int     nob = 0;
 #else
-                int           nob = atomic_read(&c->ksnc_tx_nob) +
-                                        SOCK_WMEM_QUEUED(c->ksnc_sock);
+                int           nob = cfs_atomic_read(&c->ksnc_tx_nob) +
+                                        libcfs_sock_wmem_queued(c->ksnc_sock);
 #endif
                 LASSERT (!c->ksnc_closing);
                 LASSERT (c->ksnc_proto != NULL);
@@ -753,7 +734,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
                                               KSOCK_MSG_NOOP,
                 tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
 
-        atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+        cfs_atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
         tx->tx_conn = conn;
         ksocknal_conn_addref(conn); /* +1 ref for tx */
 
@@ -762,15 +743,15 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
          * a blockable lock (the socket lock), so libcfs_sock_wmem_queued()
          * can't be called inside the spinlock.
          */
-        bufnob = SOCK_WMEM_QUEUED(conn->ksnc_sock);
-        spin_lock_bh (&sched->kss_lock);
+        bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
                 /* First packet starts the timeout */
                 conn->ksnc_tx_deadline =
                         cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
                 conn->ksnc_tx_bufnob = 0;
-                mb();    /* order with adding to tx_queue */
+                cfs_mb(); /* order with adding to tx_queue */
         }
 
         ztx = NULL;
@@ -784,7 +765,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
                 if (conn->ksnc_tx_mono != NULL) {
                         if (ksocknal_piggyback_zcack(conn, msg->ksm_zc_ack_cookie)) {
                                 /* zc-ack cookie is piggybacked */
-                                atomic_sub (tx->tx_nob, &conn->ksnc_tx_nob);
+                                cfs_atomic_sub (tx->tx_nob, &conn->ksnc_tx_nob);
                                 ztx = tx;       /* Put to freelist later */
                         } else {
                                 /* no packet can piggyback zc-ack cookie */
@@ -815,7 +796,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
                                 list_add(&tx->tx_list, &ztx->tx_list);
                                 list_del(&ztx->tx_list);
 
-                                atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+                                cfs_atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
                         } else {
                                 /* no noop zc-ack packet, just enqueue it */
                                 LASSERT(conn->ksnc_tx_mono->tx_msg.ksm_type == KSOCK_MSG_LNET);
@@ -845,7 +826,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
                 cfs_waitq_signal (&sched->kss_waitq);
         }
 
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
 }
 
 ksock_route_t *
@@ -902,7 +883,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
         ksock_peer_t     *peer;
         ksock_conn_t     *conn;
         ksock_route_t    *route;
-        rwlock_t         *g_lock;
+        cfs_rwlock_t     *g_lock;
         int               retry;
         int               rc;
 
@@ -913,7 +894,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 
         for (retry = 0;; retry = 1) {
 #if !SOCKNAL_ROUND_ROBIN
-                read_lock (g_lock);
+                cfs_read_lock (g_lock);
                 peer = ksocknal_find_peer_locked(ni, id);
                 if (peer != NULL) {
                         if (ksocknal_find_connectable_route_locked(peer) == NULL) {
@@ -923,22 +904,22 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
                                          * connecting and I do have an actual
                                          * connection... */
                                         ksocknal_queue_tx_locked (tx, conn);
-                                        read_unlock (g_lock);
+                                        cfs_read_unlock (g_lock);
                                         return (0);
                                 }
                         }
                 }
 
                 /* I'll need a write lock... */
-                read_unlock (g_lock);
+                cfs_read_unlock (g_lock);
 #endif
-                write_lock_bh (g_lock);
+                cfs_write_lock_bh (g_lock);
 
                 peer = ksocknal_find_peer_locked(ni, id);
                 if (peer != NULL)
                         break;
 
-                write_unlock_bh (g_lock);
+                cfs_write_unlock_bh (g_lock);
 
                 if ((id.pid & LNET_PID_USERFLAG) != 0) {
                         CERROR("Refusing to create a connection to "
@@ -974,7 +955,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
         if (conn != NULL) {
                 /* Connection exists; queue message on it */
                 ksocknal_queue_tx_locked (tx, conn);
-                write_unlock_bh (g_lock);
+                cfs_write_unlock_bh (g_lock);
                 return (0);
         }
 
@@ -982,11 +963,11 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
             ksocknal_find_connecting_route_locked (peer) != NULL) {
                 /* Queue the message until a connection is established */
                 list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
-                write_unlock_bh (g_lock);
+                cfs_write_unlock_bh (g_lock);
                 return 0;
         }
 
-        write_unlock_bh (g_lock);
+        cfs_write_unlock_bh (g_lock);
 
         /* NB Routes may be ignored if connections to them failed recently */
         CDEBUG(D_NETERROR, "No usable routes to %s\n", libcfs_id2str(id));
@@ -1017,7 +998,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         LASSERT (payload_niov <= LNET_MAX_IOV);
         /* payload is either all vaddrs or all pages */
         LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
-        LASSERT (!in_interrupt ());
+        LASSERT (!cfs_in_interrupt ());
 
         if (payload_iov != NULL)
                 desc_size = offsetof(ksock_tx_t,
@@ -1072,18 +1053,18 @@ ksocknal_thread_start (int (*fn)(void *arg), void *arg)
         if (pid < 0)
                 return ((int)pid);
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
         ksocknal_data.ksnd_nthreads++;
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
         return (0);
 }
 
 void
 ksocknal_thread_fini (void)
 {
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
         ksocknal_data.ksnd_nthreads--;
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 }
 
 int
@@ -1104,7 +1085,7 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
 
         if (nob_to_skip == 0) {         /* right at next packet boundary now */
                 conn->ksnc_rx_started = 0;
-                mb ();                          /* racing with timeout thread */
+                cfs_mb();                       /* racing with timeout thread */
 
                 switch (conn->ksnc_proto->pro_version) {
                 case  KSOCK_PROTO_V2:
@@ -1176,22 +1157,22 @@ ksocknal_handle_zc_req(ksock_peer_t *peer, __u64 cookie)
         ksock_sched_t  *sched;
         int             rc;
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         conn = ksocknal_find_conn_locked (0, peer);
         if (conn == NULL) {
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                 CERROR("Can't find connection to send zcack.\n");
                 return -ECONNRESET;
         }
 
         sched = conn->ksnc_scheduler;
 
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
         rc = ksocknal_piggyback_zcack(conn, cookie);
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
         if (rc) {
                 /* Ack cookie is piggybacked */
                 return 0;
@@ -1213,18 +1194,18 @@ ksocknal_handle_zc_req(ksock_peer_t *peer, __u64 cookie)
         ksocknal_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
         tx->tx_msg.ksm_zc_ack_cookie = cookie; /* incoming cookie */
 
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         conn = ksocknal_find_conn_locked (0, peer);
         if (conn == NULL) {
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                 ksocknal_free_tx(tx);
                 CERROR("Can't find connection to send zcack.\n");
                 return -ECONNRESET;
         }
         ksocknal_queue_tx_locked(tx, conn);
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
         return 0;
 }
@@ -1236,7 +1217,7 @@ ksocknal_handle_zc_ack(ksock_peer_t *peer, __u64 cookie)
         ksock_tx_t             *tx;
         struct list_head       *ctmp;
 
-        spin_lock(&peer->ksnp_lock);
+        cfs_spin_lock(&peer->ksnp_lock);
 
         list_for_each(ctmp, &peer->ksnp_zc_req_list) {
                 tx = list_entry (ctmp, ksock_tx_t, tx_zc_list);
@@ -1246,12 +1227,12 @@ ksocknal_handle_zc_ack(ksock_peer_t *peer, __u64 cookie)
                 tx->tx_msg.ksm_zc_req_cookie = 0;
                 list_del(&tx->tx_zc_list);
 
-                spin_unlock(&peer->ksnp_lock);
+                cfs_spin_unlock(&peer->ksnp_lock);
 
                 ksocknal_tx_decref(tx);
                 return 0;
         }
-        spin_unlock(&peer->ksnp_lock);
+        cfs_spin_unlock(&peer->ksnp_lock);
 
         return -EPROTO;
 }
@@ -1261,7 +1242,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
 {
         int           rc;
 
-        LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+        LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
 
         /* NB: sched lock NOT held */
        /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
@@ -1475,7 +1456,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 
         LASSERT (conn->ksnc_rx_scheduled);
 
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         switch (conn->ksnc_rx_state) {
         case SOCKNAL_RX_PARSE_WAIT:
@@ -1491,7 +1472,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 
         conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
 
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
         ksocknal_conn_decref(conn);
         return (0);
 }
@@ -1501,13 +1482,13 @@ ksocknal_sched_cansleep(ksock_sched_t *sched)
 {
         int           rc;
 
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         rc = (!ksocknal_data.ksnd_shuttingdown &&
               list_empty(&sched->kss_rx_conns) &&
               list_empty(&sched->kss_tx_conns));
 
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
         return (rc);
 }
 
@@ -1525,18 +1506,10 @@ int ksocknal_scheduler (void *arg)
         cfs_daemonize (name);
         cfs_block_allsigs ();
 
-#if defined(CONFIG_SMP) && defined(CPU_AFFINITY)
-        id = ksocknal_sched2cpu(id);
-        if (cpu_online(id)) {
-                cpumask_t m = CPU_MASK_NONE;
-                cpu_set(id, m);
-                set_cpus_allowed(current, m);
-        } else {
+        if (ksocknal_lib_bind_thread_to_cpu(id))
                 CERROR ("Can't set CPU affinity for %s to %d\n", name, id);
-        }
-#endif /* CONFIG_SMP && CPU_AFFINITY */
 
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         while (!ksocknal_data.ksnd_shuttingdown) {
                 int did_something = 0;
@@ -1556,11 +1529,11 @@ int ksocknal_scheduler (void *arg)
                          * data_ready can set it any time after we release
                          * kss_lock. */
                         conn->ksnc_rx_ready = 0;
-                        spin_unlock_bh (&sched->kss_lock);
+                        cfs_spin_unlock_bh (&sched->kss_lock);
 
                         rc = ksocknal_process_receive(conn);
 
-                        spin_lock_bh (&sched->kss_lock);
+                        cfs_spin_lock_bh (&sched->kss_lock);
 
                         /* I'm the only one that can clear this flag */
                         LASSERT(conn->ksnc_rx_scheduled);
@@ -1617,7 +1590,7 @@ int ksocknal_scheduler (void *arg)
                          * write_space can set it any time after we release
                          * kss_lock. */
                         conn->ksnc_tx_ready = 0;
-                        spin_unlock_bh (&sched->kss_lock);
+                        cfs_spin_unlock_bh (&sched->kss_lock);
 
                         if (!list_empty(&zlist)) {
                                 /* free zombie noop txs, it's fast because 
@@ -1629,13 +1602,13 @@ int ksocknal_scheduler (void *arg)
 
                         if (rc == -ENOMEM || rc == -EAGAIN) {
                                 /* Incomplete send: replace tx on HEAD of tx_queue */
-                                spin_lock_bh (&sched->kss_lock);
+                                cfs_spin_lock_bh (&sched->kss_lock);
                                 list_add (&tx->tx_list, &conn->ksnc_tx_queue);
                         } else {
                                 /* Complete send; tx -ref */
                                 ksocknal_tx_decref (tx);
 
-                                spin_lock_bh (&sched->kss_lock);
+                                cfs_spin_lock_bh (&sched->kss_lock);
                                 /* assume space for more */
                                 conn->ksnc_tx_ready = 1;
                         }
@@ -1658,24 +1631,24 @@ int ksocknal_scheduler (void *arg)
                 }
                 if (!did_something ||           /* nothing to do */
                     ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
-                        spin_unlock_bh (&sched->kss_lock);
+                        cfs_spin_unlock_bh (&sched->kss_lock);
 
                         nloops = 0;
 
                         if (!did_something) {   /* wait for something to do */
-                                rc = wait_event_interruptible_exclusive(
+                                cfs_wait_event_interruptible_exclusive(
                                         sched->kss_waitq,
-                                        !ksocknal_sched_cansleep(sched));
+                                        !ksocknal_sched_cansleep(sched), rc);
                                 LASSERT (rc == 0);
                         } else {
                                 our_cond_resched();
                         }
 
-                        spin_lock_bh (&sched->kss_lock);
+                        cfs_spin_lock_bh (&sched->kss_lock);
                 }
         }
 
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
         ksocknal_thread_fini ();
         return (0);
 }
@@ -1691,7 +1664,7 @@ void ksocknal_read_callback (ksock_conn_t *conn)
 
         sched = conn->ksnc_scheduler;
 
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         conn->ksnc_rx_ready = 1;
 
@@ -1704,7 +1677,7 @@ void ksocknal_read_callback (ksock_conn_t *conn)
 
                 cfs_waitq_signal (&sched->kss_waitq);
         }
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
 
         EXIT;
 }
@@ -1720,7 +1693,7 @@ void ksocknal_write_callback (ksock_conn_t *conn)
 
         sched = conn->ksnc_scheduler;
 
-        spin_lock_bh (&sched->kss_lock);
+        cfs_spin_lock_bh (&sched->kss_lock);
 
         conn->ksnc_tx_ready = 1;
 
@@ -1735,7 +1708,7 @@ void ksocknal_write_callback (ksock_conn_t *conn)
                 cfs_waitq_signal (&sched->kss_waitq);
         }
 
-        spin_unlock_bh (&sched->kss_lock);
+        cfs_spin_unlock_bh (&sched->kss_lock);
 
         EXIT;
 }
@@ -2335,7 +2308,7 @@ ksocknal_connect (ksock_route_t *route)
         deadline = cfs_time_add(cfs_time_current(),
                                 cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         LASSERT (route->ksnr_scheduled);
         LASSERT (!route->ksnr_connecting);
@@ -2375,7 +2348,7 @@ ksocknal_connect (ksock_route_t *route)
                         type = SOCKLND_CONN_BULK_OUT;
                 }
 
-                write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+                cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
                 if (cfs_time_aftereq(cfs_time_current(), deadline)) {
                         rc = -ETIMEDOUT;
@@ -2406,7 +2379,7 @@ ksocknal_connect (ksock_route_t *route)
                         CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
                                libcfs_nid2str(peer->ksnp_id.nid));
 
-                write_lock_bh (&ksocknal_data.ksnd_global_lock);
+                cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
         }
 
         route->ksnr_scheduled = 0;
@@ -2418,11 +2391,11 @@ ksocknal_connect (ksock_route_t *route)
                 ksocknal_launch_connection_locked(route);
         }
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
         return;
 
  failed:
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         route->ksnr_scheduled = 0;
         route->ksnr_connecting = 0;
@@ -2460,7 +2433,7 @@ ksocknal_connect (ksock_route_t *route)
                 list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
         }
 #endif
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         ksocknal_peer_failed(peer);
         ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
@@ -2481,13 +2454,13 @@ ksocknal_connd_ready(void)
 {
         int            rc;
 
-        spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
 
         rc = ksocknal_data.ksnd_shuttingdown ||
              !list_empty(&ksocknal_data.ksnd_connd_connreqs) ||
              ksocknal_connd_connect_route_locked();
 
-        spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
 
         return rc;
 }
@@ -2499,12 +2472,13 @@ ksocknal_connd (void *arg)
         char               name[16];
         ksock_connreq_t   *cr;
         ksock_route_t     *route;
+        int                rc = 0;
 
         snprintf (name, sizeof (name), "socknal_cd%02ld", id);
         cfs_daemonize (name);
         cfs_block_allsigs ();
 
-        spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
 
         while (!ksocknal_data.ksnd_shuttingdown) {
 
@@ -2514,14 +2488,14 @@ ksocknal_connd (void *arg)
                                         ksock_connreq_t, ksncr_list);
 
                         list_del(&cr->ksncr_list);
-                        spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+                        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
 
                         ksocknal_create_conn(cr->ksncr_ni, NULL,
                                              cr->ksncr_sock, SOCKLND_CONN_NONE);
                         lnet_ni_decref(cr->ksncr_ni);
                         LIBCFS_FREE(cr, sizeof(*cr));
 
-                        spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+                        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
                 }
 
                 if (ksocknal_connd_connect_route_locked()) {
@@ -2531,25 +2505,25 @@ ksocknal_connd (void *arg)
 
                         list_del (&route->ksnr_connd_list);
                         ksocknal_data.ksnd_connd_connecting++;
-                        spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+                        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
 
                         ksocknal_connect (route);
                         ksocknal_route_decref(route);
 
-                        spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+                        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
                         ksocknal_data.ksnd_connd_connecting--;
                 }
 
-                spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+                cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
 
-                wait_event_interruptible_exclusive(
+                cfs_wait_event_interruptible_exclusive(
                         ksocknal_data.ksnd_connd_waitq,
-                        ksocknal_connd_ready());
+                        ksocknal_connd_ready(), rc);
 
-                spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+                cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
         }
 
-        spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
 
         ksocknal_thread_fini ();
         return (0);
@@ -2571,7 +2545,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
 
                /* libcfs_sock_error() resets the socket's error code on
                 * some platforms (like Darwin 8.x) */
-                error = SOCK_ERROR(conn->ksnc_sock);
+                error = libcfs_sock_error(conn->ksnc_sock);
                 if (error != 0) {
                         ksocknal_conn_addref(conn);
 
@@ -2622,7 +2596,7 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
                 }
 
                 if ((!list_empty(&conn->ksnc_tx_queue) ||
-                     SOCK_WMEM_QUEUED(conn->ksnc_sock) != 0) &&
+                     libcfs_sock_wmem_queued(conn->ksnc_sock) != 0) &&
                     cfs_time_aftereq(cfs_time_current(),
                                      conn->ksnc_tx_deadline)) {
                         /* Timed out messages queued for sending or
@@ -2653,14 +2627,14 @@ ksocknal_check_peer_timeouts (int idx)
         /* NB. We expect to have a look at all the peers and not find any
          * connections to time out, so we just use a shared lock while we
          * take a look... */
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         list_for_each (ptmp, peers) {
                 peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
                 conn = ksocknal_find_timed_out_conn (peer);
 
                 if (conn != NULL) {
-                        read_unlock (&ksocknal_data.ksnd_global_lock);
+                        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
                         ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
 
@@ -2672,7 +2646,7 @@ ksocknal_check_peer_timeouts (int idx)
                 }
         }
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 }
 
 int
@@ -2694,7 +2668,7 @@ ksocknal_reaper (void *arg)
         CFS_INIT_LIST_HEAD(&enomem_conns);
         cfs_waitlink_init (&wait);
 
-        spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
 
         while (!ksocknal_data.ksnd_shuttingdown) {
 
@@ -2703,12 +2677,12 @@ ksocknal_reaper (void *arg)
                                            ksock_conn_t, ksnc_list);
                         list_del (&conn->ksnc_list);
 
-                        spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+                        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
                         ksocknal_terminate_conn (conn);
                         ksocknal_conn_decref(conn);
 
-                        spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+                        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
                         continue;
                 }
 
@@ -2717,11 +2691,11 @@ ksocknal_reaper (void *arg)
                                            ksock_conn_t, ksnc_list);
                         list_del (&conn->ksnc_list);
 
-                        spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+                        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
                         ksocknal_destroy_conn (conn);
 
-                        spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+                        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
                         continue;
                 }
 
@@ -2730,7 +2704,7 @@ ksocknal_reaper (void *arg)
                         list_del_init(&ksocknal_data.ksnd_enomem_conns);
                 }
 
-                spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+                cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
                 /* reschedule all the connections that stalled with ENOMEM... */
                 nenomem_conns = 0;
@@ -2741,14 +2715,14 @@ ksocknal_reaper (void *arg)
 
                         sched = conn->ksnc_scheduler;
 
-                        spin_lock_bh (&sched->kss_lock);
+                        cfs_spin_lock_bh (&sched->kss_lock);
 
                         LASSERT (conn->ksnc_tx_scheduled);
                         conn->ksnc_tx_ready = 1;
                         list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
                         cfs_waitq_signal (&sched->kss_waitq);
 
-                        spin_unlock_bh (&sched->kss_lock);
+                        cfs_spin_unlock_bh (&sched->kss_lock);
                         nenomem_conns++;
                 }
 
@@ -2790,7 +2764,7 @@ ksocknal_reaper (void *arg)
                 ksocknal_data.ksnd_reaper_waketime =
                         cfs_time_add(cfs_time_current(), timeout);
 
-                set_current_state (TASK_INTERRUPTIBLE);
+                cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
                 cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
 
                 if (!ksocknal_data.ksnd_shuttingdown &&
@@ -2798,13 +2772,13 @@ ksocknal_reaper (void *arg)
                     list_empty (&ksocknal_data.ksnd_zombie_conns))
                         cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE, timeout);
 
-                set_current_state (TASK_RUNNING);
+                cfs_set_current_state (CFS_TASK_RUNNING);
                 cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
 
-                spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+                cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
         }
 
-        spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
 
         ksocknal_thread_fini ();
         return (0);
index 063c5b9..800d4f5 100644 (file)
@@ -342,13 +342,13 @@ ksocknal_lib_bind_irq (unsigned int irq)
 
         info = &ksocknal_data.ksnd_irqinfo[irq];
 
-        write_lock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
 
         LASSERT (info->ksni_valid);
         bind = !info->ksni_bound;
         info->ksni_bound = 1;
 
-        write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
 
         if (!bind)                              /* bound already */
                 return;
@@ -1007,7 +1007,7 @@ ksocknal_data_ready (struct sock *sk, int n)
 
         /* interleave correctly with closing sockets... */
         LASSERT(!in_irq());
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         conn = sk->sk_user_data;
         if (conn == NULL) {             /* raced with ksocknal_terminate_conn */
@@ -1016,7 +1016,7 @@ ksocknal_data_ready (struct sock *sk, int n)
         } else
                 ksocknal_read_callback(conn);
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 
         EXIT;
 }
@@ -1030,7 +1030,7 @@ ksocknal_write_space (struct sock *sk)
 
         /* interleave correctly with closing sockets... */
         LASSERT(!in_irq());
-        read_lock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
 
         conn = sk->sk_user_data;
         wspace = SOCKNAL_WSPACE(sk);
@@ -1049,7 +1049,7 @@ ksocknal_write_space (struct sock *sk)
                 LASSERT (sk->sk_write_space != &ksocknal_write_space);
                 sk->sk_write_space (sk);
 
-                read_unlock (&ksocknal_data.ksnd_global_lock);
+                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
                 return;
         }
 
@@ -1063,7 +1063,7 @@ ksocknal_write_space (struct sock *sk)
                 clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
         }
 
-        read_unlock (&ksocknal_data.ksnd_global_lock);
+        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
 }
 
 void
@@ -1098,3 +1098,72 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
 
         return ;
 }
+
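+/* Report whether an -EAGAIN from the socket was caused by memory pressure:
+ * returns -ENOMEM when neither SOCK_NOSPACE nor ksnc_tx_ready is set (so
+ * write_space won't reschedule the connection and the caller should retry
+ * after a timeout), and 0 otherwise. */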
+int
+ksocknal_lib_memory_pressure(ksock_conn_t *conn)
+{
+        int            rc = 0;
+        ksock_sched_t *sched;
+
+        sched = conn->ksnc_scheduler;
+        cfs_spin_lock_bh (&sched->kss_lock);
+
+        if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
+            !conn->ksnc_tx_ready) {
+                /* SOCK_NOSPACE is set when the socket fills
+                 * and cleared in the write_space callback
+                 * (which also sets ksnc_tx_ready).  If
+                 * SOCK_NOSPACE and ksnc_tx_ready are BOTH
+                 * zero, I didn't fill the socket and
+                 * write_space won't reschedule me, so I
+                 * return -ENOMEM to get my caller to retry
+                 * after a timeout */
+                rc = -ENOMEM;
+        }
+
+        cfs_spin_unlock_bh (&sched->kss_lock);
+
+        return rc;
+}
+
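+/* Incarnation stamp: wall-clock microseconds since the epoch. */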
+__u64
+ksocknal_lib_new_incarnation(void)
+{
+        struct timeval tv;
+
+        /* The incarnation number is the time this module loaded and it
+         * identifies this particular instance of the socknal.  Hopefully
+         * we won't be able to reboot more frequently than 1MHz for the
+         * foreseeable future :) */
+
+        do_gettimeofday(&tv);
+
+        return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+}
+
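+/* Pin the calling thread to the CPU that scheduler slot 'id' maps to.
+ * Returns 0 on success (or when CPU affinity support is not compiled in),
+ * -1 if the mapped CPU is offline. */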
+int
+ksocknal_lib_bind_thread_to_cpu(int id)
+{
+#if defined(CONFIG_SMP) && defined(CPU_AFFINITY)
+        id = ksocknal_sched2cpu(id);
+        if (cpu_online(id)) {
+                cpumask_t m = CPU_MASK_NONE;
+                cpu_set(id, m);
+                set_cpus_allowed(current, m);
+                return 0;
+        }
+
+        return -1;
+
+#else
+        return 0;
+#endif
+}