* - wait_for_completion(c)
*/
+/*
+ * spinlock "implementation"
+ */
+
+typedef spinlock_t cfs_spinlock_t;
+
+#define cfs_spin_lock_init(lock) spin_lock_init(lock)
+#define cfs_spin_lock(lock) spin_lock(lock)
+#define cfs_spin_lock_bh(lock) spin_lock_bh(lock)
+#define cfs_spin_unlock(lock) spin_unlock(lock)
+#define cfs_spin_unlock_bh(lock) spin_unlock_bh(lock)
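+
+/*
+ * Usage sketch (names here are illustrative only): call sites look
+ * exactly like the native primitives; the _bh variants also exclude
+ * local softirq context, e.g. socket callbacks.
+ */
+static inline void
+cfs_spin_example(cfs_spinlock_t *lock, int *counter)
+{
+        cfs_spin_lock_bh(lock);         /* also blocks local BHs */
+        (*counter)++;                   /* critical section */
+        cfs_spin_unlock_bh(lock);
+}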
+
+/*
+ * rwlock "implementation"
+ */
+
+typedef rwlock_t cfs_rwlock_t;
+
+#define cfs_rwlock_init(lock) rwlock_init(lock)
+#define cfs_read_lock(lock) read_lock(lock)
+#define cfs_read_unlock(lock) read_unlock(lock)
+#define cfs_write_lock_bh(lock) write_lock_bh(lock)
+#define cfs_write_unlock_bh(lock) write_unlock_bh(lock)
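+
+/*
+ * Usage sketch (illustrative only): readers may hold the lock
+ * concurrently while writers get exclusive access, mirroring how
+ * ksnd_global_lock is used below.
+ */
+static inline int
+cfs_rwlock_example(cfs_rwlock_t *lock, int *shared, int newval)
+{
+        int old;
+
+        cfs_read_lock(lock);            /* shared: lookups */
+        old = *shared;
+        cfs_read_unlock(lock);
+
+        cfs_write_lock_bh(lock);        /* exclusive: updates */
+        *shared = newval;
+        cfs_write_unlock_bh(lock);
+        return old;
+}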
+
/* __KERNEL__ */
#else
*/
#define CFS_TASK_INTERRUPTIBLE TASK_INTERRUPTIBLE
#define CFS_TASK_UNINT TASK_UNINTERRUPTIBLE
+#define CFS_TASK_RUNNING TASK_RUNNING
+
+#define cfs_set_current_state(state) set_current_state(state)
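+
+/*
+ * Usage sketch (illustrative only): the classic wait loop these map
+ * onto, as used by the reaper thread below:
+ *
+ *	cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ *	cfs_waitq_add(&waitq, &wait);
+ *	cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+ *	cfs_set_current_state(CFS_TASK_RUNNING);
+ *	cfs_waitq_del(&waitq, &wait);
+ */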
typedef wait_queue_t cfs_waitlink_t;
typedef wait_queue_head_t cfs_waitq_t;
#define cfs_waitq_wait_event_interruptible_timeout wait_event_interruptible_timeout
#endif
+#define cfs_wait_event_interruptible_exclusive(wq, condition, rc) \
+({ \
+ rc = wait_event_interruptible_exclusive(wq, condition); \
+})
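+
+/*
+ * Usage sketch (illustrative only): the macro hands the result back
+ * through its 'rc' argument, so callers need not rely on the GCC
+ * statement-expression value:
+ */
+static inline int
+cfs_waitq_example(cfs_waitq_t *wq, int *flag)
+{
+        int rc;
+
+        /* sleep until *flag is set or a signal arrives */
+        cfs_wait_event_interruptible_exclusive(*wq, *flag != 0, rc);
+        return rc;
+}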
+
+/*
+ * atomic
+ */
+
+typedef atomic_t cfs_atomic_t;
+
+#define cfs_atomic_read(atom) atomic_read(atom)
+#define cfs_atomic_inc(atom) atomic_inc(atom)
+#define cfs_atomic_dec(atom) atomic_dec(atom)
+#define cfs_atomic_dec_and_test(atom) atomic_dec_and_test(atom)
+#define cfs_atomic_set(atom, value) atomic_set(atom, value)
+#define cfs_atomic_add(value, atom) atomic_add(value, atom)
+#define cfs_atomic_sub(value, atom) atomic_sub(value, atom)
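+
+/*
+ * Usage sketch (illustrative only): the reference-counting idiom used
+ * throughout the socknal, expressed with the wrappers.
+ */
+static inline void
+cfs_atomic_example(cfs_atomic_t *ref)
+{
+        cfs_atomic_set(ref, 1);         /* creator's reference */
+        cfs_atomic_inc(ref);            /* another holder */
+        cfs_atomic_dec(ref);            /* that holder drops out */
+        if (cfs_atomic_dec_and_test(ref)) {
+                /* count reached zero: last reference gone, free here */
+        }
+}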
+
+/*
+ * membar
+ */
+
+#define cfs_mb() mb()
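+
+/*
+ * Usage sketch (illustrative only): publish data before the flag that
+ * announces it, as done below when ordering conn timestamps against
+ * the peer's conn list linkage.
+ */
+static inline void
+cfs_mb_example(int *data, int *ready)
+{
+        *data = 1;
+        cfs_mb();       /* order the store to *data before *ready */
+        *ready = 1;
+}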
+
+/*
+ * interrupt
+ */
+
+#define cfs_in_interrupt() in_interrupt()
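+
+/*
+ * Usage sketch (illustrative only): paths that may sleep or allocate
+ * assert they are not in interrupt context, e.g.
+ *
+ *	LASSERT(!cfs_in_interrupt());
+ */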
+
#else /* !__KERNEL__ */
typedef struct proc_dir_entry cfs_proc_dir_entry_t;
#endif
#define SOCK_SNDBUF(so) ((so)->sk->sk_sndbuf)
-#define SOCK_WMEM_QUEUED(so) ((so)->sk->sk_wmem_queued)
-#define SOCK_ERROR(so) ((so)->sk->sk_err)
#define SOCK_TEST_NOSPACE(so) test_bit(SOCK_NOSPACE, &(so)->flags)
+static inline int
+libcfs_sock_error(struct socket *sock)
+{
+ return sock->sk->sk_err;
+}
+
+static inline int
+libcfs_sock_wmem_queued(struct socket *sock)
+{
+ return sock->sk->sk_wmem_queued;
+}
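+
+/*
+ * These typed inline helpers replace the SOCK_ERROR()/SOCK_WMEM_QUEUED()
+ * macros above.  A sketch of typical call sites (variable names
+ * illustrative only):
+ *
+ *	if (libcfs_sock_error(sock) != 0)
+ *		... the connection has failed ...
+ *	bufnob = libcfs_sock_wmem_queued(sock);
+ */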
+
#else /* !__KERNEL__ */
#include "../user-tcpip.h"
if (route == NULL)
return (NULL);
- atomic_set (&route->ksnr_refcount, 1);
+ cfs_atomic_set (&route->ksnr_refcount, 1);
route->ksnr_peer = NULL;
route->ksnr_retry_interval = 0; /* OK to connect at any time */
route->ksnr_ipaddr = ipaddr;
void
ksocknal_destroy_route (ksock_route_t *route)
{
- LASSERT (atomic_read(&route->ksnr_refcount) == 0);
+ LASSERT (cfs_atomic_read(&route->ksnr_refcount) == 0);
if (route->ksnr_peer != NULL)
ksocknal_peer_decref(route->ksnr_peer);
LASSERT (id.nid != LNET_NID_ANY);
LASSERT (id.pid != LNET_PID_ANY);
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LIBCFS_ALLOC (peer, sizeof (*peer));
if (peer == NULL)
peer->ksnp_ni = ni;
peer->ksnp_id = id;
- atomic_set (&peer->ksnp_refcount, 1); /* 1 ref for caller */
+ cfs_atomic_set (&peer->ksnp_refcount, 1); /* 1 ref for caller */
peer->ksnp_closing = 0;
peer->ksnp_accepting = 0;
peer->ksnp_zc_next_cookie = 1;
CFS_INIT_LIST_HEAD (&peer->ksnp_routes);
CFS_INIT_LIST_HEAD (&peer->ksnp_tx_queue);
CFS_INIT_LIST_HEAD (&peer->ksnp_zc_req_list);
- spin_lock_init(&peer->ksnp_lock);
+ cfs_spin_lock_init(&peer->ksnp_lock);
- spin_lock_bh (&net->ksnn_lock);
+ cfs_spin_lock_bh (&net->ksnn_lock);
if (net->ksnn_shutdown) {
- spin_unlock_bh (&net->ksnn_lock);
+ cfs_spin_unlock_bh (&net->ksnn_lock);
LIBCFS_FREE(peer, sizeof(*peer));
CERROR("Can't create peer: network shutdown\n");
net->ksnn_npeers++;
- spin_unlock_bh (&net->ksnn_lock);
+ cfs_spin_unlock_bh (&net->ksnn_lock);
*peerp = peer;
return 0;
CDEBUG (D_NET, "peer %s %p deleted\n",
libcfs_id2str(peer->ksnp_id), peer);
- LASSERT (atomic_read (&peer->ksnp_refcount) == 0);
+ LASSERT (cfs_atomic_read (&peer->ksnp_refcount) == 0);
LASSERT (peer->ksnp_accepting == 0);
LASSERT (list_empty (&peer->ksnp_conns));
LASSERT (list_empty (&peer->ksnp_routes));
* until they are destroyed, so we can be assured that _all_ state to
* do with this peer has been cleaned up when its refcount drops to
* zero. */
- spin_lock_bh (&net->ksnn_lock);
+ cfs_spin_lock_bh (&net->ksnn_lock);
net->ksnn_npeers--;
- spin_unlock_bh (&net->ksnn_lock);
+ cfs_spin_unlock_bh (&net->ksnn_lock);
}
ksock_peer_t *
CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
peer, libcfs_id2str(id),
- atomic_read(&peer->ksnp_refcount));
+ cfs_atomic_read(&peer->ksnp_refcount));
return (peer);
}
return (NULL);
{
ksock_peer_t *peer;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
peer = ksocknal_find_peer_locked (ni, id);
if (peer != NULL) /* +1 ref for caller? */
ksocknal_peer_addref(peer);
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return (peer);
}
int j;
int rc = -ENOENT;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
}
}
out:
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return (rc);
}
return (-ENOMEM);
}
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
/* always called with a ref on ni, so shutdown can't have started */
LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
route2->ksnr_share_count++;
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
return (0);
}
int i;
int rc = -ENOENT;
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
if (id.nid != LNET_NID_ANY)
lo = hi = ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers;
}
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
ksocknal_txlist_done(ni, &zombies, 1);
struct list_head *ctmp;
int i;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
ksocknal_conn_addref(conn);
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return (conn);
}
}
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return (NULL);
}
int i;
int nip;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
nip = net->ksnn_ninterfaces;
LASSERT (nip <= LNET_MAX_INTERFACES);
/* Only offer interfaces for additional connections if I have
* more than one. */
if (nip < 2) {
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return 0;
}
LASSERT (ipaddrs[i] != 0);
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return (nip);
}
int
ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
{
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
ksock_net_t *net = peer->ksnp_ni->ni_data;
ksock_interface_t *iface;
ksock_interface_t *best_iface;
/* Also note that I'm not going to return more than n_peerips
* interfaces, even if I have more myself */
- write_lock_bh (global_lock);
+ cfs_write_lock_bh (global_lock);
LASSERT (n_peerips <= LNET_MAX_INTERFACES);
LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
/* Overwrite input peer IP addresses */
memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
- write_unlock_bh (global_lock);
+ cfs_write_unlock_bh (global_lock);
return (n_ips);
}
__u32 *peer_ipaddrs, int npeer_ipaddrs)
{
ksock_route_t *newroute = NULL;
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
lnet_ni_t *ni = peer->ksnp_ni;
ksock_net_t *net = ni->ni_data;
struct list_head *rtmp;
* expecting to be dealing with small numbers of interfaces, so the
* O(n**3)-ness here shouldn't matter */
- write_lock_bh (global_lock);
+ cfs_write_lock_bh (global_lock);
if (net->ksnn_ninterfaces < 2) {
/* Only create additional connections
* if I have > 1 interface */
- write_unlock_bh (global_lock);
+ cfs_write_unlock_bh (global_lock);
return;
}
if (newroute != NULL) {
newroute->ksnr_ipaddr = peer_ipaddrs[i];
} else {
- write_unlock_bh (global_lock);
+ cfs_write_unlock_bh (global_lock);
newroute = ksocknal_create_route(peer_ipaddrs[i], port);
if (newroute == NULL)
return;
- write_lock_bh (global_lock);
+ cfs_write_lock_bh (global_lock);
}
if (peer->ksnp_closing) {
newroute = NULL;
}
- write_unlock_bh (global_lock);
+ cfs_write_unlock_bh (global_lock);
if (newroute != NULL)
ksocknal_route_decref(newroute);
}
cr->ksncr_ni = ni;
cr->ksncr_sock = sock;
- spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
- spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
return 0;
}
ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
cfs_socket_t *sock, int type)
{
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
CFS_LIST_HEAD (zombies);
lnet_process_id_t peerid;
struct list_head *tmp;
conn->ksnc_sock = sock;
/* 2 refs: 1 for conn, plus an extra ref that prevents the socket
* being closed before the connection is established */
- atomic_set (&conn->ksnc_sock_refcount, 2);
+ cfs_atomic_set (&conn->ksnc_sock_refcount, 2);
conn->ksnc_type = type;
ksocknal_lib_save_callback(sock, conn);
- atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
+ cfs_atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
conn->ksnc_zc_capable = ksocknal_lib_zc_capable(sock);
conn->ksnc_rx_ready = 0;
conn->ksnc_tx_ready = 0;
conn->ksnc_tx_scheduled = 0;
conn->ksnc_tx_mono = NULL;
- atomic_set (&conn->ksnc_tx_nob, 0);
+ cfs_atomic_set (&conn->ksnc_tx_nob, 0);
LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
kshm_ips[LNET_MAX_INTERFACES]));
hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
peerid = peer->ksnp_id;
- write_lock_bh(global_lock);
+ cfs_write_lock_bh(global_lock);
conn->ksnc_proto = peer->ksnp_proto;
- write_unlock_bh(global_lock);
+ cfs_write_unlock_bh(global_lock);
if (conn->ksnc_proto == NULL) {
conn->ksnc_proto = &ksocknal_protocol_v2x;
if (active) {
ksocknal_peer_addref(peer);
- write_lock_bh (global_lock);
+ cfs_write_lock_bh (global_lock);
} else {
rc = ksocknal_create_peer(&peer, ni, peerid);
if (rc != 0)
goto failed_1;
- write_lock_bh (global_lock);
+ cfs_write_lock_bh (global_lock);
/* called with a ref on ni, so shutdown can't have started */
LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
conn->ksnc_scheduler = sched;
/* Set the deadline for the outgoing HELLO to drain */
- conn->ksnc_tx_bufnob = SOCK_WMEM_QUEUED(sock);
+ conn->ksnc_tx_bufnob = libcfs_sock_wmem_queued(sock);
conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- mb(); /* order with adding to peer's conn list */
+ cfs_mb(); /* order with adding to peer's conn list */
list_add (&conn->ksnc_list, &peer->ksnp_conns);
ksocknal_conn_addref(conn);
ksocknal_queue_tx_locked (tx, conn);
}
- write_unlock_bh (global_lock);
+ cfs_write_unlock_bh (global_lock);
/* We've now got a new connection. Any errors from here on are just
* like "normal" comms errors and we close the connection normally.
if (rc == 0)
rc = ksocknal_lib_setup_sock(sock);
- write_lock_bh(global_lock);
+ cfs_write_lock_bh(global_lock);
/* NB my callbacks block while I hold ksnd_global_lock */
ksocknal_lib_set_callback(sock, conn);
if (!active)
peer->ksnp_accepting--;
- write_unlock_bh(global_lock);
+ cfs_write_unlock_bh(global_lock);
if (rc != 0) {
- write_lock_bh(global_lock);
+ cfs_write_lock_bh(global_lock);
ksocknal_close_conn_locked(conn, rc);
- write_unlock_bh(global_lock);
+ cfs_write_unlock_bh(global_lock);
} else if (ksocknal_connsock_addref(conn) == 0) {
/* Allow I/O to proceed. */
ksocknal_read_callback(conn);
ksocknal_unlink_peer_locked(peer);
}
- write_unlock_bh (global_lock);
+ cfs_write_unlock_bh (global_lock);
if (warn != NULL) {
if (rc < 0)
ksocknal_send_hello(ni, conn, peerid.nid, hello);
}
- write_lock_bh(global_lock);
+ cfs_write_lock_bh(global_lock);
peer->ksnp_accepting--;
- write_unlock_bh(global_lock);
+ cfs_write_unlock_bh(global_lock);
}
ksocknal_txlist_done(ni, &zombies, 1);
}
}
- spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
list_add_tail (&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
- spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
}
void
* tell LNET I think the peer is dead if it's to another kernel and
* there are no connections or connection attempts in existence. */
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
list_empty(&peer->ksnp_conns) &&
peer->ksnp_last_alive);
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
if (notify)
lnet_notify (peer->ksnp_ni, peer->ksnp_id.nid, 0,
LASSERT(conn->ksnc_closing);
/* wake up the scheduler to "send" all remaining packets to /dev/null */
- spin_lock_bh (&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
/* a closing conn is always ready to tx */
conn->ksnc_tx_ready = 1;
cfs_waitq_signal (&sched->kss_waitq);
}
- spin_unlock_bh (&sched->kss_lock);
+ cfs_spin_unlock_bh (&sched->kss_lock);
- spin_lock(&peer->ksnp_lock);
+ cfs_spin_lock(&peer->ksnp_lock);
list_for_each_safe(tmp, nxt, &peer->ksnp_zc_req_list) {
tx = list_entry(tmp, ksock_tx_t, tx_zc_list);
list_add(&tx->tx_zc_list, &zlist);
}
- spin_unlock(&peer->ksnp_lock);
+ cfs_spin_unlock(&peer->ksnp_lock);
list_for_each_safe(tmp, nxt, &zlist) {
tx = list_entry(tmp, ksock_tx_t, tx_zc_list);
}
/* serialise with callbacks */
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
peer->ksnp_error = 0; /* avoid multiple notifications */
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
if (failed)
ksocknal_peer_failed(peer);
{
/* Queue the conn for the reaper to destroy */
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) == 0);
- spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
- spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
}
void
/* Final coup-de-grace of the reaper */
CDEBUG (D_NET, "connection %p\n", conn);
- LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
- LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
+ LASSERT (cfs_atomic_read (&conn->ksnc_conn_refcount) == 0);
+ LASSERT (cfs_atomic_read (&conn->ksnc_sock_refcount) == 0);
LASSERT (conn->ksnc_sock == NULL);
LASSERT (conn->ksnc_route == NULL);
LASSERT (!conn->ksnc_tx_scheduled);
__u32 ipaddr = conn->ksnc_ipaddr;
int count;
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
count = ksocknal_close_peer_conns_locked (peer, ipaddr, why);
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
return (count);
}
int i;
int count = 0;
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
if (id.nid != LNET_NID_ANY)
lo = hi = ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers;
}
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
/* wildcards always succeed */
if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
ksock_conn_t *conn;
for (index = 0; ; index++) {
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
i = 0;
conn = NULL;
}
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
if (conn == NULL)
break;
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
for (j = 0; ; j++) {
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
index = 0;
peer = NULL;
}
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
if (peer != NULL) {
rc = 0;
netmask == 0)
return (-EINVAL);
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
iface = ksocknal_ip2iface(ni, ipaddress);
if (iface != NULL) {
/* NB only new connections will pay attention to the new interface! */
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
return (rc);
}
int i;
int j;
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
for (i = 0; i < net->ksnn_ninterfaces; i++) {
this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
}
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
return (rc);
}
ksock_net_t *net = ni->ni_data;
ksock_interface_t *iface;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
if (data->ioc_count < 0 ||
data->ioc_count >= net->ksnn_ninterfaces) {
data->ioc_u32[3] = iface->ksni_nroutes;
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return rc;
}
void
ksocknal_free_buffers (void)
{
- LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
+ LASSERT (cfs_atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
if (ksocknal_data.ksnd_schedulers != NULL)
LIBCFS_FREE (ksocknal_data.ksnd_schedulers,
sizeof (struct list_head) *
ksocknal_data.ksnd_peer_hash_size);
- spin_lock(&ksocknal_data.ksnd_tx_lock);
+ cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
struct list_head zlist;
list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
while(!list_empty(&zlist)) {
tx = list_entry(zlist.next, ksock_tx_t, tx_list);
LIBCFS_FREE(tx, tx->tx_desc_size);
}
} else {
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
}
}
int i;
CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
- atomic_read (&libcfs_kmemory));
+ cfs_atomic_read (&libcfs_kmemory));
LASSERT (ksocknal_data.ksnd_nnets == 0);
switch (ksocknal_data.ksnd_init) {
}
i = 4;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
while (ksocknal_data.ksnd_nthreads != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d threads to terminate\n",
ksocknal_data.ksnd_nthreads);
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
cfs_pause(cfs_time_seconds(1));
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
ksocknal_free_buffers();
}
CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
- atomic_read (&libcfs_kmemory));
+ cfs_atomic_read (&libcfs_kmemory));
PORTAL_MODULE_UNUSE;
}
-
-__u64
-ksocknal_new_incarnation (void)
-{
- struct timeval tv;
-
- /* The incarnation number is the time this module loaded and it
- * identifies this particular instance of the socknal. Hopefully
- * we won't be able to reboot more frequently than 1MHz for the
- * forseeable future :) */
-
- do_gettimeofday(&tv);
-
- return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
-}
-
int
ksocknal_base_startup (void)
{
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
- rwlock_init(&ksocknal_data.ksnd_global_lock);
+ cfs_rwlock_init(&ksocknal_data.ksnd_global_lock);
- spin_lock_init (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_lock_init (&ksocknal_data.ksnd_reaper_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
cfs_waitq_init(&ksocknal_data.ksnd_reaper_waitq);
- spin_lock_init (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_lock_init (&ksocknal_data.ksnd_connd_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
cfs_waitq_init(&ksocknal_data.ksnd_connd_waitq);
- spin_lock_init (&ksocknal_data.ksnd_tx_lock);
+ cfs_spin_lock_init (&ksocknal_data.ksnd_tx_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
/* NB memset above zeros whole of ksocknal_data, including
for (i = 0; i < ksocknal_data.ksnd_nschedulers; i++) {
ksock_sched_t *kss = &ksocknal_data.ksnd_schedulers[i];
- spin_lock_init (&kss->kss_lock);
+ cfs_spin_lock_init (&kss->kss_lock);
CFS_INIT_LIST_HEAD (&kss->kss_rx_conns);
CFS_INIT_LIST_HEAD (&kss->kss_tx_conns);
CFS_INIT_LIST_HEAD (&kss->kss_zombie_noop_txs);
struct list_head *tmp;
int i;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
"closing %d, accepting %d, err %d, zcookie "LPU64", "
"txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
- atomic_read(&peer->ksnp_refcount),
+ cfs_atomic_read(&peer->ksnp_refcount),
peer->ksnp_sharecount, peer->ksnp_closing,
peer->ksnp_accepting, peer->ksnp_error,
peer->ksnp_zc_next_cookie,
list_for_each (tmp, &peer->ksnp_routes) {
route = list_entry(tmp, ksock_route_t, ksnr_list);
CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
- "del %d\n", atomic_read(&route->ksnr_refcount),
+ "del %d\n", cfs_atomic_read(&route->ksnr_refcount),
route->ksnr_scheduled, route->ksnr_connecting,
route->ksnr_connected, route->ksnr_deleted);
}
list_for_each (tmp, &peer->ksnp_conns) {
conn = list_entry(tmp, ksock_conn_t, ksnc_list);
CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
- atomic_read(&conn->ksnc_conn_refcount),
- atomic_read(&conn->ksnc_sock_refcount),
+ cfs_atomic_read(&conn->ksnc_conn_refcount),
+ cfs_atomic_read(&conn->ksnc_sock_refcount),
conn->ksnc_type, conn->ksnc_closing);
}
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return;
}
LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
LASSERT(ksocknal_data.ksnd_nnets > 0);
- spin_lock_bh (&net->ksnn_lock);
+ cfs_spin_lock_bh (&net->ksnn_lock);
net->ksnn_shutdown = 1; /* prevent new peers */
- spin_unlock_bh (&net->ksnn_lock);
+ cfs_spin_unlock_bh (&net->ksnn_lock);
/* Delete all peers */
ksocknal_del_peer(ni, anyid, 0);
/* Wait for all peer state to clean up */
i = 2;
- spin_lock_bh (&net->ksnn_lock);
+ cfs_spin_lock_bh (&net->ksnn_lock);
while (net->ksnn_npeers != 0) {
- spin_unlock_bh (&net->ksnn_lock);
+ cfs_spin_unlock_bh (&net->ksnn_lock);
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
ksocknal_debug_peerhash(ni);
- spin_lock_bh (&net->ksnn_lock);
+ cfs_spin_lock_bh (&net->ksnn_lock);
}
- spin_unlock_bh (&net->ksnn_lock);
+ cfs_spin_unlock_bh (&net->ksnn_lock);
for (i = 0; i < net->ksnn_ninterfaces; i++) {
LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
goto fail_0;
memset(net, 0, sizeof(*net));
- spin_lock_init(&net->ksnn_lock);
- net->ksnn_incarnation = ksocknal_new_incarnation();
+ cfs_spin_lock_init(&net->ksnn_lock);
+ net->ksnn_incarnation = ksocknal_lib_new_incarnation();
ni->ni_data = net;
ni->ni_maxtxcredits = *ksocknal_tunables.ksnd_credits;
ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peercredits;
typedef struct /* per scheduler state */
{
- spinlock_t kss_lock; /* serialise */
+ cfs_spinlock_t kss_lock; /* serialise */
struct list_head kss_rx_conns; /* conn waiting to be read */
struct list_head kss_tx_conns; /* conn waiting to be written */
struct list_head kss_zombie_noop_txs; /* zombie noop tx list */
typedef struct
{
__u64 ksnn_incarnation; /* my epoch */
- spinlock_t ksnn_lock; /* serialise */
+ cfs_spinlock_t ksnn_lock; /* serialise */
int ksnn_npeers; /* # peers */
int ksnn_shutdown; /* shutting down? */
int ksnn_ninterfaces; /* IP interfaces */
int ksnd_init; /* initialisation state */
int ksnd_nnets; /* # networks set up */
- rwlock_t ksnd_global_lock; /* stabilize peer/conn ops */
+ cfs_rwlock_t ksnd_global_lock; /* stabilize peer/conn ops */
struct list_head *ksnd_peers; /* hash table of all my known peers */
int ksnd_peer_hash_size; /* size of ksnd_peers */
int ksnd_nschedulers; /* # schedulers */
ksock_sched_t *ksnd_schedulers; /* their state */
- atomic_t ksnd_nactive_txs; /* #active txs */
+ cfs_atomic_t ksnd_nactive_txs; /* #active txs */
struct list_head ksnd_deathrow_conns; /* conns to close: reaper_lock*/
struct list_head ksnd_zombie_conns; /* conns to free: reaper_lock */
struct list_head ksnd_enomem_conns; /* conns to retry: reaper_lock*/
cfs_waitq_t ksnd_reaper_waitq; /* reaper sleeps here */
cfs_time_t ksnd_reaper_waketime; /* when reaper will wake */
- spinlock_t ksnd_reaper_lock; /* serialise */
+ cfs_spinlock_t ksnd_reaper_lock; /* serialise */
int ksnd_enomem_tx; /* test ENOMEM sender */
int ksnd_stall_tx; /* test sluggish sender */
struct list_head ksnd_connd_routes; /* routes waiting to be connected */
cfs_waitq_t ksnd_connd_waitq; /* connds sleep here */
int ksnd_connd_connecting;/* # connds connecting */
- spinlock_t ksnd_connd_lock; /* serialise */
+ cfs_spinlock_t ksnd_connd_lock; /* serialise */
struct list_head ksnd_idle_noop_txs; /* list head for freed noop tx */
- spinlock_t ksnd_tx_lock; /* serialise, NOT safe in g_lock */
+ cfs_spinlock_t ksnd_tx_lock; /* serialise, NOT safe in g_lock */
ksock_irqinfo_t ksnd_irqinfo[NR_IRQS];/* irq->scheduler lookup */
{
struct list_head tx_list; /* queue on conn for transmission etc */
struct list_head tx_zc_list; /* queue on peer for ZC request */
- atomic_t tx_refcount; /* tx reference count */
+ cfs_atomic_t tx_refcount; /* tx reference count */
int tx_nob; /* # packet bytes */
int tx_resid; /* residual bytes */
int tx_niov; /* # packet iovec frags */
cfs_socket_t *ksnc_sock; /* actual socket */
void *ksnc_saved_data_ready; /* socket's original data_ready() callback */
void *ksnc_saved_write_space; /* socket's original write_space() callback */
- atomic_t ksnc_conn_refcount; /* conn refcount */
- atomic_t ksnc_sock_refcount; /* sock refcount */
+ cfs_atomic_t ksnc_conn_refcount; /* conn refcount */
+ cfs_atomic_t ksnc_sock_refcount; /* sock refcount */
ksock_sched_t *ksnc_scheduler; /* who schedules this connection */
__u32 ksnc_myipaddr; /* my IP */
__u32 ksnc_ipaddr; /* peer's IP */
* b. noop ZC-ACK packet */
cfs_time_t ksnc_tx_deadline; /* when (in jiffies) tx times out */
int ksnc_tx_bufnob; /* send buffer marker */
- atomic_t ksnc_tx_nob; /* # bytes queued */
+ cfs_atomic_t ksnc_tx_nob; /* # bytes queued */
int ksnc_tx_ready; /* write space */
int ksnc_tx_scheduled; /* being progressed */
struct list_head ksnr_list; /* chain on peer route list */
struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
struct ksock_peer *ksnr_peer; /* owning peer */
- atomic_t ksnr_refcount; /* # users */
+ cfs_atomic_t ksnr_refcount; /* # users */
cfs_time_t ksnr_timeout; /* when (in jiffies) reconnection can happen next */
cfs_duration_t ksnr_retry_interval; /* how long between retries */
__u32 ksnr_myipaddr; /* my IP */
{
struct list_head ksnp_list; /* stash on global peer list */
lnet_process_id_t ksnp_id; /* who's on the other end(s) */
- atomic_t ksnp_refcount; /* # users */
+ cfs_atomic_t ksnp_refcount; /* # users */
int ksnp_sharecount; /* lconf usage counter */
int ksnp_closing; /* being closed */
int ksnp_accepting; /* # passive connections pending */
struct list_head ksnp_conns; /* all active connections */
struct list_head ksnp_routes; /* routes */
struct list_head ksnp_tx_queue; /* waiting packets */
- spinlock_t ksnp_lock; /* serialize, NOT safe in g_lock */
+ cfs_spinlock_t ksnp_lock; /* serialize, NOT safe in g_lock */
struct list_head ksnp_zc_req_list; /* zero copy requests wait for ACK */
cfs_time_t ksnp_last_alive; /* when (in jiffies) I was last alive */
lnet_ni_t *ksnp_ni; /* which network */
static inline void
ksocknal_conn_addref (ksock_conn_t *conn)
{
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
- atomic_inc(&conn->ksnc_conn_refcount);
+ LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
+ cfs_atomic_inc(&conn->ksnc_conn_refcount);
}
extern void ksocknal_queue_zombie_conn (ksock_conn_t *conn);
static inline void
ksocknal_conn_decref (ksock_conn_t *conn)
{
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
- if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
+ LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
+ if (cfs_atomic_dec_and_test(&conn->ksnc_conn_refcount))
ksocknal_queue_zombie_conn(conn);
}
{
int rc = -ESHUTDOWN;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
if (!conn->ksnc_closing) {
- LASSERT (atomic_read(&conn->ksnc_sock_refcount) > 0);
- atomic_inc(&conn->ksnc_sock_refcount);
+ LASSERT (cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
+ cfs_atomic_inc(&conn->ksnc_sock_refcount);
rc = 0;
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return (rc);
}
static inline void
ksocknal_connsock_decref (ksock_conn_t *conn)
{
- LASSERT (atomic_read(&conn->ksnc_sock_refcount) > 0);
- if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
+ LASSERT (cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
+ if (cfs_atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
LASSERT (conn->ksnc_closing);
libcfs_sock_release(conn->ksnc_sock);
conn->ksnc_sock = NULL;
static inline void
ksocknal_tx_addref (ksock_tx_t *tx)
{
- LASSERT (atomic_read(&tx->tx_refcount) > 0);
- atomic_inc(&tx->tx_refcount);
+ LASSERT (cfs_atomic_read(&tx->tx_refcount) > 0);
+ cfs_atomic_inc(&tx->tx_refcount);
}
extern void ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx);
static inline void
ksocknal_tx_decref (ksock_tx_t *tx)
{
- LASSERT (atomic_read(&tx->tx_refcount) > 0);
- if (atomic_dec_and_test(&tx->tx_refcount))
+ LASSERT (cfs_atomic_read(&tx->tx_refcount) > 0);
+ if (cfs_atomic_dec_and_test(&tx->tx_refcount))
ksocknal_tx_done(NULL, tx);
}
static inline void
ksocknal_route_addref (ksock_route_t *route)
{
- LASSERT (atomic_read(&route->ksnr_refcount) > 0);
- atomic_inc(&route->ksnr_refcount);
+ LASSERT (cfs_atomic_read(&route->ksnr_refcount) > 0);
+ cfs_atomic_inc(&route->ksnr_refcount);
}
extern void ksocknal_destroy_route (ksock_route_t *route);
static inline void
ksocknal_route_decref (ksock_route_t *route)
{
- LASSERT (atomic_read (&route->ksnr_refcount) > 0);
- if (atomic_dec_and_test(&route->ksnr_refcount))
+ LASSERT (cfs_atomic_read (&route->ksnr_refcount) > 0);
+ if (cfs_atomic_dec_and_test(&route->ksnr_refcount))
ksocknal_destroy_route (route);
}
static inline void
ksocknal_peer_addref (ksock_peer_t *peer)
{
- LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
- atomic_inc(&peer->ksnp_refcount);
+ LASSERT (cfs_atomic_read (&peer->ksnp_refcount) > 0);
+ cfs_atomic_inc(&peer->ksnp_refcount);
}
extern void ksocknal_destroy_peer (ksock_peer_t *peer);
static inline void
ksocknal_peer_decref (ksock_peer_t *peer)
{
- LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
- if (atomic_dec_and_test(&peer->ksnp_refcount))
+ LASSERT (cfs_atomic_read (&peer->ksnp_refcount) > 0);
+ if (cfs_atomic_dec_and_test(&peer->ksnp_refcount))
ksocknal_destroy_peer (peer);
}
extern void ksocknal_lib_tunables_fini(void);
extern void ksocknal_lib_csum_tx(ksock_tx_t *tx);
+
+extern int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
+extern __u64 ksocknal_lib_new_incarnation(void);
+extern int ksocknal_lib_bind_thread_to_cpu(int id);
if (size == KSOCK_NOOP_TX_SIZE) {
/* searching for a noop tx in free list */
- spin_lock(&ksocknal_data.ksnd_tx_lock);
+ cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
list_del(&tx->tx_list);
}
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
}
if (tx == NULL)
if (tx == NULL)
return NULL;
- atomic_set(&tx->tx_refcount, 1);
+ cfs_atomic_set(&tx->tx_refcount, 1);
tx->tx_desc_size = size;
- atomic_inc(&ksocknal_data.ksnd_nactive_txs);
+ cfs_atomic_inc(&ksocknal_data.ksnd_nactive_txs);
return tx;
}
void
ksocknal_free_tx (ksock_tx_t *tx)
{
- atomic_dec(&ksocknal_data.ksnd_nactive_txs);
+ cfs_atomic_dec(&ksocknal_data.ksnd_nactive_txs);
if (tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
/* it's a noop tx */
- spin_lock(&ksocknal_data.ksnd_tx_lock);
+ cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
} else {
LIBCFS_FREE(tx, tx->tx_desc_size);
}
rc = ksocknal_send_kiov (conn, tx);
}
- bufnob = SOCK_WMEM_QUEUED(conn->ksnc_sock);
+ bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
if (rc > 0) /* sent something? */
conn->ksnc_tx_bufnob += rc; /* account it */
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
conn->ksnc_tx_bufnob = bufnob;
- mb();
+ cfs_mb();
}
if (rc <= 0) { /* Didn't write anything? */
- ksock_sched_t *sched;
if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
rc = -EAGAIN;
- if (rc != -EAGAIN)
- break;
-
/* Check if EAGAIN is due to memory pressure */
-
- sched = conn->ksnc_scheduler;
- spin_lock_bh (&sched->kss_lock);
-
- if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
- !conn->ksnc_tx_ready) {
- /* SOCK_NOSPACE is set when the socket fills
- * and cleared in the write_space callback
- * (which also sets ksnc_tx_ready). If
- * SOCK_NOSPACE and ksnc_tx_ready are BOTH
- * zero, I didn't fill the socket and
- * write_space won't reschedule me, so I
- * return -ENOMEM to get my caller to retry
- * after a timeout */
+ if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
rc = -ENOMEM;
- }
- spin_unlock_bh (&sched->kss_lock);
break;
}
/* socket's wmem_queued now includes 'rc' bytes */
- atomic_sub (rc, &conn->ksnc_tx_nob);
+ cfs_atomic_sub (rc, &conn->ksnc_tx_nob);
rc = 0;
} while (tx->tx_resid != 0);
conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
conn->ksnc_rx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- mb(); /* order with setting rx_started */
+ cfs_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
conn->ksnc_rx_nob_wanted -= nob;
conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
conn->ksnc_rx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- mb(); /* order with setting rx_started */
+ cfs_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
conn->ksnc_rx_nob_wanted -= nob;
list_del (&tx->tx_list);
- LASSERT (atomic_read(&tx->tx_refcount) == 1);
+ LASSERT (cfs_atomic_read(&tx->tx_refcount) == 1);
ksocknal_tx_done (ni, tx);
}
}
ksocknal_tx_addref(tx);
- spin_lock(&peer->ksnp_lock);
+ cfs_spin_lock(&peer->ksnp_lock);
LASSERT (tx->tx_msg.ksm_zc_req_cookie == 0);
tx->tx_msg.ksm_zc_req_cookie = peer->ksnp_zc_next_cookie++;
list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
- spin_unlock(&peer->ksnp_lock);
+ cfs_spin_unlock(&peer->ksnp_lock);
}
static void
{
ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
- spin_lock(&peer->ksnp_lock);
+ cfs_spin_lock(&peer->ksnp_lock);
if (tx->tx_msg.ksm_zc_req_cookie == 0) {
/* Not waiting for an ACK */
- spin_unlock(&peer->ksnp_lock);
+ cfs_spin_unlock(&peer->ksnp_lock);
return;
}
tx->tx_msg.ksm_zc_req_cookie = 0;
list_del(&tx->tx_zc_list);
- spin_unlock(&peer->ksnp_lock);
+ cfs_spin_unlock(&peer->ksnp_lock);
ksocknal_tx_decref(tx);
}
counter++; /* exponential backoff warnings */
if ((counter & (-counter)) == counter)
CWARN("%u ENOMEM tx %p (%u allocated)\n",
- counter, conn, atomic_read(&libcfs_kmemory));
+ counter, conn, cfs_atomic_read(&libcfs_kmemory));
/* Queue on ksnd_enomem_conns for retry after a timeout */
- spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
/* enomem list takes over scheduler's ref... */
LASSERT (conn->ksnc_tx_scheduled);
ksocknal_data.ksnd_reaper_waketime))
cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
- spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
return (rc);
}
route->ksnr_scheduled = 1; /* scheduling conn for connd */
ksocknal_route_addref(route); /* extra ref for connd */
- spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
list_add_tail (&route->ksnr_connd_list,
&ksocknal_data.ksnd_connd_routes);
cfs_waitq_signal (&ksocknal_data.ksnd_connd_waitq);
- spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
}
ksock_conn_t *
#if SOCKNAL_ROUND_ROBIN
const int nob = 0;
#else
- int nob = atomic_read(&c->ksnc_tx_nob) +
- SOCK_WMEM_QUEUED(c->ksnc_sock);
+ int nob = cfs_atomic_read(&c->ksnc_tx_nob) +
+ libcfs_sock_wmem_queued(c->ksnc_sock);
#endif
LASSERT (!c->ksnc_closing);
LASSERT (c->ksnc_proto != NULL);
KSOCK_MSG_NOOP,
tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
- atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+ cfs_atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
tx->tx_conn = conn;
ksocknal_conn_addref(conn); /* +1 ref for tx */
* a blockable lock (the socket lock), so libcfs_sock_wmem_queued()
* cannot be called while holding a spinlock.
*/
- bufnob = SOCK_WMEM_QUEUED(conn->ksnc_sock);
- spin_lock_bh (&sched->kss_lock);
+ bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
+ cfs_spin_lock_bh (&sched->kss_lock);
if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
/* First packet starts the timeout */
conn->ksnc_tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
conn->ksnc_tx_bufnob = 0;
- mb(); /* order with adding to tx_queue */
+ cfs_mb(); /* order with adding to tx_queue */
}
ztx = NULL;
if (conn->ksnc_tx_mono != NULL) {
if (ksocknal_piggyback_zcack(conn, msg->ksm_zc_ack_cookie)) {
/* zc-ack cookie is piggybacked */
- atomic_sub (tx->tx_nob, &conn->ksnc_tx_nob);
+ cfs_atomic_sub (tx->tx_nob, &conn->ksnc_tx_nob);
ztx = tx; /* Put to freelist later */
} else {
/* no packet can piggyback zc-ack cookie */
list_add(&tx->tx_list, &ztx->tx_list);
list_del(&ztx->tx_list);
- atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+ cfs_atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
} else {
/* no noop zc-ack packet, just enqueue it */
LASSERT(conn->ksnc_tx_mono->tx_msg.ksm_type == KSOCK_MSG_LNET);
cfs_waitq_signal (&sched->kss_waitq);
}
- spin_unlock_bh (&sched->kss_lock);
+ cfs_spin_unlock_bh (&sched->kss_lock);
}
ksock_route_t *
ksock_peer_t *peer;
ksock_conn_t *conn;
ksock_route_t *route;
- rwlock_t *g_lock;
+ cfs_rwlock_t *g_lock;
int retry;
int rc;
for (retry = 0;; retry = 1) {
#if !SOCKNAL_ROUND_ROBIN
- read_lock (g_lock);
+ cfs_read_lock (g_lock);
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL) {
if (ksocknal_find_connectable_route_locked(peer) == NULL) {
* connecting and I do have an actual
* connection... */
ksocknal_queue_tx_locked (tx, conn);
- read_unlock (g_lock);
+ cfs_read_unlock (g_lock);
return (0);
}
}
}
/* I'll need a write lock... */
- read_unlock (g_lock);
+ cfs_read_unlock (g_lock);
#endif
- write_lock_bh (g_lock);
+ cfs_write_lock_bh (g_lock);
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL)
break;
- write_unlock_bh (g_lock);
+ cfs_write_unlock_bh (g_lock);
if ((id.pid & LNET_PID_USERFLAG) != 0) {
CERROR("Refusing to create a connection to "
if (conn != NULL) {
/* Connection exists; queue message on it */
ksocknal_queue_tx_locked (tx, conn);
- write_unlock_bh (g_lock);
+ cfs_write_unlock_bh (g_lock);
return (0);
}
ksocknal_find_connecting_route_locked (peer) != NULL) {
/* Queue the message until a connection is established */
list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
- write_unlock_bh (g_lock);
+ cfs_write_unlock_bh (g_lock);
return 0;
}
- write_unlock_bh (g_lock);
+ cfs_write_unlock_bh (g_lock);
/* NB Routes may be ignored if connections to them failed recently */
CDEBUG(D_NETERROR, "No usable routes to %s\n", libcfs_id2str(id));
LASSERT (payload_niov <= LNET_MAX_IOV);
/* payload is either all vaddrs or all pages */
LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- LASSERT (!in_interrupt ());
+ LASSERT (!cfs_in_interrupt ());
if (payload_iov != NULL)
desc_size = offsetof(ksock_tx_t,
if (pid < 0)
return ((int)pid);
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
ksocknal_data.ksnd_nthreads++;
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
return (0);
}
void
ksocknal_thread_fini (void)
{
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
ksocknal_data.ksnd_nthreads--;
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
}
int
if (nob_to_skip == 0) { /* right at next packet boundary now */
conn->ksnc_rx_started = 0;
- mb (); /* racing with timeout thread */
+ cfs_mb(); /* racing with timeout thread */
switch (conn->ksnc_proto->pro_version) {
case KSOCK_PROTO_V2:
ksock_sched_t *sched;
int rc;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
conn = ksocknal_find_conn_locked (0, peer);
if (conn == NULL) {
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
CERROR("Can't find connection to send zcack.\n");
return -ECONNRESET;
}
sched = conn->ksnc_scheduler;
- spin_lock_bh (&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
rc = ksocknal_piggyback_zcack(conn, cookie);
- spin_unlock_bh (&sched->kss_lock);
+ cfs_spin_unlock_bh (&sched->kss_lock);
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
if (rc) {
/* Ack cookie is piggybacked */
return 0;
ksocknal_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
tx->tx_msg.ksm_zc_ack_cookie = cookie; /* incoming cookie */
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
conn = ksocknal_find_conn_locked (0, peer);
if (conn == NULL) {
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
ksocknal_free_tx(tx);
CERROR("Can't find connection to send zcack.\n");
return -ECONNRESET;
}
ksocknal_queue_tx_locked(tx, conn);
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return 0;
}
ksock_tx_t *tx;
struct list_head *ctmp;
- spin_lock(&peer->ksnp_lock);
+ cfs_spin_lock(&peer->ksnp_lock);
list_for_each(ctmp, &peer->ksnp_zc_req_list) {
tx = list_entry (ctmp, ksock_tx_t, tx_zc_list);
tx->tx_msg.ksm_zc_req_cookie = 0;
list_del(&tx->tx_zc_list);
- spin_unlock(&peer->ksnp_lock);
+ cfs_spin_unlock(&peer->ksnp_lock);
ksocknal_tx_decref(tx);
return 0;
}
- spin_unlock(&peer->ksnp_lock);
+ cfs_spin_unlock(&peer->ksnp_lock);
return -EPROTO;
}
{
int rc;
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
LASSERT (conn->ksnc_rx_scheduled);
- spin_lock_bh (&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
switch (conn->ksnc_rx_state) {
case SOCKNAL_RX_PARSE_WAIT:
conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
- spin_unlock_bh (&sched->kss_lock);
+ cfs_spin_unlock_bh (&sched->kss_lock);
ksocknal_conn_decref(conn);
return (0);
}
{
int rc;
- spin_lock_bh (&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
rc = (!ksocknal_data.ksnd_shuttingdown &&
list_empty(&sched->kss_rx_conns) &&
list_empty(&sched->kss_tx_conns));
- spin_unlock_bh (&sched->kss_lock);
+ cfs_spin_unlock_bh (&sched->kss_lock);
return (rc);
}
cfs_daemonize (name);
cfs_block_allsigs ();
-#if defined(CONFIG_SMP) && defined(CPU_AFFINITY)
- id = ksocknal_sched2cpu(id);
- if (cpu_online(id)) {
- cpumask_t m = CPU_MASK_NONE;
- cpu_set(id, m);
- set_cpus_allowed(current, m);
- } else {
+ if (ksocknal_lib_bind_thread_to_cpu(id))
CERROR ("Can't set CPU affinity for %s to %d\n", name, id);
- }
-#endif /* CONFIG_SMP && CPU_AFFINITY */
- spin_lock_bh (&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
int did_something = 0;
* data_ready can set it any time after we release
* kss_lock. */
conn->ksnc_rx_ready = 0;
- spin_unlock_bh (&sched->kss_lock);
+ cfs_spin_unlock_bh (&sched->kss_lock);
rc = ksocknal_process_receive(conn);
- spin_lock_bh (&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
/* I'm the only one that can clear this flag */
LASSERT(conn->ksnc_rx_scheduled);
* write_space can set it any time after we release
* kss_lock. */
conn->ksnc_tx_ready = 0;
- spin_unlock_bh (&sched->kss_lock);
+ cfs_spin_unlock_bh (&sched->kss_lock);
if (!list_empty(&zlist)) {
/* free zombie noop txs, it's fast because
if (rc == -ENOMEM || rc == -EAGAIN) {
/* Incomplete send: replace tx on HEAD of tx_queue */
- spin_lock_bh (&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
list_add (&tx->tx_list, &conn->ksnc_tx_queue);
} else {
/* Complete send; tx -ref */
ksocknal_tx_decref (tx);
- spin_lock_bh (&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
/* assume space for more */
conn->ksnc_tx_ready = 1;
}
}
if (!did_something || /* nothing to do */
++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
- spin_unlock_bh (&sched->kss_lock);
+ cfs_spin_unlock_bh (&sched->kss_lock);
nloops = 0;
if (!did_something) { /* wait for something to do */
- rc = wait_event_interruptible_exclusive(
+ cfs_wait_event_interruptible_exclusive(
sched->kss_waitq,
- !ksocknal_sched_cansleep(sched));
+ !ksocknal_sched_cansleep(sched), rc);
LASSERT (rc == 0);
} else {
our_cond_resched();
}
- spin_lock_bh (&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
}
}
- spin_unlock_bh (&sched->kss_lock);
+ cfs_spin_unlock_bh (&sched->kss_lock);
ksocknal_thread_fini ();
return (0);
}
sched = conn->ksnc_scheduler;
- spin_lock_bh (&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
conn->ksnc_rx_ready = 1;
cfs_waitq_signal (&sched->kss_waitq);
}
- spin_unlock_bh (&sched->kss_lock);
+ cfs_spin_unlock_bh (&sched->kss_lock);
EXIT;
}
sched = conn->ksnc_scheduler;
- spin_lock_bh (&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
conn->ksnc_tx_ready = 1;
cfs_waitq_signal (&sched->kss_waitq);
}
- spin_unlock_bh (&sched->kss_lock);
+ cfs_spin_unlock_bh (&sched->kss_lock);
EXIT;
}
deadline = cfs_time_add(cfs_time_current(),
cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
LASSERT (route->ksnr_scheduled);
LASSERT (!route->ksnr_connecting);
type = SOCKLND_CONN_BULK_OUT;
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
if (cfs_time_aftereq(cfs_time_current(), deadline)) {
rc = -ETIMEDOUT;
CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
libcfs_nid2str(peer->ksnp_id.nid));
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
}
route->ksnr_scheduled = 0;
ksocknal_launch_connection_locked(route);
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
return;
failed:
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
route->ksnr_scheduled = 0;
route->ksnr_connecting = 0;
list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
}
#endif
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
ksocknal_peer_failed(peer);
ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
{
int rc;
- spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
rc = ksocknal_data.ksnd_shuttingdown ||
!list_empty(&ksocknal_data.ksnd_connd_connreqs) ||
ksocknal_connd_connect_route_locked();
- spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
return rc;
}
char name[16];
ksock_connreq_t *cr;
ksock_route_t *route;
+ int rc = 0;
snprintf (name, sizeof (name), "socknal_cd%02ld", id);
cfs_daemonize (name);
cfs_block_allsigs ();
- spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
ksock_connreq_t, ksncr_list);
list_del(&cr->ksncr_list);
- spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
ksocknal_create_conn(cr->ksncr_ni, NULL,
cr->ksncr_sock, SOCKLND_CONN_NONE);
lnet_ni_decref(cr->ksncr_ni);
LIBCFS_FREE(cr, sizeof(*cr));
- spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
}
if (ksocknal_connd_connect_route_locked()) {
list_del (&route->ksnr_connd_list);
ksocknal_data.ksnd_connd_connecting++;
- spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
ksocknal_connect (route);
ksocknal_route_decref(route);
- spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
ksocknal_data.ksnd_connd_connecting--;
}
- spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
- wait_event_interruptible_exclusive(
+ cfs_wait_event_interruptible_exclusive(
ksocknal_data.ksnd_connd_waitq,
- ksocknal_connd_ready());
+ ksocknal_connd_ready(), rc);
- spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
}
- spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
ksocknal_thread_fini ();
return (0);
/* reading the error via libcfs_sock_error() may reset the socket's
* error code on some platforms (like Darwin8.x) */
- error = SOCK_ERROR(conn->ksnc_sock);
+ error = libcfs_sock_error(conn->ksnc_sock);
if (error != 0) {
ksocknal_conn_addref(conn);
}
if ((!list_empty(&conn->ksnc_tx_queue) ||
- SOCK_WMEM_QUEUED(conn->ksnc_sock) != 0) &&
+ libcfs_sock_wmem_queued(conn->ksnc_sock) != 0) &&
cfs_time_aftereq(cfs_time_current(),
conn->ksnc_tx_deadline)) {
/* Timed out messages queued for sending or
/* NB. We expect to have a look at all the peers and not find any
* connections to time out, so we just use a shared lock while we
* take a look... */
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
list_for_each (ptmp, peers) {
peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
conn = ksocknal_find_timed_out_conn (peer);
if (conn != NULL) {
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
}
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
}
int
CFS_INIT_LIST_HEAD(&enomem_conns);
cfs_waitlink_init (&wait);
- spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
ksock_conn_t, ksnc_list);
list_del (&conn->ksnc_list);
- spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
ksocknal_terminate_conn (conn);
ksocknal_conn_decref(conn);
- spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
continue;
}
ksock_conn_t, ksnc_list);
list_del (&conn->ksnc_list);
- spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
ksocknal_destroy_conn (conn);
- spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
continue;
}
list_del_init(&ksocknal_data.ksnd_enomem_conns);
}
- spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
/* reschedule all the connections that stalled with ENOMEM... */
nenomem_conns = 0;
sched = conn->ksnc_scheduler;
- spin_lock_bh (&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
LASSERT (conn->ksnc_tx_scheduled);
conn->ksnc_tx_ready = 1;
list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
cfs_waitq_signal (&sched->kss_waitq);
- spin_unlock_bh (&sched->kss_lock);
+ cfs_spin_unlock_bh (&sched->kss_lock);
nenomem_conns++;
}
ksocknal_data.ksnd_reaper_waketime =
cfs_time_add(cfs_time_current(), timeout);
- set_current_state (TASK_INTERRUPTIBLE);
+ cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
cfs_waitq_add (&ksocknal_data.ksnd_reaper_waitq, &wait);
if (!ksocknal_data.ksnd_shuttingdown &&
list_empty (&ksocknal_data.ksnd_zombie_conns))
cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE, timeout);
- set_current_state (TASK_RUNNING);
+ cfs_set_current_state (CFS_TASK_RUNNING);
cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
- spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
}
- spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
ksocknal_thread_fini ();
return (0);
info = &ksocknal_data.ksnd_irqinfo[irq];
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
LASSERT (info->ksni_valid);
bind = !info->ksni_bound;
info->ksni_bound = 1;
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
if (!bind) /* bound already */
return;
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
conn = sk->sk_user_data;
if (conn == NULL) { /* raced with ksocknal_terminate_conn */
} else
ksocknal_read_callback(conn);
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
EXIT;
}
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
conn = sk->sk_user_data;
wspace = SOCKNAL_WSPACE(sk);
LASSERT (sk->sk_write_space != &ksocknal_write_space);
sk->sk_write_space (sk);
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return;
}
clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
}
void
return ;
}
+
+int
+ksocknal_lib_memory_pressure(ksock_conn_t *conn)
+{
+ int rc = 0;
+ ksock_sched_t *sched;
+
+ sched = conn->ksnc_scheduler;
+ cfs_spin_lock_bh (&sched->kss_lock);
+
+ if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
+ !conn->ksnc_tx_ready) {
+ /* SOCK_NOSPACE is set when the socket fills
+ * and cleared in the write_space callback
+ * (which also sets ksnc_tx_ready). If
+ * SOCK_NOSPACE and ksnc_tx_ready are BOTH
+ * zero, I didn't fill the socket and
+ * write_space won't reschedule me, so I
+ * return -ENOMEM to get my caller to retry
+ * after a timeout */
+ rc = -ENOMEM;
+ }
+
+ cfs_spin_unlock_bh (&sched->kss_lock);
+
+ return rc;
+}
+
+__u64
+ksocknal_lib_new_incarnation(void)
+{
+ struct timeval tv;
+
+ * The incarnation number is the time this module was loaded, and it
+ * identifies this particular instance of the socknal. Hopefully
+ * we won't be able to reboot more frequently than 1MHz for the
+ * foreseeable future :) */
+
+ do_gettimeofday(&tv);
+
+ return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+}
+
+int
+ksocknal_lib_bind_thread_to_cpu(int id)
+{
+#if defined(CONFIG_SMP) && defined(CPU_AFFINITY)
+ id = ksocknal_sched2cpu(id);
+ if (cpu_online(id)) {
+ cpumask_t m = CPU_MASK_NONE;
+ cpu_set(id, m);
+ set_cpus_allowed(current, m);
+ return 0;
+ }
+
+ return -1;
+
+#else
+ return 0;
+#endif
+}