/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
- * Author: Zach Brown <zab@zabbo.net>
- * Author: Peter J. Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Eric Barton <eric@bartonsoftware.com>
+ * GPL HEADER START
*
- * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Portals is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * Portals is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/klnds/socklnd/socklnd.c
+ *
+ * Author: Zach Brown <zab@zabbo.net>
+ * Author: Peter J. Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Eric Barton <eric@bartonsoftware.com>
*/
#include "socklnd.h"
-lnd_t the_ksocklnd = {
- .lnd_type = SOCKLND,
- .lnd_startup = ksocknal_startup,
- .lnd_shutdown = ksocknal_shutdown,
- .lnd_ctl = ksocknal_ctl,
- .lnd_send = ksocknal_send,
- .lnd_recv = ksocknal_recv,
- .lnd_notify = ksocknal_notify,
- .lnd_accept = ksocknal_accept,
-};
-
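+/* the_ksocklnd is now populated at module load time; see
+ * ksocknal_module_init() below */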
+lnd_t the_ksocklnd;
ksock_nal_data_t ksocknal_data;
ksock_interface_t *
if (route == NULL)
return (NULL);
- atomic_set (&route->ksnr_refcount, 1);
+ cfs_atomic_set (&route->ksnr_refcount, 1);
route->ksnr_peer = NULL;
route->ksnr_retry_interval = 0; /* OK to connect at any time */
route->ksnr_ipaddr = ipaddr;
void
ksocknal_destroy_route (ksock_route_t *route)
{
- LASSERT (atomic_read(&route->ksnr_refcount) == 0);
+ LASSERT (cfs_atomic_read(&route->ksnr_refcount) == 0);
if (route->ksnr_peer != NULL)
ksocknal_peer_decref(route->ksnr_peer);
LASSERT (id.nid != LNET_NID_ANY);
LASSERT (id.pid != LNET_PID_ANY);
- LASSERT (!in_interrupt());
+ LASSERT (!cfs_in_interrupt());
LIBCFS_ALLOC (peer, sizeof (*peer));
if (peer == NULL)
peer->ksnp_ni = ni;
peer->ksnp_id = id;
- atomic_set (&peer->ksnp_refcount, 1); /* 1 ref for caller */
+ cfs_atomic_set (&peer->ksnp_refcount, 1); /* 1 ref for caller */
peer->ksnp_closing = 0;
peer->ksnp_accepting = 0;
- peer->ksnp_zc_next_cookie = 1;
peer->ksnp_proto = NULL;
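+        /* cookie value SOCKNAL_KEEPALIVE_PING is reserved for keepalive
+         * probes, so ordinary zero-copy cookies start just past it */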
+ peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
+
CFS_INIT_LIST_HEAD (&peer->ksnp_conns);
CFS_INIT_LIST_HEAD (&peer->ksnp_routes);
CFS_INIT_LIST_HEAD (&peer->ksnp_tx_queue);
CFS_INIT_LIST_HEAD (&peer->ksnp_zc_req_list);
- spin_lock_init(&peer->ksnp_lock);
+ cfs_spin_lock_init(&peer->ksnp_lock);
- spin_lock_bh (&net->ksnn_lock);
+ cfs_spin_lock_bh (&net->ksnn_lock);
if (net->ksnn_shutdown) {
- spin_unlock_bh (&net->ksnn_lock);
-
+ cfs_spin_unlock_bh (&net->ksnn_lock);
+
LIBCFS_FREE(peer, sizeof(*peer));
CERROR("Can't create peer: network shutdown\n");
return -ESHUTDOWN;
net->ksnn_npeers++;
- spin_unlock_bh (&net->ksnn_lock);
+ cfs_spin_unlock_bh (&net->ksnn_lock);
*peerp = peer;
return 0;
{
ksock_net_t *net = peer->ksnp_ni->ni_data;
- CDEBUG (D_NET, "peer %s %p deleted\n",
+ CDEBUG (D_NET, "peer %s %p deleted\n",
libcfs_id2str(peer->ksnp_id), peer);
- LASSERT (atomic_read (&peer->ksnp_refcount) == 0);
+ LASSERT (cfs_atomic_read (&peer->ksnp_refcount) == 0);
LASSERT (peer->ksnp_accepting == 0);
LASSERT (list_empty (&peer->ksnp_conns));
LASSERT (list_empty (&peer->ksnp_routes));
* until they are destroyed, so we can be assured that _all_ state to
* do with this peer has been cleaned up when its refcount drops to
* zero. */
- spin_lock_bh (&net->ksnn_lock);
+ cfs_spin_lock_bh (&net->ksnn_lock);
net->ksnn_npeers--;
- spin_unlock_bh (&net->ksnn_lock);
+ cfs_spin_unlock_bh (&net->ksnn_lock);
}
ksock_peer_t *
continue;
CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
- peer, libcfs_id2str(id),
- atomic_read(&peer->ksnp_refcount));
+ peer, libcfs_id2str(id),
+ cfs_atomic_read(&peer->ksnp_refcount));
return (peer);
}
return (NULL);
{
ksock_peer_t *peer;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
peer = ksocknal_find_peer_locked (ni, id);
if (peer != NULL) /* +1 ref for caller? */
ksocknal_peer_addref(peer);
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return (peer);
}
{
int i;
__u32 ip;
+ ksock_interface_t *iface;
for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
LASSERT (i < LNET_MAX_INTERFACES);
ip = peer->ksnp_passive_ips[i];
- ksocknal_ip2iface(peer->ksnp_ni, ip)->ksni_npeers--;
+ iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
+ /* All IPs in peer->ksnp_passive_ips[] come from the
+ * interface list, therefore the call must succeed. */
+ LASSERT (iface != NULL);
+
+ CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
+ peer, iface, iface->ksni_nroutes);
+ iface->ksni_npeers--;
}
LASSERT (list_empty(&peer->ksnp_conns));
}
int
-ksocknal_get_peer_info (lnet_ni_t *ni, int index,
+ksocknal_get_peer_info (lnet_ni_t *ni, int index,
lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip, int *port,
int *conn_count, int *share_count)
{
int j;
int rc = -ENOENT;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
}
}
out:
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return (rc);
}
if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
CERROR ("Duplicate route %s %u.%u.%u.%u\n",
- libcfs_id2str(peer->ksnp_id),
+ libcfs_id2str(peer->ksnp_id),
HIPQUAD(route->ksnr_ipaddr));
LBUG();
}
return (-ENOMEM);
}
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
/* always called with a ref on ni, so shutdown can't have started */
LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
route2->ksnr_share_count++;
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
return (0);
}
int i;
int rc = -ENOENT;
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
if (id.nid != LNET_NID_ANY)
- lo = hi = ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers;
+ lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
else {
lo = 0;
hi = ksocknal_data.ksnd_peer_hash_size - 1;
}
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
ksocknal_txlist_done(ni, &zombies, 1);
struct list_head *ctmp;
int i;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
ksocknal_conn_addref(conn);
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return (conn);
}
}
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return (NULL);
}
if (irq != 0) { /* Hardware NIC */
info->ksni_valid = 1;
- info->ksni_sched = sched - ksocknal_data.ksnd_schedulers;
+ info->ksni_sched = (unsigned int)(sched - ksocknal_data.ksnd_schedulers);
/* no overflow... */
- LASSERT (info->ksni_sched == sched - ksocknal_data.ksnd_schedulers);
+ LASSERT (info->ksni_sched == (unsigned int)(sched - ksocknal_data.ksnd_schedulers));
}
return (sched);
int i;
int nip;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
nip = net->ksnn_ninterfaces;
LASSERT (nip <= LNET_MAX_INTERFACES);
/* Only offer interfaces for additional connections if I have
* more than one. */
if (nip < 2) {
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return 0;
}
-
+
for (i = 0; i < nip; i++) {
ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
LASSERT (ipaddrs[i] != 0);
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return (nip);
}
int
ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
{
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
ksock_net_t *net = peer->ksnp_ni->ni_data;
ksock_interface_t *iface;
ksock_interface_t *best_iface;
/* Also note that I'm not going to return more than n_peerips
* interfaces, even if I have more myself */
- write_lock_bh (global_lock);
+ cfs_write_lock_bh (global_lock);
LASSERT (n_peerips <= LNET_MAX_INTERFACES);
LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
/* Overwrite input peer IP addresses */
memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
- write_unlock_bh (global_lock);
+ cfs_write_unlock_bh (global_lock);
return (n_ips);
}
__u32 *peer_ipaddrs, int npeer_ipaddrs)
{
ksock_route_t *newroute = NULL;
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
lnet_ni_t *ni = peer->ksnp_ni;
ksock_net_t *net = ni->ni_data;
struct list_head *rtmp;
* expecting to be dealing with small numbers of interfaces, so the
* O(n**3)-ness here shouldn't matter */
- write_lock_bh (global_lock);
+ cfs_write_lock_bh (global_lock);
if (net->ksnn_ninterfaces < 2) {
/* Only create additional connections
* if I have > 1 interface */
- write_unlock_bh (global_lock);
+ cfs_write_unlock_bh (global_lock);
return;
}
-
+
LASSERT (npeer_ipaddrs <= LNET_MAX_INTERFACES);
for (i = 0; i < npeer_ipaddrs; i++) {
if (newroute != NULL) {
newroute->ksnr_ipaddr = peer_ipaddrs[i];
} else {
- write_unlock_bh (global_lock);
+ cfs_write_unlock_bh (global_lock);
newroute = ksocknal_create_route(peer_ipaddrs[i], port);
if (newroute == NULL)
return;
- write_lock_bh (global_lock);
+ cfs_write_lock_bh (global_lock);
}
if (peer->ksnp_closing) {
newroute = NULL;
}
- write_unlock_bh (global_lock);
+ cfs_write_unlock_bh (global_lock);
if (newroute != NULL)
ksocknal_route_decref(newroute);
}
cr->ksncr_ni = ni;
cr->ksncr_sock = sock;
- spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
-
- spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
return 0;
}
int
-ksocknal_connecting (ksock_peer_t *peer, __u32 ipaddr)
+ksocknal_connecting (ksock_peer_t *peer, __u32 ipaddr)
{
ksock_route_t *route;
-
- list_for_each_entry (route, &peer->ksnp_routes, ksnr_list) {
+
+ cfs_list_for_each_entry_typed (route, &peer->ksnp_routes,
+ ksock_route_t, ksnr_list) {
if (route->ksnr_ipaddr == ipaddr)
return route->ksnr_connecting;
}
int
-ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
+ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
cfs_socket_t *sock, int type)
{
- rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
CFS_LIST_HEAD (zombies);
lnet_process_id_t peerid;
struct list_head *tmp;
ksock_hello_msg_t *hello;
unsigned int irq;
ksock_tx_t *tx;
+ ksock_tx_t *txtmp;
int rc;
int active;
char *warn = NULL;
}
memset (conn, 0, sizeof (*conn));
+
conn->ksnc_peer = NULL;
conn->ksnc_route = NULL;
conn->ksnc_sock = sock;
- atomic_set (&conn->ksnc_sock_refcount, 1); /* 1 ref for conn */
+        /* 2 refs: 1 for the conn, plus an extra ref that prevents the
+         * socket from being closed before the connection is established */
+ cfs_atomic_set (&conn->ksnc_sock_refcount, 2);
conn->ksnc_type = type;
ksocknal_lib_save_callback(sock, conn);
- atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
+ cfs_atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
- conn->ksnc_zc_capable = ksocknal_lib_zc_capable(sock);
conn->ksnc_rx_ready = 0;
conn->ksnc_rx_scheduled = 0;
CFS_INIT_LIST_HEAD (&conn->ksnc_tx_queue);
conn->ksnc_tx_ready = 0;
conn->ksnc_tx_scheduled = 0;
- conn->ksnc_tx_mono = NULL;
- atomic_set (&conn->ksnc_tx_nob, 0);
+ conn->ksnc_tx_carrier = NULL;
+ cfs_atomic_set (&conn->ksnc_tx_nob, 0);
LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
kshm_ips[LNET_MAX_INTERFACES]));
hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
peerid = peer->ksnp_id;
- write_lock_bh(global_lock);
+ cfs_write_lock_bh(global_lock);
conn->ksnc_proto = peer->ksnp_proto;
- write_unlock_bh(global_lock);
-
+ cfs_write_unlock_bh(global_lock);
+
if (conn->ksnc_proto == NULL) {
- conn->ksnc_proto = &ksocknal_protocol_v2x;
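+                /* default to the newest protocol; the ksnd_protocol tunable
+                 * can force an older version when version debugging is
+                 * compiled in */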
+ conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
- if (*ksocknal_tunables.ksnd_protocol != 2)
- conn->ksnc_proto = &ksocknal_protocol_v1x;
+ if (*ksocknal_tunables.ksnd_protocol == 2)
+ conn->ksnc_proto = &ksocknal_protocol_v2x;
+ else if (*ksocknal_tunables.ksnd_protocol == 1)
+ conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
}
if (active) {
ksocknal_peer_addref(peer);
- write_lock_bh (global_lock);
+ cfs_write_lock_bh (global_lock);
} else {
rc = ksocknal_create_peer(&peer, ni, peerid);
if (rc != 0)
goto failed_1;
- write_lock_bh (global_lock);
+ cfs_write_lock_bh (global_lock);
/* called with a ref on ni, so shutdown can't have started */
LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
/* +1 ref for me */
ksocknal_peer_addref(peer);
peer->ksnp_accepting++;
-
+
/* Am I already connecting to this guy? Resolve in
* favour of higher NID... */
if (peerid.nid < ni->ni_nid &&
goto failed_2;
}
- if (peer->ksnp_proto == NULL) {
+ if (peer->ksnp_proto == NULL) {
/* Never connected before.
* NB recv_hello may have returned EPROTO to signal my peer
* wants a different protocol than the one I asked for.
*/
LASSERT (list_empty(&peer->ksnp_conns));
-
+
peer->ksnp_proto = conn->ksnc_proto;
peer->ksnp_incarnation = incarnation;
}
peer->ksnp_proto = NULL;
rc = ESTALE;
- warn = peer->ksnp_incarnation != incarnation ?
+ warn = peer->ksnp_incarnation != incarnation ?
"peer rebooted" :
"wrong proto version";
goto failed_2;
conn->ksnc_peer = peer; /* conn takes my ref on peer */
peer->ksnp_last_alive = cfs_time_current();
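+        /* a fresh conn resets the peer's keepalive bookkeeping */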
+ peer->ksnp_send_keepalive = 0;
peer->ksnp_error = 0;
sched = ksocknal_choose_scheduler_locked (irq);
sched->kss_nconns++;
conn->ksnc_scheduler = sched;
+ conn->ksnc_tx_last_post = cfs_time_current();
/* Set the deadline for the outgoing HELLO to drain */
- conn->ksnc_tx_bufnob = SOCK_WMEM_QUEUED(sock);
+ conn->ksnc_tx_bufnob = libcfs_sock_wmem_queued(sock);
conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- mb(); /* order with adding to peer's conn list */
+ cfs_mb(); /* order with adding to peer's conn list */
list_add (&conn->ksnc_list, &peer->ksnp_conns);
ksocknal_conn_addref(conn);
ksocknal_new_packet(conn, 0);
- /* Take all the packets blocking for a connection.
- * NB, it might be nicer to share these blocked packets among any
- * other connections that are becoming established. */
- while (!list_empty (&peer->ksnp_tx_queue)) {
- tx = list_entry (peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
+ conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
+
+ /* Take packets blocking for this connection. */
+ list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
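+                /* a tx this conn cannot carry stays queued on the peer
+                 * for some other conn to pick up */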
+ if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
+ continue;
list_del (&tx->tx_list);
ksocknal_queue_tx_locked (tx, conn);
}
- write_unlock_bh (global_lock);
+ cfs_write_unlock_bh (global_lock);
/* We've now got a new connection. Any errors from here on are just
* like "normal" comms errors and we close the connection normally.
hello->kshm_nips);
rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
}
-
+
LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
kshm_ips[LNET_MAX_INTERFACES]));
if (rc == 0)
rc = ksocknal_lib_setup_sock(sock);
- write_lock_bh(global_lock);
+ cfs_write_lock_bh(global_lock);
/* NB my callbacks block while I hold ksnd_global_lock */
ksocknal_lib_set_callback(sock, conn);
if (!active)
peer->ksnp_accepting--;
- write_unlock_bh(global_lock);
+ cfs_write_unlock_bh(global_lock);
if (rc != 0) {
- write_lock_bh(global_lock);
+ cfs_write_lock_bh(global_lock);
ksocknal_close_conn_locked(conn, rc);
- write_unlock_bh(global_lock);
+ cfs_write_unlock_bh(global_lock);
} else if (ksocknal_connsock_addref(conn) == 0) {
/* Allow I/O to proceed. */
ksocknal_read_callback(conn);
ksocknal_connsock_decref(conn);
}
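+        /* drop the extra socket ref taken at allocation, now that
+         * connection establishment has run its course */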
+ ksocknal_connsock_decref(conn);
ksocknal_conn_decref(conn);
return rc;
list_del_init(&peer->ksnp_tx_queue);
ksocknal_unlink_peer_locked(peer);
}
-
- write_unlock_bh (global_lock);
+
+ cfs_write_unlock_bh (global_lock);
if (warn != NULL) {
if (rc < 0)
ksocknal_send_hello(ni, conn, peerid.nid, hello);
}
- write_lock_bh(global_lock);
+ cfs_write_lock_bh(global_lock);
peer->ksnp_accepting--;
- write_unlock_bh(global_lock);
+ cfs_write_unlock_bh(global_lock);
}
-
+
ksocknal_txlist_done(ni, &zombies, 1);
ksocknal_peer_decref(peer);
if (list_empty (&peer->ksnp_conns)) {
/* No more connections to this peer */
+ if (!list_empty(&peer->ksnp_tx_queue)) {
+ ksock_tx_t *tx;
+
+ LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
+
+                        /* throw them to the last connection...,
+                         * these TXs will be sent to /dev/null by the scheduler */
+ list_for_each_entry(tx, &peer->ksnp_tx_queue, tx_list)
+ ksocknal_tx_prep(conn, tx);
+
+ spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
+ list_splice_init(&peer->ksnp_tx_queue, &conn->ksnc_tx_queue);
+ spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
+ }
+
peer->ksnp_proto = NULL; /* renegotiate protocol version */
peer->ksnp_error = error; /* stash last conn close reason */
}
}
- spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
list_add_tail (&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
- spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
}
void
/* There has been a connection failure or comms error; but I'll only
* tell LNET I think the peer is dead if it's to another kernel and
 * there are no connections or connection attempts in existence. */
-
- read_lock (&ksocknal_data.ksnd_global_lock);
+
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
list_empty(&peer->ksnp_conns) &&
peer->ksnp_accepting == 0 &&
ksocknal_find_connecting_route_locked(peer) == NULL) {
notify = 1;
- last_alive = cfs_time_seconds(peer->ksnp_last_alive);
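+                /* convert the cfs_time_t last-alive stamp into wall-clock
+                 * seconds for lnet_notify() below */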
+ last_alive = (time_t) (cfs_time_current_sec() -
+ cfs_duration_sec(cfs_time_current() -
+ peer->ksnp_last_alive));
}
-
- read_unlock (&ksocknal_data.ksnd_global_lock);
+
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
if (notify)
lnet_notify (peer->ksnp_ni, peer->ksnp_id.nid, 0,
}
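+/* Clear the zero-copy cookies of any TXs still outstanding on this conn and
+ * drop the refs they hold; only called once the socket itself is gone. */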
void
+ksocknal_finalize_zcreq(ksock_conn_t *conn)
+{
+ ksock_peer_t *peer = conn->ksnc_peer;
+ ksock_tx_t *tx;
+ ksock_tx_t *tmp;
+ CFS_LIST_HEAD (zlist);
+
+        /* NB it is safe to finalize these TXs because closing the socket
+         * will abort all buffered data */
+ LASSERT (conn->ksnc_sock == NULL);
+
+ cfs_spin_lock(&peer->ksnp_lock);
+
+ cfs_list_for_each_entry_safe_typed(tx, tmp, &peer->ksnp_zc_req_list,
+ ksock_tx_t, tx_zc_list) {
+ if (tx->tx_conn != conn)
+ continue;
+
+ LASSERT (tx->tx_msg.ksm_zc_cookies[0] != 0);
+
+ tx->tx_msg.ksm_zc_cookies[0] = 0;
+ list_del(&tx->tx_zc_list);
+ list_add(&tx->tx_zc_list, &zlist);
+ }
+
+ cfs_spin_unlock(&peer->ksnp_lock);
+
+ while (!list_empty(&zlist)) {
+ tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+
+ list_del(&tx->tx_zc_list);
+ ksocknal_tx_decref(tx);
+ }
+}
+
+void
ksocknal_terminate_conn (ksock_conn_t *conn)
{
/* This gets called by the reaper (guaranteed thread context) to
ksock_peer_t *peer = conn->ksnc_peer;
ksock_sched_t *sched = conn->ksnc_scheduler;
int failed = 0;
- struct list_head *tmp;
- struct list_head *nxt;
- ksock_tx_t *tx;
- LIST_HEAD (zlist);
LASSERT(conn->ksnc_closing);
/* wake up the scheduler to "send" all remaining packets to /dev/null */
- spin_lock_bh (&sched->kss_lock);
+ cfs_spin_lock_bh (&sched->kss_lock);
+
+ /* a closing conn is always ready to tx */
+ conn->ksnc_tx_ready = 1;
if (!conn->ksnc_tx_scheduled &&
!list_empty(&conn->ksnc_tx_queue)){
list_add_tail (&conn->ksnc_tx_list,
&sched->kss_tx_conns);
- /* a closing conn is always ready to tx */
- conn->ksnc_tx_ready = 1;
conn->ksnc_tx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
cfs_waitq_signal (&sched->kss_waitq);
}
- spin_unlock_bh (&sched->kss_lock);
-
- spin_lock(&peer->ksnp_lock);
-
- list_for_each_safe(tmp, nxt, &peer->ksnp_zc_req_list) {
- tx = list_entry(tmp, ksock_tx_t, tx_zc_list);
-
- if (tx->tx_conn != conn)
- continue;
-
- LASSERT (tx->tx_msg.ksm_zc_req_cookie != 0);
-
- tx->tx_msg.ksm_zc_req_cookie = 0;
- list_del(&tx->tx_zc_list);
- list_add(&tx->tx_zc_list, &zlist);
- }
-
- spin_unlock(&peer->ksnp_lock);
-
- list_for_each_safe(tmp, nxt, &zlist) {
- tx = list_entry(tmp, ksock_tx_t, tx_zc_list);
-
- list_del(&tx->tx_zc_list);
- ksocknal_tx_decref(tx);
- }
+ cfs_spin_unlock_bh (&sched->kss_lock);
/* serialise with callbacks */
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
peer->ksnp_error = 0; /* avoid multiple notifications */
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
if (failed)
ksocknal_peer_failed(peer);
{
/* Queue the conn for the reaper to destroy */
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) == 0);
- spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
+ cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
-
- spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+
+ cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
}
void
/* Final coup-de-grace of the reaper */
CDEBUG (D_NET, "connection %p\n", conn);
- LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
- LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
+ LASSERT (cfs_atomic_read (&conn->ksnc_conn_refcount) == 0);
+ LASSERT (cfs_atomic_read (&conn->ksnc_sock_refcount) == 0);
LASSERT (conn->ksnc_sock == NULL);
LASSERT (conn->ksnc_route == NULL);
LASSERT (!conn->ksnc_tx_scheduled);
", ip %d.%d.%d.%d:%d, with error\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
- lnet_finalize (conn->ksnc_peer->ksnp_ni,
+ lnet_finalize (conn->ksnc_peer->ksnp_ni,
conn->ksnc_cookie, -EIO);
break;
case SOCKNAL_RX_LNET_HEADER:
__u32 ipaddr = conn->ksnc_ipaddr;
int count;
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
count = ksocknal_close_peer_conns_locked (peer, ipaddr, why);
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
return (count);
}
int i;
int count = 0;
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
if (id.nid != LNET_NID_ANY)
- lo = hi = ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers;
+ lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
else {
lo = 0;
hi = ksocknal_data.ksnd_peer_hash_size - 1;
}
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
/* wildcards always succeed */
if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
{
/* The router is telling me she's been notified of a change in
* gateway state.... */
- lnet_process_id_t id = {.nid = gw_nid, .pid = LNET_PID_ANY};
+ lnet_process_id_t id = {0};
- CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
+ id.nid = gw_nid;
+ id.pid = LNET_PID_ANY;
+
+ CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
alive ? "up" : "down");
if (!alive) {
ksock_conn_t *conn;
for (index = 0; ; index++) {
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
i = 0;
conn = NULL;
}
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
if (conn == NULL)
break;
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
for (j = 0; ; j++) {
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
index = 0;
peer = NULL;
}
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
if (peer != NULL) {
rc = 0;
netmask == 0)
return (-EINVAL);
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
iface = ksocknal_ip2iface(ni, ipaddress);
if (iface != NULL) {
/* NB only new connections will pay attention to the new interface! */
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
return (rc);
}
int i;
int j;
- write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
for (i = 0; i < net->ksnn_ninterfaces; i++) {
this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
}
}
- write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
return (rc);
}
int
ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
+ lnet_process_id_t id = {0};
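+        /* id is filled in per-command below; zero-initialized here rather
+         * than with designated initializers, presumably for older compilers */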
struct libcfs_ioctl_data *data = arg;
int rc;
ksock_net_t *net = ni->ni_data;
ksock_interface_t *iface;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
- if (data->ioc_count < 0 ||
- data->ioc_count >= net->ksnn_ninterfaces) {
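+        /* ioc_count is a __u32, so the old negative check was redundant */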
+ if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
rc = -ENOENT;
} else {
rc = 0;
data->ioc_u32[3] = iface->ksni_nroutes;
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return rc;
}
data->ioc_u32[1]); /* net mask */
case IOC_LIBCFS_DEL_INTERFACE:
- return ksocknal_del_interface(ni,
+ return ksocknal_del_interface(ni,
data->ioc_u32[0]); /* IP address */
case IOC_LIBCFS_GET_PEER: {
- lnet_process_id_t id = {0,};
__u32 myip = 0;
__u32 ip = 0;
int port = 0;
&conn_count, &share_count);
if (rc != 0)
return rc;
-
+
data->ioc_nid = id.nid;
data->ioc_count = share_count;
data->ioc_u32[0] = ip;
return 0;
}
- case IOC_LIBCFS_ADD_PEER: {
- lnet_process_id_t id = {.nid = data->ioc_nid,
- .pid = LUSTRE_SRV_LNET_PID};
+ case IOC_LIBCFS_ADD_PEER:
+ id.nid = data->ioc_nid;
+ id.pid = LUSTRE_SRV_LNET_PID;
return ksocknal_add_peer (ni, id,
data->ioc_u32[0], /* IP */
data->ioc_u32[1]); /* port */
- }
- case IOC_LIBCFS_DEL_PEER: {
- lnet_process_id_t id = {.nid = data->ioc_nid,
- .pid = LNET_PID_ANY};
+
+ case IOC_LIBCFS_DEL_PEER:
+ id.nid = data->ioc_nid;
+ id.pid = LNET_PID_ANY;
return ksocknal_del_peer (ni, id,
data->ioc_u32[0]); /* IP */
- }
+
case IOC_LIBCFS_GET_CONN: {
int txmem;
int rxmem;
data->ioc_u32[1] = conn->ksnc_port;
data->ioc_u32[2] = conn->ksnc_myipaddr;
data->ioc_u32[3] = conn->ksnc_type;
- data->ioc_u32[4] = conn->ksnc_scheduler -
- ksocknal_data.ksnd_schedulers;
+ data->ioc_u32[4] = (__u32)(conn->ksnc_scheduler -
+ ksocknal_data.ksnd_schedulers);
data->ioc_u32[5] = rxmem;
data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
ksocknal_conn_decref(conn);
return 0;
}
- case IOC_LIBCFS_CLOSE_CONNECTION: {
- lnet_process_id_t id = {.nid = data->ioc_nid,
- .pid = LNET_PID_ANY};
-
+ case IOC_LIBCFS_CLOSE_CONNECTION:
+ id.nid = data->ioc_nid;
+ id.pid = LNET_PID_ANY;
return ksocknal_close_matching_conns (id,
data->ioc_u32[0]);
- }
+
case IOC_LIBCFS_REGISTER_MYNID:
/* Ignore if this is a noop */
- if (data->ioc_nid == ni->ni_nid)
- return 0;
-
- CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
- libcfs_nid2str(data->ioc_nid),
- libcfs_nid2str(ni->ni_nid));
- return -EINVAL;
-
- case IOC_LIBCFS_PUSH_CONNECTION: {
- lnet_process_id_t id = {.nid = data->ioc_nid,
- .pid = LNET_PID_ANY};
-
+ if (data->ioc_nid == ni->ni_nid)
+ return 0;
+
+ CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
+ libcfs_nid2str(data->ioc_nid),
+ libcfs_nid2str(ni->ni_nid));
+ return -EINVAL;
+
+ case IOC_LIBCFS_PUSH_CONNECTION:
+ id.nid = data->ioc_nid;
+ id.pid = LNET_PID_ANY;
return ksocknal_push(ni, id);
- }
+
default:
return -EINVAL;
}
void
ksocknal_free_buffers (void)
{
- LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
+ LASSERT (cfs_atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
if (ksocknal_data.ksnd_schedulers != NULL)
LIBCFS_FREE (ksocknal_data.ksnd_schedulers,
sizeof (struct list_head) *
ksocknal_data.ksnd_peer_hash_size);
- spin_lock(&ksocknal_data.ksnd_tx_lock);
+ cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
struct list_head zlist;
list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
while(!list_empty(&zlist)) {
tx = list_entry(zlist.next, ksock_tx_t, tx_list);
LIBCFS_FREE(tx, tx->tx_desc_size);
}
} else {
- spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
}
}
int i;
CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
- atomic_read (&libcfs_kmemory));
+ cfs_atomic_read (&libcfs_kmemory));
LASSERT (ksocknal_data.ksnd_nnets == 0);
switch (ksocknal_data.ksnd_init) {
}
i = 4;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
while (ksocknal_data.ksnd_nthreads != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d threads to terminate\n",
ksocknal_data.ksnd_nthreads);
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
cfs_pause(cfs_time_seconds(1));
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
ksocknal_free_buffers();
}
CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
- atomic_read (&libcfs_kmemory));
+ cfs_atomic_read (&libcfs_kmemory));
PORTAL_MODULE_UNUSE;
}
-
-__u64
-ksocknal_new_incarnation (void)
-{
- struct timeval tv;
-
- /* The incarnation number is the time this module loaded and it
- * identifies this particular instance of the socknal. Hopefully
- * we won't be able to reboot more frequently than 1MHz for the
- * forseeable future :) */
-
- do_gettimeofday(&tv);
-
- return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
-}
-
int
ksocknal_base_startup (void)
{
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
- rwlock_init(&ksocknal_data.ksnd_global_lock);
+ cfs_rwlock_init(&ksocknal_data.ksnd_global_lock);
- spin_lock_init (&ksocknal_data.ksnd_reaper_lock);
+ cfs_spin_lock_init (&ksocknal_data.ksnd_reaper_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
cfs_waitq_init(&ksocknal_data.ksnd_reaper_waitq);
- spin_lock_init (&ksocknal_data.ksnd_connd_lock);
+ cfs_spin_lock_init (&ksocknal_data.ksnd_connd_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
cfs_waitq_init(&ksocknal_data.ksnd_connd_waitq);
- spin_lock_init (&ksocknal_data.ksnd_tx_lock);
+ cfs_spin_lock_init (&ksocknal_data.ksnd_tx_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
/* NB memset above zeros whole of ksocknal_data, including
for (i = 0; i < ksocknal_data.ksnd_nschedulers; i++) {
ksock_sched_t *kss = &ksocknal_data.ksnd_schedulers[i];
- spin_lock_init (&kss->kss_lock);
+ cfs_spin_lock_init (&kss->kss_lock);
CFS_INIT_LIST_HEAD (&kss->kss_rx_conns);
CFS_INIT_LIST_HEAD (&kss->kss_tx_conns);
CFS_INIT_LIST_HEAD (&kss->kss_zombie_noop_txs);
* connecting */
if (*ksocknal_tunables.ksnd_nconnds < 2)
*ksocknal_tunables.ksnd_nconnds = 2;
-
+
for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
- rc = ksocknal_thread_start (ksocknal_connd, (void *)((long)i));
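+                /* ulong_ptr_t keeps the index-to-pointer cast the right
+                 * width on both 32- and 64-bit platforms */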
+ rc = ksocknal_thread_start (ksocknal_connd,
+ (void *)((ulong_ptr_t)i));
if (rc != 0) {
CERROR("Can't spawn socknal connd: %d\n", rc);
goto failed;
struct list_head *tmp;
int i;
- read_lock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_lock (&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
"closing %d, accepting %d, err %d, zcookie "LPU64", "
"txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
- atomic_read(&peer->ksnp_refcount),
+ cfs_atomic_read(&peer->ksnp_refcount),
peer->ksnp_sharecount, peer->ksnp_closing,
peer->ksnp_accepting, peer->ksnp_error,
peer->ksnp_zc_next_cookie,
list_for_each (tmp, &peer->ksnp_routes) {
route = list_entry(tmp, ksock_route_t, ksnr_list);
CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
- "del %d\n", atomic_read(&route->ksnr_refcount),
+ "del %d\n", cfs_atomic_read(&route->ksnr_refcount),
route->ksnr_scheduled, route->ksnr_connecting,
route->ksnr_connected, route->ksnr_deleted);
}
list_for_each (tmp, &peer->ksnp_conns) {
conn = list_entry(tmp, ksock_conn_t, ksnc_list);
CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
- atomic_read(&conn->ksnc_conn_refcount),
- atomic_read(&conn->ksnc_sock_refcount),
+ cfs_atomic_read(&conn->ksnc_conn_refcount),
+ cfs_atomic_read(&conn->ksnc_sock_refcount),
conn->ksnc_type, conn->ksnc_closing);
}
}
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
return;
}
{
ksock_net_t *net = ni->ni_data;
int i;
- lnet_process_id_t anyid = {.nid = LNET_NID_ANY,
- .pid = LNET_PID_ANY};
+ lnet_process_id_t anyid = {0};
+
+ anyid.nid = LNET_NID_ANY;
+ anyid.pid = LNET_PID_ANY;
LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
LASSERT(ksocknal_data.ksnd_nnets > 0);
- spin_lock_bh (&net->ksnn_lock);
+ cfs_spin_lock_bh (&net->ksnn_lock);
net->ksnn_shutdown = 1; /* prevent new peers */
- spin_unlock_bh (&net->ksnn_lock);
+ cfs_spin_unlock_bh (&net->ksnn_lock);
/* Delete all peers */
ksocknal_del_peer(ni, anyid, 0);
/* Wait for all peer state to clean up */
i = 2;
- spin_lock_bh (&net->ksnn_lock);
+ cfs_spin_lock_bh (&net->ksnn_lock);
while (net->ksnn_npeers != 0) {
- spin_unlock_bh (&net->ksnn_lock);
+ cfs_spin_unlock_bh (&net->ksnn_lock);
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
ksocknal_debug_peerhash(ni);
- spin_lock_bh (&net->ksnn_lock);
+ cfs_spin_lock_bh (&net->ksnn_lock);
}
- spin_unlock_bh (&net->ksnn_lock);
+ cfs_spin_unlock_bh (&net->ksnn_lock);
for (i = 0; i < net->ksnn_ninterfaces; i++) {
LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
}
LIBCFS_FREE(net, sizeof(*net));
-
+
ksocknal_data.ksnd_nnets--;
if (ksocknal_data.ksnd_nnets == 0)
ksocknal_base_shutdown();
int j;
int rc;
int n;
-
+
n = libcfs_ipif_enumerate(&names);
if (n <= 0) {
CERROR("Can't enumerate interfaces: %d\n", n);
names[i], rc);
continue;
}
-
+
if (!up) {
CWARN("Ignoring interface %s (down)\n",
names[i]);
}
libcfs_ipif_free_enumeration(names, n);
-
+
if (j == 0)
CERROR("Can't find any usable interfaces\n");
-
+
return j;
}
if (rc != 0)
return rc;
}
-
+
LIBCFS_ALLOC(net, sizeof(*net));
if (net == NULL)
goto fail_0;
-
+
memset(net, 0, sizeof(*net));
- spin_lock_init(&net->ksnn_lock);
- net->ksnn_incarnation = ksocknal_new_incarnation();
+ cfs_spin_lock_init(&net->ksnn_lock);
+ net->ksnn_incarnation = ksocknal_lib_new_incarnation();
ni->ni_data = net;
ni->ni_maxtxcredits = *ksocknal_tunables.ksnd_credits;
ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peercredits;
-
+
if (ni->ni_interfaces[0] == NULL) {
rc = ksocknal_enumerate_interfaces(net);
if (rc <= 0)
ni->ni_interfaces[i], &up,
&net->ksnn_interfaces[i].ksni_ipaddr,
&net->ksnn_interfaces[i].ksni_netmask);
-
+
if (rc != 0) {
CERROR("Can't get interface %s info: %d\n",
ni->ni_interfaces[i], rc);
goto fail_1;
}
-
+
if (!up) {
CERROR("Interface %s is down\n",
ni->ni_interfaces[i]);
ksocknal_data.ksnd_nnets++;
return 0;
-
+
fail_1:
LIBCFS_FREE(net, sizeof(*net));
fail_0:
ksocknal_module_fini (void)
{
lnet_unregister_lnd(&the_ksocklnd);
- ksocknal_lib_tunables_fini();
+ ksocknal_tunables_fini();
}
int __init
int rc;
        /* check that the ksnr_connected/connecting fields are large enough */
- CLASSERT(SOCKLND_CONN_NTYPES <= 4);
-
- rc = ksocknal_lib_tunables_init();
+ CLASSERT (SOCKLND_CONN_NTYPES <= 4);
+ CLASSERT (SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
+
+ /* initialize the_ksocklnd */
+ the_ksocklnd.lnd_type = SOCKLND;
+ the_ksocklnd.lnd_startup = ksocknal_startup;
+ the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
+ the_ksocklnd.lnd_ctl = ksocknal_ctl;
+ the_ksocklnd.lnd_send = ksocknal_send;
+ the_ksocklnd.lnd_recv = ksocknal_recv;
+ the_ksocklnd.lnd_notify = ksocknal_notify;
+ the_ksocklnd.lnd_accept = ksocknal_accept;
+
+ rc = ksocknal_tunables_init();
if (rc != 0)
return rc;
return 0;
}
-MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
-MODULE_DESCRIPTION("Kernel TCP Socket LND v2.0.0");
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0");
MODULE_LICENSE("GPL");
-cfs_module(ksocknal, "2.0.0", ksocknal_module_init, ksocknal_module_fini);
+cfs_module(ksocknal, "3.0.0", ksocknal_module_init, ksocknal_module_fini);