/* Set receive timeout to remaining time */
tv = (struct timeval) {
.tv_sec = ticks / HZ,
- .tv_usec = ((ticks % HZ) * 1000000) / HZ;
+ .tv_usec = ((ticks % HZ) * 1000000) / HZ
};
set_fs(KERNEL_DS);
rc = sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
set_fs(oldmm);
if (rc != 0) {
CERROR("Can't set socket recv timeout %d: %d\n",
- send_timeout, rc);
+ timeout, rc);
return rc;
}
{
struct socket *sock;
int rc;
- struct timeval tv;
int option;
mm_segment_t oldmm = get_fs();
+/* Pack an outgoing connection request describing 'conn' into 'connreq'.
+ * All fields are written in host byte order; the receiver detects and
+ * corrects opposite endianness from racr_magic (see kranal_recv_connreq). */
void
kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn)
{
+ RAP_RETURN rrc;
+
memset(connreq, 0, sizeof(*connreq));
- connreq->racr_magic = RANAL_MSG_MAGIC;
- connreq->racr_version = RANAL_MSG_VERSION;
- connreq->racr_devid = conn->rac_device->rad_id;
- connreq->racr_nid = kranal_lib.libnal_ni.ni_pid.nid;
- connreq->racr_timeout = conn->rac_timeout;
- connreq->racr_incarnation = conn->rac_my_incarnation;
+ connreq->racr_magic = RANAL_MSG_MAGIC;
+ connreq->racr_version = RANAL_MSG_VERSION;
+ connreq->racr_devid = conn->rac_device->rad_id;
+ connreq->racr_nid = kranal_lib.libnal_ni.ni_pid.nid;
+ /* peerstamp/connstamp replace the old single 'incarnation': peerstamp
+  * identifies this instance of the node, connstamp this particular
+  * connection attempt (both used later for duplicate/stale detection) */
+ connreq->racr_peerstamp = kranal_data.kra_peerstamp;
+ connreq->racr_connstamp = conn->rac_my_connstamp;
+ connreq->racr_timeout = conn->rac_timeout;
rrc = RapkGetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
LASSERT(rrc == RAP_SUCCESS);
}
int
-kranal_recv_connreq(struct sock *sock, kra_connreq_t *connreq, int timeout)
+kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int timeout)
{
- int i;
int rc;
- rc = kranal_sock_read(newsock, connreq, sizeof(*connreq), timeout);
+ rc = kranal_sock_read(sock, connreq, sizeof(*connreq), timeout);
if (rc != 0) {
CERROR("Read failed: %d\n", rc);
return rc;
__swab16s(&connreq->racr_version);
__swab16s(&connreq->racr_devid);
__swab64s(&connreq->racr_nid);
- __swab64s(&connreq->racr_incarnation);
+ __swab64s(&connreq->racr_peerstamp);
+ __swab64s(&connreq->racr_connstamp);
__swab32s(&connreq->racr_timeout);
__swab32s(&connreq->racr_riparams.FmaDomainHndl);
return -EPROTO;
}
- for (i = 0; i < kranal_data.kra_ndevs; i++)
- if (connreq->racr_devid ==
- kranal_data.kra_devices[i]->rad_id)
- break;
+ return 0;
+}
- if (i == kranal_data.kra_ndevs) {
- CERROR("Can't match device %d\n", connreq->racr_devid);
- return -ENODEV;
+/* Close every connection to 'peer' that is stale relative to 'newconn':
+ * conns from an older instance of the peer (smaller peerstamp), or older
+ * connection attempts on the same device (smaller peer connstamp).
+ * NOTE(review): caller presumably holds kra_global_lock for writing
+ * ("_locked" suffix) -- confirm at call sites.
+ * Returns the number of connections closed. */
+int
+kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn)
+{
+ kra_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ int loopback;
+ int count = 0;
+
+ loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid;
+
+ list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
+ conn = list_entry(ctmp, kra_conn_t, rac_list);
+
+ /* skip the connection we are installing */
+ if (conn == newconn)
+ continue;
+
+ /* 'conn' is from an earlier instance of the peer: close it */
+ if (conn->rac_peerstamp != newconn->rac_peerstamp) {
+ CDEBUG(D_NET, "Closing stale conn nid:"LPX64
+ " peerstamp:"LPX64"("LPX64")\n", peer->rap_nid,
+ conn->rac_peerstamp, newconn->rac_peerstamp);
+ LASSERT (conn->rac_peerstamp < newconn->rac_peerstamp);
+ count++;
+ kranal_close_conn_locked(conn, -ESTALE);
+ continue;
+ }
+
+ /* conns on different devices can coexist */
+ if (conn->rac_device != newconn->rac_device)
+ continue;
+
+ /* loopback: 'conn' is the other end of 'newconn' itself
+  * (my connstamp on one matches the peer connstamp on the other) */
+ if (loopback &&
+ newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
+ newconn->rac_peer_connstamp == conn->rac_my_connstamp)
+ continue;
+
+ /* 'newconn' must be the peer's most recent attempt on this device */
+ LASSERT (conn->rac_peer_connstamp < newconn->rac_peer_connstamp);
+
+ CDEBUG(D_NET, "Closing stale conn nid:"LPX64
+ " connstamp:"LPX64"("LPX64")\n", peer->rap_nid,
+ conn->rac_peer_connstamp, newconn->rac_peer_connstamp);
+
+ count++;
+ kranal_close_conn_locked(conn, -ESTALE);
}
- return 0;
+ return count;
}
int
-kranal_conn_isdup_locked(kranal_peer_t *peer, __u64 incarnation)
+kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn)
{
kra_conn_t *conn;
struct list_head *tmp;
- int loopback = 0;
+ int loopback;
+ loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid;
+
list_for_each(tmp, &peer->rap_conns) {
conn = list_entry(tmp, kra_conn_t, rac_list);
- if (conn->rac_incarnation < incarnation) {
- /* Conns with an older incarnation get culled later */
+ /* 'newconn' is from an earlier version of 'peer'!!! */
+ if (newconn->rac_peerstamp < conn->rac_peerstamp)
+ return 1;
+
+ /* 'conn' is from an earlier version of 'peer': it will be
+ * removed when we cull stale conns later on... */
+ if (newconn->rac_peerstamp > conn->rac_peerstamp)
continue;
- }
- if (!loopback &&
- conn->rac_incarnation == incarnation &&
- peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid) {
- /* loopback creates 2 conns */
- loopback = 1;
+ /* Different devices are OK */
+ if (conn->rac_device != newconn->rac_device)
+ continue;
+
+ /* It's me connecting to myself */
+ if (loopback &&
+ newconn->rac_my_connstamp == conn->rac_peer_connstamp &&
+ newconn->rac_peer_connstamp == conn->rac_my_connstamp)
continue;
- }
- return 1;
+ /* 'newconn' is an earlier connection from 'peer'!!! */
+ if (newconn->rac_peer_connstamp < conn->rac_peer_connstamp)
+ return 2;
+
+ /* 'conn' is an earlier connection from 'peer': it will be
+ * removed when we cull stale conns later on... */
+ if (newconn->rac_peer_connstamp > conn->rac_peer_connstamp)
+ continue;
+
+ /* 'newconn' has the SAME connection stamp; 'peer' isn't
+ * playing the game... */
+ return 3;
}
return 0;
write_lock_irqsave(&kranal_data.kra_global_lock, flags);
- conn->rac_my_incarnation = kranal_data.kra_next_incarnation++;
+ conn->rac_my_connstamp = kranal_data.kra_connstamp++;
do { /* allocate a unique cqid */
conn->rac_cqid = kranal_data.kra_next_cqid++;
- } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL)
+ } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);
write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}
int
-kranal_alloc_conn(kra_conn_t **connp, kra_device_t *dev)
+kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
{
kra_conn_t *conn;
RAP_RETURN rrc;
return -ENOMEM;
memset(conn, 0, sizeof(*conn));
- conn->rac_cqid = cqid;
atomic_set(&conn->rac_refcount, 1);
INIT_LIST_HEAD(&conn->rac_list);
INIT_LIST_HEAD(&conn->rac_hashlist);
+ INIT_LIST_HEAD(&conn->rac_schedlist);
INIT_LIST_HEAD(&conn->rac_fmaq);
INIT_LIST_HEAD(&conn->rac_rdmaq);
INIT_LIST_HEAD(&conn->rac_replyq);
spin_lock_init(&conn->rac_lock);
+ kranal_set_conn_uniqueness(conn);
+
conn->rac_timeout = MAX(kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT);
kranal_update_reaper_timeout(conn->rac_timeout);
- rrc = RapkCreateRi(dev->rad_handle, cqid, dev->rad_ptag,
+ rrc = RapkCreateRi(dev->rad_handle, conn->rac_cqid,
+ dev->rad_ptag,
dev->rad_rdma_cq, dev->rad_fma_cq,
&conn->rac_rihandle);
if (rrc != RAP_SUCCESS) {
}
void
-__kranal_conn_decref(kra_conn_t *conn)
+kranal_destroy_conn(kra_conn_t *conn)
{
- kra_tx_t *tx;
RAP_RETURN rrc;
LASSERT (!in_interrupt());
LASSERT (!conn->rac_scheduled);
LASSERT (list_empty(&conn->rac_list));
LASSERT (list_empty(&conn->rac_hashlist));
+ LASSERT (list_empty(&conn->rac_schedlist));
LASSERT (atomic_read(&conn->rac_refcount) == 0);
-
- while (!list_empty(&conn->rac_fmaq)) {
- tx = list_entry(conn->rac_fmaq.next, kra_tx_t, tx_list);
-
- list_del(&tx->tx_list);
- kranal_tx_done(tx, -ECONNABORTED);
- }
-
- /* We may not destroy this connection while it has RDMAs outstanding */
+ LASSERT (list_empty(&conn->rac_fmaq));
LASSERT (list_empty(&conn->rac_rdmaq));
+ LASSERT (list_empty(&conn->rac_replyq));
- while (!list_empty(&conn->rac_replyq)) {
- tx = list_entry(conn->rac_replyq.next, kra_tx_t, tx_list);
-
- list_del(&tx->tx_list);
- kranal_tx_done(tx, -ECONNABORTED);
- }
-
rrc = RapkDestroyRi(conn->rac_device->rad_handle,
conn->rac_rihandle);
LASSERT (rrc == RAP_SUCCESS);
void
kranal_terminate_conn_locked (kra_conn_t *conn)
{
- kra_peer_t *peer - conn->rac_peer;
-
LASSERT (!in_interrupt());
- LASSERT (conn->rac_closing);
+ LASSERT (conn->rac_state == RANAL_CONN_CLOSING);
LASSERT (!list_empty(&conn->rac_hashlist));
LASSERT (list_empty(&conn->rac_list));
- /* Remove from conn hash table (no new callbacks) */
+ /* Remove from conn hash table: no new callbacks */
list_del_init(&conn->rac_hashlist);
kranal_conn_decref(conn);
- /* Conn is now just waiting for remaining refs to go */
+ conn->rac_state = RANAL_CONN_CLOSED;
+
+ /* schedule to clear out all uncompleted comms in context of dev's
+ * scheduler */
+ kranal_schedule_conn(conn);
}
void
"closing conn to "LPX64": error %d\n", peer->rap_nid, error);
LASSERT (!in_interrupt());
- LASSERT (!conn->rac_closing);
+ LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED);
LASSERT (!list_empty(&conn->rac_hashlist));
LASSERT (!list_empty(&conn->rac_list));
/* Non-persistent peer with no more conns... */
kranal_unlink_peer_locked(peer);
}
+
+ /* Reset RX timeout to ensure we wait for an incoming CLOSE for the
+ * full timeout */
+ conn->rac_last_rx = jiffies;
+ mb();
- conn->rac_closing = 1;
- kranal_schedule_conn(conn);
+ conn->rac_state = RANAL_CONN_CLOSING;
+ kranal_schedule_conn(conn); /* schedule sending CLOSE */
kranal_conn_decref(conn); /* lose peer's ref */
}
write_lock_irqsave(&kranal_data.kra_global_lock, flags);
- if (!conn->rac_closing)
+ if (conn->rac_state == RANAL_CONN_ESTABLISHED)
kranal_close_conn_locked(conn, error);
write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}
int
+kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
+ __u32 peer_ip, int peer_port)
+{
+ RAP_RETURN rrc;
+
+ rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
+ if (rrc != RAP_SUCCESS) {
+ CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n",
+ HIPQUAD(peer_ip), peer_port, rrc);
+ return -EPROTO;
+ }
+
+ conn->rac_peerstamp = connreq->racr_peerstamp;
+ conn->rac_peer_connstamp = connreq->racr_connstamp;
+ conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout);
+ kranal_update_reaper_timeout(conn->rac_keepalive);
+ return 0;
+}
+
+int
kranal_passive_conn_handshake (struct socket *sock,
- ptl_nid_t **peer_nidp, kra_conn_t **connp)
+ ptl_nid_t *peer_nidp, kra_conn_t **connp)
{
struct sockaddr_in addr;
__u32 peer_ip;
ptl_nid_t peer_nid;
kra_conn_t *conn;
kra_device_t *dev;
- RAP_RETURN rrc;
int rc;
+ int len;
int i;
- rc = sock->ops->getname(newsock, (struct sockaddr *)addr, &len, 2);
+ len = sizeof(addr);
+ rc = sock->ops->getname(sock, (struct sockaddr *)&addr, &len, 2);
if (rc != 0) {
CERROR("Can't get peer's IP: %d\n", rc);
return rc;
}
- peer_ip = ntohl(sin.sin_addr.s_addr);
- peer_port = ntohs(sin.sin_port);
+ peer_ip = ntohl(addr.sin_addr.s_addr);
+ peer_port = ntohs(addr.sin_port);
if (peer_port >= 1024) {
CERROR("Refusing unprivileged connection from %u.%u.%u.%u/%d\n",
}
rc = kranal_recv_connreq(sock, &connreq,
- kranal_data.kra_listener_timeout);
+ kranal_tunables.kra_listener_timeout);
if (rc != 0) {
CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
HIPQUAD(peer_ip), peer_port, rc);
LASSERT (peer_nid != PTL_NID_ANY);
for (i = 0;;i++) {
- LASSERT(i < kranal_data.kra_ndevs);
+ if (i == kranal_data.kra_ndevs) {
+ CERROR("Can't match dev %d from %u.%u.%u.%u/%d\n",
+ connreq.racr_devid, HIPQUAD(peer_ip), peer_port);
+ return -ENODEV;
+ }
dev = &kranal_data.kra_devices[i];
- if (dev->rad_id == connreq->racr_devid)
+ if (dev->rad_id == connreq.racr_devid)
break;
}
- rc = kranal_alloc_conn(&conn, dev,(__u32)(peer_nid & 0xffffffff));
+ rc = kranal_create_conn(&conn, dev);
if (rc != 0)
return rc;
- conn->rac_peer_incarnation = connreq.racr_incarnation;
- conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq.racr_timeout);
- kranal_update_reaper_timeout(conn->rac_keepalive);
-
- rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams);
- if (rrc != RAP_SUCCESS) {
- CERROR("Can't set riparams for "LPX64": %d\n", peer_nid, rrc);
+ rc = kranal_set_conn_params(conn, &connreq, peer_ip, peer_port);
+ if (rc != 0) {
kranal_conn_decref(conn);
- return -EPROTO;
+ return rc;
}
kranal_pack_connreq(&connreq, conn);
rc = kranal_sock_write(sock, &connreq, sizeof(connreq));
if (rc != 0) {
- CERROR("Can't tx connreq to %u.%u.%u.%u/%p: %d\n",
+ CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n",
HIPQUAD(peer_ip), peer_port, rc);
kranal_conn_decref(conn);
return rc;
struct socket *sock;
unsigned int port;
int rc;
- int option;
- mm_segment_t oldmm = get_fs();
- struct timeval tv;
for (port = 1023; port >= 512; port--) {
CDEBUG(D_NET, "Port %d not available for %u.%u.%u.%u/%d\n",
port, HIPQUAD(peer->rap_ip), peer->rap_port);
}
+
+ /* all ports busy */
+ return -EHOSTUNREACH;
}
kra_conn_t *conn;
kra_device_t *dev;
struct socket *sock;
- __u32 id32;
- RAP_RETURN rrc;
int rc;
+ int idx;
+
+ idx = peer->rap_nid & 0x7fffffff;
+ dev = &kranal_data.kra_devices[idx % kranal_data.kra_ndevs];
- id32 = (peer_nid & 0xffffffff);
- dev = &kranal_data.kra_devices[id32 % kranal_data.kra_ndevs];
-
- rc = kranal_alloc_conn(&conn, dev, id32);
+ rc = kranal_create_conn(&conn, dev);
if (rc != 0)
return rc;
kranal_pack_connreq(&connreq, conn);
- memset(&dstaddr, 0, sizeof(addr));
- dstaddr.sin_family = AF_INET;
- dstaddr.sin_port = htons(peer->rap_port);
- dstaddr.sin_addr.s_addr = htonl(peer->rap_ip);
-
- memset(&srcaddr, 0, sizeof(addr));
-
rc = ranal_connect_sock(peer, &sock);
if (rc != 0)
goto failed_0;
goto failed_1;
}
- rc = kranal_recv_connreq(sock, &connreq, kranal_data.kra_timeout);
+ rc = kranal_recv_connreq(sock, &connreq, kranal_tunables.kra_timeout);
if (rc != 0) {
CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n",
HIPQUAD(peer->rap_ip), peer->rap_port, rc);
goto failed_0;
}
- conn->rac_peer_incarnation = connreq.racr_incarnation;
- conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq.racr_timeout);
- kranal_update_reaper_timeout(conn->rac_keepalive);
-
- rc = -ENETDOWN;
- rrc = RapkSetRiParams(conn->rac_rihandle,
- &connreq->racr_riparams);
- if (rrc != RAP_SUCCESS) {
- CERROR("Can't set riparams for "LPX64": %d\n",
- peer_nid, rrc);
+ rc = kranal_set_conn_params(conn, &connreq,
+ peer->rap_ip, peer->rap_port);
+ if (rc != 0)
goto failed_0;
- }
*connp = conn;
return 0;
failed_1:
- release_sock(sock);
+ sock_release(sock);
failed_0:
kranal_conn_decref(conn);
return rc;
}
int
-kranal_conn_handshake (struct socket *sock, kranal_peer_t *peer)
+kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
{
- kranal_peer_t *peer2;
+ kra_peer_t *peer2;
+ kra_tx_t *tx;
ptl_nid_t peer_nid;
unsigned long flags;
- unsigned long timeout;
kra_conn_t *conn;
int rc;
int nstale;
- if (sock != NULL) {
- /* passive: listener accepted sock */
+ if (sock == NULL) {
+ /* active: connd wants to connect to 'peer' */
+ LASSERT (peer != NULL);
+ LASSERT (peer->rap_connecting);
+
+ rc = kranal_active_conn_handshake(peer, &conn);
+ if (rc != 0)
+ return rc;
+
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+
+ if (!kranal_peer_active(peer)) {
+ /* raced with peer getting unlinked */
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ flags);
+ kranal_conn_decref(conn);
+ return ESTALE;
+ }
+
+ peer_nid = peer->rap_nid;
+
+ } else {
+ /* passive: listener accepted 'sock' */
LASSERT (peer == NULL);
rc = kranal_passive_conn_handshake(sock, &peer_nid, &conn);
* table with no connections: I can't drop the global lock
* until I've given it a connection or removed it, and when
* I do 'peer' can disappear under me. */
- } else {
- /* active: connd wants to connect to peer */
- LASSERT (peer != NULL);
- LASSERT (peer->rap_connecting);
-
- rc = kranal_active_conn_handshake(peer, &conn);
- if (rc != 0)
- return rc;
-
- write_lock_irqsave(&kranal_data.kra_global_lock, flags);
-
- if (!kranal_peer_active(peer)) {
- /* raced with peer getting unlinked */
- write_unlock_irqrestore(&kranal_data.kra_global_lock,
- flags);
- kranal_conn_decref(conn);
- return ESTALE;
- }
- }
+ }
LASSERT (kranal_peer_active(peer)); /* peer is in the peer table */
- peer_nid = peer->rap_nid;
/* Refuse to duplicate an existing connection (both sides might try
* to connect at once). NB we return success! We _do_ have a
* connection (so we don't need to remove the peer from the peer
* table) and we _don't_ have any blocked txs to complete */
- if (kranal_conn_isdup_locked(peer, conn->rac_incarnation)) {
+ rc = kranal_conn_isdup_locked(peer, conn);
+ if (rc != 0) {
LASSERT (!list_empty(&peer->rap_conns));
LASSERT (list_empty(&peer->rap_tx_queue));
write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
- CWARN("Not creating duplicate connection to "LPX64"\n",
- peer_nid);
+ CWARN("Not creating duplicate connection to "LPX64": %d\n",
+ peer_nid, rc);
kranal_conn_decref(conn);
return 0;
}
kra_tx_t, tx_list);
list_del(&tx->tx_list);
- kranal_queue_tx_locked(tx, conn);
+ kranal_post_fma(conn, tx);
}
- nstale = kranal_close_stale_conns_locked(peer, conn->rac_incarnation);
+ nstale = kranal_close_stale_conns_locked(peer, conn);
write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
rc = kranal_conn_handshake(NULL, peer);
- write_lock_irqqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
LASSERT (peer->rap_connecting);
peer->rap_connecting = 0;
peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL;
peer->rap_reconnect_time = CURRENT_TIME;
- write_unlock_irqrestore(&kranal-data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
return;
}
struct socket *sock;
struct socket *newsock;
int port;
- int backlog;
- int timeout;
kra_connreq_t *connreqs;
char name[16];
+ int rc;
/* Parent thread holds kra_nid_mutex, and is, or is about to
* block on kra_listener_signal */
- port = kra_tunables.kra_port;
- snprintf(name, "kranal_lstn%03d", port);
+ port = kranal_tunables.kra_port;
+ snprintf(name, sizeof(name), "kranal_lstn%03d", port);
kportal_daemonize(name);
kportal_blockallsigs();
if (connreqs == NULL)
goto out_0;
- rc = kranal_create_sock(&sock, port);
+ rc = kranal_create_sock(&sock);
if (rc != 0)
goto out_1;
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_port = htons(port);
- addr.sin_addr.s_addr = INADDR_ANY
+ addr.sin_addr.s_addr = INADDR_ANY;
- rc = sock->ops->bind(sock, &addr, sizeof(addr));
+ rc = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr));
if (rc != 0) {
CERROR("Can't bind to port %d\n", port);
goto out_2;
}
- rc = sock->ops->listen(sock, kra_tunalbes.kra_backlog);
+ rc = sock->ops->listen(sock, kranal_tunables.kra_backlog);
if (rc != 0) {
- CERROR("Can't set listen backlog %d: %d\n", backlog, rc);
+ CERROR("Can't set listen backlog %d: %d\n",
+ kranal_tunables.kra_backlog, rc);
goto out_2;
}
}
int
-kranal_start_listener ()
+kranal_start_listener (void)
{
long pid;
int rc;
/* Called holding kra_nid_mutex: listener stopped */
LASSERT (kranal_data.kra_listener_sock == NULL);
- kranal_data.kra_listener_shutdown == 0;
- pid = kernel_thread(kranal_listener, sock, 0);
+ kranal_data.kra_listener_shutdown = 0;
+ pid = kernel_thread(kranal_listener, NULL, 0);
if (pid < 0) {
CERROR("Can't spawn listener: %ld\n", pid);
return (int)pid;
}
void
-kranal_stop_listener()
+kranal_stop_listener(void)
{
CDEBUG(D_WARNING, "Stopping listener\n");
LASSERT (kranal_data.kra_listener_sock != NULL);
kranal_data.kra_listener_shutdown = 1;
- wake_up_all(kranal_data->kra_listener_sock->sk->sk_sleep);
+ wake_up_all(kranal_data.kra_listener_sock->sk->sk_sleep);
/* Block until listener has torn down. */
down(&kranal_data.kra_listener_signal);
int old_val;
int rc;
+ /* No race with nal initialisation since the nal is setup all the time
+ * it's loaded. When that changes, change this! */
+ LASSERT (kranal_data.kra_init == RANAL_INIT_ALL);
+
down(&kranal_data.kra_nid_mutex);
- LASSERT (tunable == &kranal_data.kra_port ||
- tunable == &kranal_data.kra_backlog);
+ LASSERT (tunable == &kranal_tunables.kra_port ||
+ tunable == &kranal_tunables.kra_backlog);
old_val = *tunable;
rc = proc_dointvec(table, write, filp, buffer, lenp);
rc = kranal_start_listener();
if (rc != 0) {
+ CWARN("Unable to start listener with new tunable:"
+ " reverting to old value\n");
*tunable = old_val;
kranal_start_listener();
}
}
up(&kranal_data.kra_nid_mutex);
+
+ LASSERT (kranal_data.kra_init == RANAL_INIT_ALL);
return rc;
}
int
kranal_set_mynid(ptl_nid_t nid)
{
+ unsigned long flags;
lib_ni_t *ni = &kranal_lib.libnal_ni;
- int rc;
+ int rc = 0;
CDEBUG(D_NET, "setting mynid to "LPX64" (old nid="LPX64")\n",
nid, ni->ni_pid.nid);
if (kranal_data.kra_listener_sock != NULL)
kranal_stop_listener();
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ kranal_data.kra_peerstamp++;
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+
ni->ni_pid.nid = nid;
/* Delete all existing peers and their connections after new
- * NID/incarnation set to ensure no old connections in our brave
+ * NID/connstamp set to ensure no old connections in our brave
* new world. */
kranal_del_peer(PTL_NID_ANY, 0);
peer->rap_nid = nid;
atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */
- INIT_LIST_HEAD(&peer->rap_list); /* not in the peer table yet */
+ INIT_LIST_HEAD(&peer->rap_list);
+ INIT_LIST_HEAD(&peer->rap_connd_list);
INIT_LIST_HEAD(&peer->rap_conns);
INIT_LIST_HEAD(&peer->rap_tx_queue);
}
void
-__kranal_peer_decref (kra_peer_t *peer)
+kranal_destroy_peer (kra_peer_t *peer)
{
CDEBUG(D_NET, "peer "LPX64" %p deleted\n", peer->rap_nid, peer);
LASSERT (atomic_read(&peer->rap_refcount) == 0);
LASSERT (peer->rap_persistence == 0);
LASSERT (!kranal_peer_active(peer));
- LASSERT (peer->rap_connecting == 0);
+ LASSERT (!peer->rap_connecting);
LASSERT (list_empty(&peer->rap_conns));
LASSERT (list_empty(&peer->rap_tx_queue));
+ LASSERT (list_empty(&peer->rap_connd_list));
PORTAL_FREE(peer, sizeof(*peer));
}
int
-kranal_get_peer_info (int index, ptl_nid_t *nidp, int *portp, int *persistencep)
+kranal_get_peer_info (int index, ptl_nid_t *nidp, __u32 *ipp, int *portp,
+ int *persistencep)
{
kra_peer_t *peer;
struct list_head *ptmp;
continue;
*nidp = peer->rap_nid;
+ *ipp = peer->rap_ip;
*portp = peer->rap_port;
*persistencep = peer->rap_persistence;
peer2 = kranal_find_peer_locked(nid);
if (peer2 != NULL) {
- kranal_put_peer(peer);
+ kranal_peer_decref(peer);
peer = peer2;
} else {
/* peer table takes existing ref on peer */
}
int
-kranal_close_stale_conns_locked (kra_peer_t *peer, __u64 incarnation)
-{
- kra_conn_t *conn;
- struct list_head *ctmp;
- struct list_head *cnxt;
- int count = 0;
-
- list_for_each_safe (ctmp, cnxt, &peer->rap_conns) {
- conn = list_entry(ctmp, kra_conn_t, rac_list);
-
- if (conn->rac_incarnation == incarnation)
- continue;
-
- CDEBUG(D_NET, "Closing stale conn nid:"LPX64" incarnation:"LPX64"("LPX64")\n",
- peer->rap_nid, conn->rac_incarnation, incarnation);
- LASSERT (conn->rac_incarnation < incarnation);
-
- count++;
- kranal_close_conn_locked(conn, -ESTALE);
- }
-
- return count;
-}
-
-int
kranal_close_matching_conns (ptl_nid_t nid)
{
unsigned long flags;
pcfg->pcfg_id = 0;
pcfg->pcfg_misc = 0;
pcfg->pcfg_flags = 0;
- kranal_put_conn(conn);
+ kranal_conn_decref(conn);
}
break;
}
PORTAL_ALLOC(tx, sizeof(*tx));
if (tx == NULL) {
- CERROR("Can't allocate %stx[%d]\n",
- isnblk ? "nblk ", i);
- kranal_free_txdescs();
+ CERROR("Can't allocate %stx[%d]\n",
+ isnblk ? "nblk " : "", i);
+ kranal_free_txdescs(freelist);
return -ENOMEM;
}
PORTAL_ALLOC(tx->tx_phys,
- PLT_MD_MAX_IOV * sizeof(*tx->tx_phys));
+ PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
if (tx->tx_phys == NULL) {
CERROR("Can't allocate %stx[%d]->tx_phys\n",
- isnblk ? "nblk ", i);
+ isnblk ? "nblk " : "", i);
PORTAL_FREE(tx, sizeof(*tx));
kranal_free_txdescs(freelist);
return -ENOMEM;
}
- tx->tx_isnblk = isnblk
+ tx->tx_isnblk = isnblk;
tx->tx_buftype = RANAL_BUF_NONE;
+ tx->tx_msg.ram_type = RANAL_MSG_NONE;
list_add(&tx->tx_list, freelist);
}
RAP_RETURN rrc;
dev->rad_id = id;
- rrc = RapkGetDeviceByIndex(id, NULL, kranal_device_callback,
+ rrc = RapkGetDeviceByIndex(id, kranal_device_callback,
&dev->rad_handle);
if (rrc != RAP_SUCCESS) {
- CERROR("Can't get Rapidarray Device %d: %d\n", idx, rrc);
+ CERROR("Can't get Rapidarray Device %d: %d\n", id, rrc);
goto failed_0;
}
rrc = RapkReserveRdma(dev->rad_handle, total_ntx);
if (rrc != RAP_SUCCESS) {
CERROR("Can't reserve %d RDMA descriptors"
- " for device[%d]: %d\n", total_ntx, i, rrc);
+ " for device %d: %d\n", total_ntx, id, rrc);
goto failed_1;
}
&dev->rad_ptag);
if (rrc != RAP_SUCCESS) {
CERROR("Can't create ptag"
- " for device[%d]: %d\n", i, rrc);
+ " for device %d: %d\n", id, rrc);
goto failed_1;
}
&dev->rad_rdma_cq);
if (rrc != RAP_SUCCESS) {
CERROR("Can't create rdma cq size %d"
- " for device[%d]: %d\n", total_ntx, i, rrc);
+ " for device %d: %d\n", total_ntx, id, rrc);
goto failed_2;
}
dev->rad_ptag, &dev->rad_fma_cq);
if (rrc != RAP_SUCCESS) {
CERROR("Can't create fma cq size %d"
- " for device[%d]: %d\n", RANAL_RX_CQ_SIZE, i, rrc);
+ " for device %d: %d\n", RANAL_FMA_CQ_SIZE, id, rrc);
goto failed_3;
}
void
kranal_device_fini(kra_device_t *dev)
{
- RapkDestroyCQ(dev->rad_handle, dev->rad_rx_cq, dev->rad_ptag);
+ LASSERT(dev->rad_scheduler == NULL);
+ RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cq, dev->rad_ptag);
RapkDestroyCQ(dev->rad_handle, dev->rad_rdma_cq, dev->rad_ptag);
RapkDestroyPtag(dev->rad_handle, dev->rad_ptag);
RapkReleaseDevice(dev->rad_handle);
kranal_api_shutdown (nal_t *nal)
{
int i;
- int rc;
unsigned long flags;
if (nal->nal_refct != 0) {
case RANAL_INIT_ALL:
/* stop calls to nal_cmd */
- libcfs_nal_cmd_unregister(OPENRANAL);
+ libcfs_nal_cmd_unregister(RANAL);
/* No new persistent peers */
/* resetting my NID to unadvertises me, removes my
/* Wait for all peer/conn state to clean up */
i = 2;
while (atomic_read(&kranal_data.kra_nconns) != 0 ||
- atomic_read(&kranal-data.kra_npeers) != 0) {
+ atomic_read(&kranal_data.kra_npeers) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d peers and %d conns to close down\n",
spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
LASSERT (list_empty(&kranal_data.kra_connd_peers));
- spin_lock_irqsave(&kranal-data.kra_connd_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
wake_up_all(&kranal_data.kra_connd_waitq);
- spin_unlock_irqrestore(&kranal-data.kra_connd_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
i = 2;
while (atomic_read(&kranal_data.kra_nthreads) != 0) {
memset(&kranal_data, 0, sizeof(kranal_data)); /* zero pointers, flags etc */
/* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
- * a unique (for all time) incarnation so we can uniquely identify
- * the sender. The incarnation is an incrementing counter
+ * a unique (for all time) connstamp so we can uniquely identify
+ * the sender. The connstamp is an incrementing counter
* initialised with seconds + microseconds at startup time. So we
* rely on NOT creating connections more frequently on average than
- * 1MHz to ensure we don't use old incarnations when we reboot. */
+ * 1MHz to ensure we don't use old connstamps when we reboot. */
do_gettimeofday(&tv);
- kranal_data.kra_next_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+ kranal_data.kra_connstamp =
+ kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
init_MUTEX(&kranal_data.kra_nid_mutex);
init_MUTEX_LOCKED(&kranal_data.kra_listener_signal);
spin_lock_init(&dev->rad_lock);
}
+ kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
init_waitqueue_head(&kranal_data.kra_reaper_waitq);
spin_lock_init(&kranal_data.kra_reaper_lock);
if (kranal_data.kra_ndevs == 0)
goto failed;
- rc = libcfs_nal_cmd_register(OPENRANAL, &kranal_cmd, NULL);
+ rc = libcfs_nal_cmd_register(RANAL, &kranal_cmd, NULL);
if (rc != 0) {
CERROR("Can't initialise command interface (rc = %d)\n", rc);
goto failed;
#endif
PtlNIFini(kranal_ni);
- ptl_unregister_nal(OPENRANAL);
+ ptl_unregister_nal(RANAL);
}
int __init
/* Initialise dynamic tunables to defaults once only */
kranal_tunables.kra_timeout = RANAL_TIMEOUT;
- rc = ptl_register_nal(OPENRANAL, &kranal_api);
+ rc = ptl_register_nal(RANAL, &kranal_api);
if (rc != PTL_OK) {
CERROR("Can't register RANAL: %d\n", rc);
return -ENOMEM; /* or something... */
}
/* Pure gateways want the NAL started up at module load time... */
- rc = PtlNIInit(OPENRANAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kranal_ni);
+ rc = PtlNIInit(RANAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kranal_ni);
if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
- ptl_unregister_nal(OPENRANAL);
+ ptl_unregister_nal(RANAL);
return -ENODEV;
}