static int
ksocknal_get_peer_info(struct lnet_ni *ni, int index,
- struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
+ struct lnet_processid *id, __u32 *myip, __u32 *peer_ip,
int *port, int *conn_count, int *share_count)
{
struct ksock_peer_ni *peer_ni;
if (index-- > 0)
continue;
- id->pid = peer_ni->ksnp_id.pid;
- id->nid = lnet_nid_to_nid4(&peer_ni->ksnp_id.nid);
+ *id = peer_ni->ksnp_id;
*myip = 0;
*peer_ip = 0;
*port = 0;
if (index-- > 0)
continue;
- id->pid = peer_ni->ksnp_id.pid;
- id->nid = lnet_nid_to_nid4(&peer_ni->ksnp_id.nid);
+ *id = peer_ni->ksnp_id;
*myip = peer_ni->ksnp_passive_ips[j];
*peer_ip = 0;
*port = 0;
conn_cb = peer_ni->ksnp_conn_cb;
- id->pid = peer_ni->ksnp_id.pid;
- id->nid = lnet_nid_to_nid4(&peer_ni->ksnp_id.nid);
+ *id = peer_ni->ksnp_id;
if (conn_cb->ksnr_addr.ss_family == AF_INET) {
struct sockaddr_in *sa =
(void *)&conn_cb->ksnr_addr;
}
int
-ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id4,
+ksocknal_add_peer(struct lnet_ni *ni, struct lnet_processid *id,
struct sockaddr *addr)
{
struct ksock_peer_ni *peer_ni;
struct ksock_peer_ni *peer2;
struct ksock_conn_cb *conn_cb;
- struct lnet_processid id;
- if (id4.nid == LNET_NID_ANY ||
- id4.pid == LNET_PID_ANY)
+ if (LNET_NID_IS_ANY(&id->nid) ||
+ id->pid == LNET_PID_ANY)
return (-EINVAL);
- id.pid = id4.pid;
- lnet_nid4_to_nid(id4.nid, &id.nid);
-
/* Have a brand new peer_ni ready... */
- peer_ni = ksocknal_create_peer(ni, &id);
+ peer_ni = ksocknal_create_peer(ni, id);
if (IS_ERR(peer_ni))
return PTR_ERR(peer_ni);
LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers)
>= 0);
- peer2 = ksocknal_find_peer_locked(ni, &id);
+ peer2 = ksocknal_find_peer_locked(ni, id);
if (peer2 != NULL) {
ksocknal_peer_decref(peer_ni);
peer_ni = peer2;
} else {
/* peer_ni table takes my ref on peer_ni */
hash_add(ksocknal_data.ksnd_peers, &peer_ni->ksnp_list,
- nidhash(&id.nid));
+ nidhash(&id->nid));
}
- ksocknal_add_conn_cb_locked(peer_ni, conn_cb);
-
- /* Remember conns_per_peer setting at the time
- * of connection initiation. It will define the
- * max number of conns per type for this conn_cb
- * while it's in use.
- */
- conn_cb->ksnr_max_conns = ksocknal_get_conns_per_peer(peer_ni);
+ if (peer_ni->ksnp_conn_cb) {
+ ksocknal_conn_cb_decref(conn_cb);
+ } else {
+ ksocknal_add_conn_cb_locked(peer_ni, conn_cb);
+ /* Remember conns_per_peer setting at the time
+ * of connection initiation. It will define the
+ * max number of conns per type for this conn_cb
+ * while it's in use.
+ */
+ conn_cb->ksnr_max_conns = ksocknal_get_conns_per_peer(peer_ni);
+ }
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
static void
-ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni, __u32 ip)
+ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni)
{
struct ksock_conn *conn;
struct ksock_conn *cnxt;
}
static int
-ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id4, __u32 ip)
+ksocknal_del_peer(struct lnet_ni *ni, struct lnet_processid *id)
{
LIST_HEAD(zombies);
struct hlist_node *pnxt;
int hi;
int i;
int rc = -ENOENT;
- struct lnet_processid id;
-
- id.pid = id4.pid;
- lnet_nid4_to_nid(id4.nid, &id.nid);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- if (!LNET_NID_IS_ANY(&id.nid)) {
- lo = hash_min(nidhash(&id.nid),
+ if (id && !LNET_NID_IS_ANY(&id->nid)) {
+ lo = hash_min(nidhash(&id->nid),
HASH_BITS(ksocknal_data.ksnd_peers));
hi = lo;
} else {
if (peer_ni->ksnp_ni != ni)
continue;
- if (!((LNET_NID_IS_ANY(&id.nid) ||
- nid_same(&peer_ni->ksnp_id.nid, &id.nid)) &&
- (id.pid == LNET_PID_ANY ||
- peer_ni->ksnp_id.pid == id.pid)))
+ if (!((!id || LNET_NID_IS_ANY(&id->nid) ||
+ nid_same(&peer_ni->ksnp_id.nid, &id->nid)) &&
+ (!id || id->pid == LNET_PID_ANY ||
+ peer_ni->ksnp_id.pid == id->pid)))
continue;
ksocknal_peer_addref(peer_ni); /* a ref for me... */
- ksocknal_del_peer_locked(peer_ni, ip);
+ ksocknal_del_peer_locked(peer_ni);
if (peer_ni->ksnp_closing &&
!list_empty(&peer_ni->ksnp_tx_queue)) {
{
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
LIST_HEAD(zombies);
- struct lnet_process_id peerid4;
+ struct lnet_processid peerid;
u64 incarnation;
struct ksock_conn *conn;
struct ksock_conn *conn2;
/* Active connection sends HELLO eagerly */
hello->kshm_nips = 0;
- peerid4 = lnet_pid_to_pid4(&peer_ni->ksnp_id);
+ peerid = peer_ni->ksnp_id;
write_lock_bh(global_lock);
conn->ksnc_proto = peer_ni->ksnp_proto;
#endif
}
- rc = ksocknal_send_hello(ni, conn, peerid4.nid, hello);
+ rc = ksocknal_send_hello(ni, conn, &peerid.nid, hello);
if (rc != 0)
goto failed_1;
} else {
- peerid4.nid = LNET_NID_ANY;
- peerid4.pid = LNET_PID_ANY;
+ peerid.nid = LNET_ANY_NID;
+ peerid.pid = LNET_PID_ANY;
/* Passive, get protocol from peer_ni */
conn->ksnc_proto = NULL;
}
- rc = ksocknal_recv_hello(ni, conn, hello, &peerid4, &incarnation);
+ rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
if (rc < 0)
goto failed_1;
LASSERT(rc == 0 || active);
LASSERT(conn->ksnc_proto != NULL);
- LASSERT(peerid4.nid != LNET_NID_ANY);
+ LASSERT(!LNET_NID_IS_ANY(&peerid.nid));
- cpt = lnet_cpt_of_nid(peerid4.nid, ni);
+ cpt = lnet_nid2cpt(&peerid.nid, ni);
if (active) {
ksocknal_peer_addref(peer_ni);
write_lock_bh(global_lock);
} else {
- struct lnet_processid peerid;
-
- lnet_pid4_to_pid(peerid4, &peerid);
peer_ni = ksocknal_create_peer(ni, &peerid);
if (IS_ERR(peer_ni)) {
rc = PTR_ERR(peer_ni);
/* Am I already connecting to this guy? Resolve in
* favour of higher NID...
*/
- if (peerid4.nid < lnet_nid_to_nid4(&ni->ni_nid) &&
+ if (memcmp(&peerid.nid, &ni->ni_nid, sizeof(peerid.nid)) < 0 &&
ksocknal_connecting(peer_ni->ksnp_conn_cb,
((struct sockaddr *) &conn->ksnc_peeraddr))) {
rc = EALREADY;
}
write_unlock_bh(global_lock);
-
/* We've now got a new connection. Any errors from here on are just
* like "normal" comms errors and we close the connection normally.
* NB (a) we still have to send the reply HELLO for passive
CDEBUG(D_NET, "New conn %s p %d.x %pIS -> %pISp"
" incarnation:%lld sched[%d]\n",
- libcfs_id2str(peerid4), conn->ksnc_proto->pro_version,
+ libcfs_idstr(&peerid), conn->ksnc_proto->pro_version,
&conn->ksnc_myaddr, &conn->ksnc_peeraddr,
incarnation, cpt);
if (!active) {
hello->kshm_nips = 0;
- rc = ksocknal_send_hello(ni, conn, peerid4.nid, hello);
+ rc = ksocknal_send_hello(ni, conn, &peerid.nid, hello);
}
LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
if (warn != NULL) {
if (rc < 0)
CERROR("Not creating conn %s type %d: %s\n",
- libcfs_id2str(peerid4), conn->ksnc_type, warn);
+ libcfs_idstr(&peerid), conn->ksnc_type, warn);
else
CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
- libcfs_id2str(peerid4), conn->ksnc_type, warn);
+ libcfs_idstr(&peerid), conn->ksnc_type, warn);
}
if (!active) {
*/
conn->ksnc_type = SOCKLND_CONN_NONE;
hello->kshm_nips = 0;
- ksocknal_send_hello(ni, conn, peerid4.nid, hello);
+ ksocknal_send_hello(ni, conn, &peerid.nid, hello);
}
write_lock_bh(global_lock);
}
void
-ksocknal_notify_gw_down(lnet_nid_t gw_nid)
+ksocknal_notify_gw_down(struct lnet_nid *gw_nid)
{
/* The router is telling me she's been notified of a change in
* gateway state....
*/
struct lnet_processid id = {
.pid = LNET_PID_ANY,
+ .nid = *gw_nid,
};
- CDEBUG(D_NET, "gw %s down\n", libcfs_nid2str(gw_nid));
+ CDEBUG(D_NET, "gw %s down\n", libcfs_nidstr(gw_nid));
- lnet_nid4_to_nid(gw_nid, &id.nid);
/* If the gateway crashed, close all open connections... */
ksocknal_close_matching_conns(&id, 0);
return;
/* We can only establish new connections
- * if we have autroutes, and these connect on demand. */
+ * if we have autoroutes, and these connect on demand.
+ */
}
static void
int
ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
- struct lnet_process_id id4 = {};
struct lnet_processid id = {};
struct libcfs_ioctl_data *data = arg;
int rc;
int share_count = 0;
rc = ksocknal_get_peer_info(ni, data->ioc_count,
- &id4, &myip, &ip, &port,
+ &id, &myip, &ip, &port,
&conn_count, &share_count);
if (rc != 0)
return rc;
-
- data->ioc_nid = id4.nid;
+ if (!nid_is_nid4(&id.nid))
+ return -EINVAL;
+ data->ioc_nid = lnet_nid_to_nid4(&id.nid);
data->ioc_count = share_count;
data->ioc_u32[0] = ip;
data->ioc_u32[1] = port;
data->ioc_u32[2] = myip;
data->ioc_u32[3] = conn_count;
- data->ioc_u32[4] = id4.pid;
+ data->ioc_u32[4] = id.pid;
return 0;
}
case IOC_LIBCFS_ADD_PEER: {
struct sockaddr_in sa = {.sin_family = AF_INET};
- id4.nid = data->ioc_nid;
- id4.pid = LNET_PID_LUSTRE;
+ id.pid = LNET_PID_LUSTRE;
+ lnet_nid4_to_nid(data->ioc_nid, &id.nid);
sa.sin_addr.s_addr = htonl(data->ioc_u32[0]);
sa.sin_port = htons(data->ioc_u32[1]);
- return ksocknal_add_peer(ni, id4, (struct sockaddr *)&sa);
+ return ksocknal_add_peer(ni, &id, (struct sockaddr *)&sa);
}
case IOC_LIBCFS_DEL_PEER:
- id4.nid = data->ioc_nid;
- id4.pid = LNET_PID_ANY;
- return ksocknal_del_peer(ni, id4,
- data->ioc_u32[0]); /* IP */
+ lnet_nid4_to_nid(data->ioc_nid, &id.nid);
+ id.pid = LNET_PID_ANY;
+ return ksocknal_del_peer(ni, &id);
case IOC_LIBCFS_GET_CONN: {
int txmem;
switch (ksocknal_data.ksnd_init) {
default:
LASSERT(0);
- /* fallthrough */
+ fallthrough;
case SOCKNAL_INIT_ALL:
case SOCKNAL_INIT_DATA:
ksocknal_shutdown(struct lnet_ni *ni)
{
struct ksock_net *net = ni->ni_data;
- struct lnet_process_id anyid = {
- .nid = LNET_NID_ANY,
- .pid = LNET_PID_ANY,
- };
LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
LASSERT(ksocknal_data.ksnd_nnets > 0);
atomic_add(SOCKNAL_SHUTDOWN_BIAS, &net->ksnn_npeers);
/* Delete all peers */
- ksocknal_del_peer(ni, anyid, 0);
+ ksocknal_del_peer(ni, NULL);
/* Wait for all peer_ni state to clean up */
wait_var_event_warning(&net->ksnn_npeers,