LNet nids are currently limited to 4-bytes for addresses.
This excludes the use of IPv6.
In order to support IPv6, introduce 'struct lnet_nid' which can hold
up to 128bit address and is extensible, and deprecate 'lnet_nid_t'.
lnet_nid_t will eventually be removed. Where lnet_nid_t is often
passed around by value, 'struct lnet_nid' will normally be passed
around by reference as it is over twice as large.
The net_type field, which currently has value up to 16, is now limited
to 0-254 with 255 being used as a wildcard. The most significant byte
is now a size field which gives the size of the whole nid minus 8. So
zero is correct for current nids with 4-byte addresses.
Where we still need to use 4-byte-address nids, we will use names
containing "nid4". So "nid4" is a lnet_nid_t when "nid" is a struct
lnet_nid. lnet_nid_to_nid4 converts a 'struct lnet_nid' to an
lnet_nid_t.
While lnet_nid_t is stored and often transmitted in host-endian format
(and possibly byte-swapped on receipt), 'struct lnet_nid' is always
stored in network-byte-order (i.e. big-endian). This is a more common
approach for network addresses.
In this first instance, 'struct lnet_nid' is used for ni_nid in
'struct lnet_ni', and related support functions.
In particular libcfs_nidstr() is introduced which parallels
libcfs_nid2str(), but takes 'struct lnet_nid'.
In cases where we need to have similar functions for old and new style
nid, the new function is introduced with a slightly different name,
such as libcfs_nidstr above, or LNET_NID_NET (like LNET_NIDNET).
It will be confusing having both, but the plan is to remove the old
names as soon as practical.
Test-Parameters: trivial
Test-Parameters: serverversion=2.12 serverdistro=el7.9 testlist=runtests
Test-Parameters: clientversion=2.12 testlist=runtests
Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: I4dcf1bab856621915b6535958d77cdde89105d96
Reviewed-on: https://review.whamcloud.com/42100
Tested-by: jenkins <devops@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Serguei Smirnov <ssmirnov@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Chris Horn <chris.horn@hpe.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
if (ni->ni_status && ni->ni_status->ns_status != status) {
CDEBUG(D_NET, "ni %s status changed from %#x to %#x\n",
- libcfs_nid2str(ni->ni_nid),
+ libcfs_nidstr(&ni->ni_nid),
ni->ni_status->ns_status, status);
ni->ni_status->ns_status = status;
update = true;
lnet_ni_get_status_locked(struct lnet_ni *ni)
__must_hold(&ni->ni_lock)
{
- if (ni->ni_nid == LNET_NID_LO_0)
+ if (nid_is_lo0(&ni->ni_nid))
return LNET_NI_STATUS_UP;
else if (atomic_read(&ni->ni_fatal_error_on))
return LNET_NI_STATUS_DOWN;
__u32 *ni_cpts;
/* interface's NID */
- lnet_nid_t ni_nid;
+ struct lnet_nid ni_nid;
/* instance-specific data */
void *ni_data;
* These are sent in sender's byte order (i.e. receiver flips).
*/
-/**
- * Address of an end-point in an LNet network.
+/** Address of an end-point in an LNet network.
*
* A node can have multiple end-points and hence multiple addresses.
* An LNet network can be a simple network (e.g. tcp0) or a network of
* LNet networks connected by LNet routers. Therefore an end-point address
* has two parts: network ID, and address within a network.
+ * The most-significant-byte in this format is always 0. A larger value
+ * would imply a larger nid with a larger address.
*
* \see LNET_NIDNET, LNET_NIDADDR, and LNET_MKNID.
*/
typedef __u64 lnet_nid_t;
+/*
+ * Address of LNet end-point in extended form
+ *
+ * To support addresses larger than 32bits we have
+ * an extended nid which supports up to 128 bits
+ * of address and is extensible.
+ * If nid_size is 0, then the nid can be stored in an lnet_nid_t,
+ * and the first 8 bytes of the 'struct lnet_nid' are identical to
+ * the lnet_nid_t in big-endian format.
+ * If nid_type == 0xff, then all other fields should be ignored
+ * and this is an ANY wildcard address. In particular, the nid_size
+ * can be 0xff without making the address too big to fit.
+ */
+struct lnet_nid {
+ __u8 nid_size; /* total bytes - 8 */
+ __u8 nid_type;
+ __be16 nid_num;
+ __be32 nid_addr[4];
+} __attribute__((packed));
+
+#define NID_BYTES(nid) ((nid)->nid_size + 8)
+#define NID_ADDR_BYTES(nid) ((nid)->nid_size + 4)
+
/**
* ID of a process in a node. Shortened as PID to distinguish from
* lnet_process_id, the global process ID.
#ifndef __UAPI_LNET_TYPES_H__
#define __UAPI_LNET_TYPES_H__
+#include <linux/string.h>
+#include <asm/byteorder.h>
+
/** \addtogroup lnet
* @{ */
/** wildcard PID that matches any lnet_pid_t */
#define LNET_PID_ANY ((lnet_pid_t) -1)
+static inline int LNET_NID_IS_ANY(const struct lnet_nid *nid)
+{
+ /* A NULL pointer can be used to mean "ANY" */
+ return !nid || nid->nid_type == 0xFF;
+}
+
+#define LNET_ANY_NID ((struct lnet_nid) \
+ {0xFF, 0xFF, ~0, {~0, ~0, ~0, ~0} })
+
#define LNET_PID_RESERVED 0xf0000000 /* reserved bits in PID */
#define LNET_PID_USERFLAG 0x80000000 /* set in userspace peers */
#define LNET_PID_LUSTRE 12345
static inline __u32 LNET_NETTYP(__u32 net)
{
- return (net >> 16) & 0xffff;
+ return (net >> 16) & 0xff;
}
static inline __u32 LNET_MKNET(__u32 type, __u32 num)
#define LNET_NET_ANY LNET_NIDNET(LNET_NID_ANY)
+static inline int nid_is_nid4(const struct lnet_nid *nid)
+{
+ return NID_ADDR_BYTES(nid) == 4;
+}
+
+/* LOLND may not be defined yet, so we cannot use an inline */
+#define nid_is_lo0(__nid) \
+ ((__nid)->nid_type == LOLND && \
+ nid_is_nid4(__nid) && \
+ (__nid)->nid_num == 0 && \
+ (__nid)->nid_addr[0] == 0)
+
+static inline __u32 LNET_NID_NET(const struct lnet_nid *nid)
+{
+ return LNET_MKNET(nid->nid_type, __be16_to_cpu(nid->nid_num));
+}
+
+static inline void lnet_nid4_to_nid(lnet_nid_t nid4, struct lnet_nid *nid)
+{
+ if (nid4 == LNET_NID_ANY) {
+ /* equal to setting to LNET_ANY_NID */
+ memset(nid, 0xff, sizeof(*nid));
+ return;
+ }
+
+ nid->nid_size = 0;
+ nid->nid_type = LNET_NETTYP(LNET_NIDNET(nid4));
+ nid->nid_num = __cpu_to_be16(LNET_NETNUM(LNET_NIDNET(nid4)));
+ nid->nid_addr[0] = __cpu_to_be32(LNET_NIDADDR(nid4));
+ nid->nid_addr[1] = nid->nid_addr[2] = nid->nid_addr[3] = 0;
+}
+
+static inline lnet_nid_t lnet_nid_to_nid4(const struct lnet_nid *nid)
+{
+ if (LNET_NID_IS_ANY(nid))
+ return LNET_NID_ANY;
+
+ return LNET_MKNID(LNET_NID_NET(nid), __be32_to_cpu(nid->nid_addr[0]));
+}
+
+static inline int nid_same(const struct lnet_nid *n1,
+ const struct lnet_nid *n2)
+{
+ return n1->nid_size == n2->nid_size &&
+ n1->nid_type == n2->nid_type &&
+ n1->nid_num == n2->nid_num &&
+ n1->nid_addr[0] == n2->nid_addr[0] &&
+ n1->nid_addr[1] == n2->nid_addr[1] &&
+ n1->nid_addr[2] == n2->nid_addr[2] &&
+ n1->nid_addr[3] == n2->nid_addr[3];
+}
+
struct lnet_counters_health {
__u32 lch_rst_alloc;
__u32 lch_resend_count;
struct list_head;
#define LNET_NIDSTR_COUNT 1024 /* # of nidstrings */
-#define LNET_NIDSTR_SIZE 32 /* size of each one (see below for usage) */
+#define LNET_NIDSTR_SIZE 64 /* size of each one (see below for usage) */
/* support decl needed by both kernel and user space */
char *libcfs_next_nidstring(void);
return libcfs_nid2str_r(nid, libcfs_next_nidstring(),
LNET_NIDSTR_SIZE);
}
+char *libcfs_nidstr_r(const struct lnet_nid *nid,
+ char *buf, __kernel_size_t buf_size);
+static inline char *libcfs_nidstr(const struct lnet_nid *nid)
+{
+ return libcfs_nidstr_r(nid, libcfs_next_nidstring(),
+ LNET_NIDSTR_SIZE);
+}
__u32 libcfs_str2net(const char *str);
lnet_nid_t libcfs_str2nid(const char *str);
int libcfs_str2anynid(lnet_nid_t *nid, const char *str);
int loopback;
int count = 0;
- loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
+ loopback = (peer->gnp_nid ==
+ lnet_nid_to_nid4(&peer->gnp_net->gnn_ni->ni_nid));
list_for_each_entry_safe(conn, cnxt, &peer->gnp_conns, gnc_list) {
if (conn->gnc_state != GNILND_CONN_ESTABLISHED)
int loopback;
ENTRY;
- loopback = peer->gnp_nid == peer->gnp_net->gnn_ni->ni_nid;
+ loopback = (peer->gnp_nid ==
+ lnet_nid_to_nid4(&peer->gnp_net->gnn_ni->ni_nid));
list_for_each_entry(conn, &peer->gnp_conns, gnc_list) {
CDEBUG(D_NET, "checking conn 0x%p for peer %s"
net = nets[i];
- peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
- peer->gnp_nid);
+ peer_nid = kgnilnd_lnd2lnetnid(
+ lnet_nid_to_nid4(&net->gnn_ni->ni_nid),
+ peer->gnp_nid);
CDEBUG(D_NET, "peer 0x%p->%s last_alive %lld (%llds ago)\n",
peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
/* The nid passed in does not yet contain the net portion.
* Let's build it up now
*/
- nid = LNET_MKNID(LNET_NIDNET(net->gnn_ni->ni_nid), nid);
+ nid = LNET_MKNID(LNET_NID_NET(&net->gnn_ni->ni_nid), nid);
rc = kgnilnd_add_peer(net, nid, &new_peer);
if (rc) {
* LNET assumes a conn and peer per net, the LNET_MKNID/LNET_NIDADDR allows us to let Lnet see what it
* wants to see instead of the underlying network that is being used to send the data
*/
- data->ioc_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(nid));
+ data->ioc_nid = LNET_MKNID(LNET_NID_NET(&ni->ni_nid),
+ LNET_NIDADDR(nid));
data->ioc_flags = peer_connecting;
data->ioc_count = peer_refcount;
/* LNET_MKNID is used to build the correct address based on what LNET wants to see instead of
* the generic connection that is used to send the data
*/
- data->ioc_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), LNET_NIDADDR(conn->gnc_peer->gnp_nid));
+ data->ioc_nid = LNET_MKNID(LNET_NID_NET(&ni->ni_nid),
+ LNET_NIDADDR(conn->gnc_peer->gnp_nid));
data->ioc_u32[0] = conn->gnc_device->gnd_id;
kgnilnd_conn_decref(conn);
}
}
case IOC_LIBCFS_REGISTER_MYNID: {
/* Ignore if this is a noop */
- if (data->ioc_nid == ni->ni_nid) {
+ if (data->ioc_nid == lnet_nid_to_nid4(&ni->ni_nid)) {
rc = 0;
} else {
CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
libcfs_nid2str(data->ioc_nid),
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nidstr(&ni->ni_nid));
rc = -EINVAL;
}
break;
atomic_set(&net->gnn_refcount, 1);
/* if we have multiple devices, spread the nets around */
- net->gnn_netnum = LNET_NETNUM(LNET_NIDNET(ni->ni_nid));
+ net->gnn_netnum = LNET_NETNUM(LNET_NID_NET(&ni->ni_nid));
- devno = LNET_NIDNET(ni->ni_nid) % GNILND_MAXDEVS;
+ devno = LNET_NID_NET(&ni->ni_nid) % GNILND_MAXDEVS;
net->gnn_dev = &kgnilnd_data.kgn_devices[devno];
/* allocate a 'dummy' cdm for datagram use. We can only have a single
/* the instance id for the cdm is the NETNUM offset by MAXDEVS -
* ensuring we'll have a unique id */
-
- ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), net->gnn_dev->gnd_nid);
+ ni->ni_nid.nid_addr[0] =
+ cpu_to_be32(LNET_NIDADDR(net->gnn_dev->gnd_nid));
CDEBUG(D_NET, "adding net %p nid=%s on dev %d \n",
- net, libcfs_nid2str(ni->ni_nid), net->gnn_dev->gnd_id);
+ net, libcfs_nidstr(&ni->ni_nid), net->gnn_dev->gnd_id);
/* until the gnn_list is set, we need to cleanup ourselves as
* kgnilnd_shutdown is just gonna get confused */
wake_up_var(&kgnilnd_data); \
}while (0)
-#define kgnilnd_net_addref(net) \
-do { \
- int val = atomic_inc_return(&net->gnn_refcount); \
- LASSERTF(val > 1, "net %p refcount %d\n", net, val); \
- CDEBUG(D_NETTRACE, "net %p->%s++ (%d)\n", net, \
- libcfs_nid2str(net->gnn_ni->ni_nid), val); \
+#define kgnilnd_net_addref(net) \
+do { \
+ int val = atomic_inc_return(&net->gnn_refcount); \
+ LASSERTF(val > 1, "net %p refcount %d\n", net, val); \
+ CDEBUG(D_NETTRACE, "net %p->%s++ (%d)\n", net, \
+ libcfs_nidstr(&net->gnn_ni->ni_nid), val); \
} while (0)
-#define kgnilnd_net_decref(net) \
-do { \
- int val = atomic_dec_return(&net->gnn_refcount); \
- LASSERTF(val >= 0, "net %p refcount %d\n", net, val); \
- CDEBUG(D_NETTRACE, "net %p->%s-- (%d)\n", net, \
- libcfs_nid2str(net->gnn_ni->ni_nid), val); \
+#define kgnilnd_net_decref(net) \
+do { \
+ int val = atomic_dec_return(&net->gnn_refcount); \
+ LASSERTF(val >= 0, "net %p refcount %d\n", net, val); \
+ CDEBUG(D_NETTRACE, "net %p->%s-- (%d)\n", net, \
+ libcfs_nidstr(&net->gnn_ni->ni_nid), val); \
} while (0)
#define kgnilnd_peer_addref(peer) \
if (conn->gnc_peer) {
loopback = conn->gnc_peer->gnp_nid ==
- conn->gnc_peer->gnp_net->gnn_ni->ni_nid;
+ lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
} else {
/* short circuit - a conn that didn't complete
* setup never needs a purgatory hold */
return -ESHUTDOWN;
}
- list_for_each_entry(net, kgnilnd_netnum2netlist(LNET_NETNUM(LNET_NIDNET(nid))), gnn_list) {
- if (!net->gnn_shutdown && LNET_NIDNET(net->gnn_ni->ni_nid) == LNET_NIDNET(nid)) {
+ list_for_each_entry(net,
+ kgnilnd_netnum2netlist(LNET_NETNUM(LNET_NIDNET(nid))),
+ gnn_list) {
+ if (!net->gnn_shutdown &&
+ LNET_NID_NET(&net->gnn_ni->ni_nid) == LNET_NIDNET(nid)) {
kgnilnd_net_addref(net);
up_read(&kgnilnd_data.kgn_net_rw_sem);
*netp = net;
break;
if ((reverse_rdma_flag & GNILND_REVERSE_GET) == 0)
- tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ,
+ lnet_nid_to_nid4(&ni->ni_nid));
else
- tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ_REV, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_REQ_REV,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
rc = -ENOMEM;
break;
if ((reverse_rdma_flag & GNILND_REVERSE_PUT) == 0)
- tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ,
+ lnet_nid_to_nid4(&ni->ni_nid));
else
- tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ_REV, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_REQ_REV,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
rc = -ENOMEM;
LASSERTF(nob <= *kgnilnd_tunables.kgn_max_immediate,
"lntmsg 0x%p too large %d\n", lntmsg, nob);
- tx = kgnilnd_new_tx_msg(GNILND_MSG_IMMEDIATE, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_IMMEDIATE,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
rc = -ENOMEM;
goto out;
LBUG();
}
- tx = kgnilnd_new_tx_msg(done_type, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(done_type, lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL)
goto failed_0;
failed_1:
kgnilnd_tx_done(tx, rc);
- kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
+ kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
failed_0:
lnet_finalize(lntmsg, rc);
}
/* only error if lntmsg == NULL, otherwise we are just
* short circuiting the rdma process of 0 bytes */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- lntmsg == NULL ? -ENOENT : 0,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ lntmsg == NULL ? -ENOENT : 0,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
RETURN(0);
}
/* sending ACK with sink buff. info */
- tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_ACK, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_PUT_ACK,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
kgnilnd_consume_rx(rx);
RETURN(-ENOMEM);
nak_put_req:
/* make sure we send an error back when the PUT fails */
- kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
+ kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
kgnilnd_tx_done(tx, rc);
kgnilnd_consume_rx(rx);
/* only error if lntmsg == NULL, otherwise we are just
* short circuiting the rdma process of 0 bytes */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- lntmsg == NULL ? -ENOENT : 0,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ lntmsg == NULL ? -ENOENT : 0,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
RETURN(0);
}
/* lntmsg can be null when parsing a LNET_GET */
if (lntmsg != NULL) {
/* sending ACK with sink buff. info */
- tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_ACK_REV, ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_GET_ACK_REV,
+ lnet_nid_to_nid4(&ni->ni_nid));
if (tx == NULL) {
kgnilnd_consume_rx(rx);
RETURN(-ENOMEM);
} else {
/* No match */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- -ENOENT,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ -ENOENT,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
}
kgnilnd_consume_rx(rx);
nak_get_req_rev:
/* make sure we send an error back when the GET fails */
- kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
+ kgnilnd_nak_rdma(conn, rxmsg->gnm_type, rc,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
kgnilnd_tx_done(tx, rc);
kgnilnd_consume_rx(rx);
/* only error if lntmsg == NULL, otherwise we are just
* short circuiting the rdma process of 0 bytes */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- lntmsg == NULL ? -ENOENT : 0,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ lntmsg == NULL ? -ENOENT : 0,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
RETURN(0);
}
} else {
/* No match */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- -ENOENT,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ -ENOENT,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
}
kgnilnd_consume_rx(rx);
RETURN(0);
} else {
/* No match */
kgnilnd_nak_rdma(conn, rxmsg->gnm_type,
- -ENOENT,
- rxmsg->gnm_u.get.gngm_cookie,
- ni->ni_nid);
+ -ENOENT,
+ rxmsg->gnm_u.get.gngm_cookie,
+ lnet_nid_to_nid4(&ni->ni_nid));
}
kgnilnd_consume_rx(rx);
RETURN(0);
if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
return 0;
- tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP,
+ lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid));
if (tx == NULL)
return 0;
kgnilnd_queue_tx(conn, tx);
if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
return;
- tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP,
+ lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid));
if (tx != NULL) {
int rc;
if (conn->gnc_ephandle != NULL) {
int rc = 0;
- tx = kgnilnd_new_tx_msg(GNILND_MSG_CLOSE, conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_CLOSE,
+ lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid));
if (tx != NULL) {
tx->tx_msg.gnm_u.completion.gncm_retval = conn->gnc_error;
tx->tx_state = GNILND_TX_WAITING_COMPLETION;
return rc;
}
- if (net->gnn_ni->ni_nid != connreq->gncr_dstnid) {
+ if (lnet_nid_to_nid4(&net->gnn_ni->ni_nid) !=
+ connreq->gncr_dstnid) {
CERROR("Bad connection data from %s: she sent "
"dst_nid %s, but I am %s with dgram 0x%p@%s\n",
libcfs_nid2str(connreq->gncr_srcnid),
libcfs_nid2str(connreq->gncr_dstnid),
- libcfs_nid2str(net->gnn_ni->ni_nid),
+ libcfs_nidstr(&net->gnn_ni->ni_nid),
dgram, kgnilnd_dgram_type2str(dgram));
kgnilnd_net_decref(net);
return -EBADSLT;
/* Dont send NOOP if fail_loc is set
*/
if (!CFS_FAIL_CHECK(CFS_FAIL_GNI_ONLY_NOOP)) {
- tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, peer->gnp_net->gnn_ni->ni_nid);
+ tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP,
+ lnet_nid_to_nid4(&peer->gnp_net->gnn_ni->ni_nid));
if (tx == NULL) {
CNETERR("can't get TX to initiate NOOP to %s\n",
libcfs_nid2str(peer->gnp_nid));
{
struct kib_net *net = ni->ni_data;
- /* CAVEAT EMPTOR! all message fields not set here should have been
- * initialised previously. */
- msg->ibm_magic = IBLND_MSG_MAGIC;
- msg->ibm_version = version;
- /* ibm_type */
- msg->ibm_credits = credits;
- /* ibm_nob */
- msg->ibm_cksum = 0;
- msg->ibm_srcnid = ni->ni_nid;
- msg->ibm_srcstamp = net->ibn_incarnation;
- msg->ibm_dstnid = dstnid;
- msg->ibm_dststamp = dststamp;
-
- if (*kiblnd_tunables.kib_cksum) {
- /* NB ibm_cksum zero while computing cksum */
- msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
- }
+ /* CAVEAT EMPTOR! all message fields not set here should have been
+ * initialised previously.
+ */
+ msg->ibm_magic = IBLND_MSG_MAGIC;
+ msg->ibm_version = version;
+ /* ibm_type */
+ msg->ibm_credits = credits;
+ /* ibm_nob */
+ msg->ibm_cksum = 0;
+ msg->ibm_srcnid = lnet_nid_to_nid4(&ni->ni_nid);
+ msg->ibm_srcstamp = net->ibn_incarnation;
+ msg->ibm_dstnid = dstnid;
+ msg->ibm_dststamp = dststamp;
+
+ if (*kiblnd_tunables.kib_cksum) {
+ /* NB ibm_cksum zero while computing cksum */
+ msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
+ }
}
int kiblnd_unpack_msg(struct kib_msg *msg, int nob)
* created.
*/
if (peer_ni->ibp_nid != nid ||
- peer_ni->ibp_ni->ni_nid != ni->ni_nid)
+ !nid_same(&peer_ni->ibp_ni->ni_nid, &ni->ni_nid))
continue;
CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d) version: %x\n",
list_for_each_entry(net, &hdev->ibh_dev->ibd_nets, ibn_list) {
if (val)
CDEBUG(D_NETERROR, "Fatal device error for NI %s\n",
- libcfs_nid2str(net->ibn_ni->ni_nid));
+ libcfs_nidstr(&net->ibn_ni->ni_nid));
atomic_set(&net->ibn_ni->ni_fatal_error_on, val);
}
}
wait_var_event_warning(&net->ibn_npeers,
atomic_read(&net->ibn_npeers) == 0,
"%s: waiting for %d peers to disconnect\n",
- libcfs_nid2str(ni->ni_nid),
+ libcfs_nidstr(&ni->ni_nid),
atomic_read(&net->ibn_npeers));
kiblnd_net_fini_pools(net);
}
net->ibn_dev = ibdev;
- ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
+ ni->ni_nid.nid_addr[0] = cpu_to_be32(ibdev->ibd_ifip);
ni->ni_dev_cpt = ifaces[i].li_cpt;
int rc;
int err = -EIO;
- LASSERT (net != NULL);
- LASSERT (rx->rx_nob < 0); /* was posted */
- rx->rx_nob = 0; /* isn't now */
+ LASSERT(net);
+ LASSERT(rx->rx_nob < 0); /* was posted */
+ rx->rx_nob = 0; /* isn't now */
- if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
- goto ignore;
+ if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
+ goto ignore;
- if (status != IB_WC_SUCCESS) {
- CNETERR("Rx from %s failed: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
- goto failed;
- }
+ if (status != IB_WC_SUCCESS) {
+ CNETERR("Rx from %s failed: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
+ goto failed;
+ }
- LASSERT (nob >= 0);
- rx->rx_nob = nob;
+ LASSERT(nob >= 0);
+ rx->rx_nob = nob;
- rc = kiblnd_unpack_msg(msg, rx->rx_nob);
- if (rc != 0) {
- CERROR ("Error %d unpacking rx from %s\n",
- rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- goto failed;
- }
+ rc = kiblnd_unpack_msg(msg, rx->rx_nob);
+ if (rc != 0) {
+ CERROR("Error %d unpacking rx from %s\n",
+ rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ goto failed;
+ }
- if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
- msg->ibm_dstnid != ni->ni_nid ||
- msg->ibm_srcstamp != conn->ibc_incarnation ||
- msg->ibm_dststamp != net->ibn_incarnation) {
- CERROR ("Stale rx from %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- err = -ESTALE;
- goto failed;
- }
+ if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
+ msg->ibm_dstnid != lnet_nid_to_nid4(&ni->ni_nid) ||
+ msg->ibm_srcstamp != conn->ibc_incarnation ||
+ msg->ibm_dststamp != net->ibn_incarnation) {
+ CERROR("Stale rx from %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ err = -ESTALE;
+ goto failed;
+ }
- /* set time last known alive */
- kiblnd_peer_alive(conn->ibc_peer);
+ /* set time last known alive */
+ kiblnd_peer_alive(conn->ibc_peer);
- /* racing with connection establishment/teardown! */
+ /* racing with connection establishment/teardown! */
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
unsigned long flags;
return;
}
write_unlock_irqrestore(g_lock, flags);
- }
- kiblnd_handle_rx(rx);
- return;
+ }
+ kiblnd_handle_rx(rx);
+ return;
- failed:
- CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
- kiblnd_close_conn(conn, err);
- ignore:
- kiblnd_drop_rx(rx); /* Don't re-post rx. */
+failed:
+ CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
+ kiblnd_close_conn(conn, err);
+ignore:
+ kiblnd_drop_rx(rx); /* Don't re-post rx. */
}
static int
rej.ibr_incarnation = net->ibn_incarnation;
}
- if (ni == NULL || /* no matching net */
- ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
- net->ibn_dev != ibdev) { /* wrong device */
+ if (ni == NULL || /* no matching net */
+ lnet_nid_to_nid4(&ni->ni_nid) !=
+ reqmsg->ibm_dstnid || /* right NET, wrong NID! */
+ net->ibn_dev != ibdev) { /* wrong device */
CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n", libcfs_nid2str(nid),
- ni ? libcfs_nid2str(ni->ni_nid) : "NA",
+ ni ? libcfs_nidstr(&ni->ni_nid) : "NA",
ibdev->ibd_ifname, ibdev->ibd_nnets,
&ibdev->ibd_ifip,
libcfs_nid2str(reqmsg->ibm_dstnid));
* the lower NID connection win so we can move forward.
*/
if (peer2->ibp_connecting != 0 &&
- nid < ni->ni_nid && peer2->ibp_races <
- MAX_CONN_RACES_BEFORE_ABORT) {
+ nid < lnet_nid_to_nid4(&ni->ni_nid) &&
+ peer2->ibp_races < MAX_CONN_RACES_BEFORE_ABORT) {
peer2->ibp_races++;
write_unlock_irqrestore(g_lock, flags);
}
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (msg->ibm_dstnid == ni->ni_nid &&
+ if (msg->ibm_dstnid == lnet_nid_to_nid4(&ni->ni_nid) &&
msg->ibm_dststamp == net->ibn_incarnation)
rc = 0;
else
case IB_EVENT_PORT_ERR:
case IB_EVENT_DEVICE_FATAL:
CERROR("Fatal device error for NI %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
+ libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid));
atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 1);
return;
case IB_EVENT_PORT_ACTIVE:
CERROR("Port reactivated for NI %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
+ libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid));
atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 0);
return;
/* Am I already connecting to this guy? Resolve in
* favour of higher NID...
*/
- if (peerid.nid < ni->ni_nid &&
+ if (peerid.nid < lnet_nid_to_nid4(&ni->ni_nid) &&
ksocknal_connecting(peer_ni->ksnp_conn_cb,
((struct sockaddr *) &conn->ksnc_peeraddr))) {
rc = EALREADY;
return ksocknal_close_matching_conns (id,
data->ioc_u32[0]);
- case IOC_LIBCFS_REGISTER_MYNID:
- /* Ignore if this is a noop */
- if (data->ioc_nid == ni->ni_nid)
- return 0;
+ case IOC_LIBCFS_REGISTER_MYNID:
+ /* Ignore if this is a noop */
+ if (nid_is_nid4(&ni->ni_nid) &&
+ data->ioc_nid == lnet_nid_to_nid4(&ni->ni_nid))
+ return 0;
- CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
- libcfs_nid2str(data->ioc_nid),
- libcfs_nid2str(ni->ni_nid));
- return -EINVAL;
+ CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
+ libcfs_nid2str(data->ioc_nid),
+ libcfs_nidstr(&ni->ni_nid));
+ return -EINVAL;
case IOC_LIBCFS_PUSH_CONNECTION:
id.nid = data->ioc_nid;
LASSERT(ksi);
LASSERT(ksi->ksni_addr.ss_family == AF_INET);
- ni->ni_nid = LNET_MKNID(
- LNET_NIDNET(ni->ni_nid),
- ntohl(((struct sockaddr_in *)
- &ksi->ksni_addr)->sin_addr.s_addr));
+ ni->ni_nid.nid_addr[0] =
+ ((struct sockaddr_in *)&ksi->ksni_addr)->sin_addr.s_addr;
list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
net->ksnn_ni = ni;
ksocknal_data.ksnd_nnets++;
/* rely on caller to hold a ref on socket so it wouldn't disappear */
LASSERT(conn->ksnc_proto != NULL);
- hello->kshm_src_nid = ni->ni_nid;
+ hello->kshm_src_nid = lnet_nid_to_nid4(&ni->ni_nid);
hello->kshm_dst_nid = peer_nid;
hello->kshm_src_pid = the_lnet.ln_pid;
struct lnet_process_id *peerid,
__u64 *incarnation)
{
- /* Return < 0 fatal error
- * 0 success
- * EALREADY lost connection race
- * EPROTO protocol version mismatch
- */
+ /* Return < 0 fatal error
+ * 0 success
+ * EALREADY lost connection race
+ * EPROTO protocol version mismatch
+ */
struct socket *sock = conn->ksnc_sock;
- int active = (conn->ksnc_proto != NULL);
- int timeout;
- int proto_match;
- int rc;
+ int active = (conn->ksnc_proto != NULL);
+ int timeout;
+ int proto_match;
+ int rc;
const struct ksock_proto *proto;
struct lnet_process_id recv_id;
LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
timeout = active ? ksocknal_timeout() :
- lnet_acceptor_timeout();
+ lnet_acceptor_timeout();
rc = lnet_sock_read(sock, &hello->kshm_magic,
sizeof(hello->kshm_magic), timeout);
rc = lnet_sock_read(sock, &hello->kshm_version,
sizeof(hello->kshm_version), timeout);
- if (rc != 0) {
+ if (rc != 0) {
CERROR("Error %d reading HELLO from %pIS\n",
rc, &conn->ksnc_peeraddr);
LASSERT(rc < 0);
- return rc;
- }
+ return rc;
+ }
- proto = ksocknal_parse_proto_version(hello);
- if (proto == NULL) {
- if (!active) {
- /* unknown protocol from peer_ni, tell peer_ni my protocol */
- conn->ksnc_proto = &ksocknal_protocol_v3x;
+ proto = ksocknal_parse_proto_version(hello);
+ if (proto == NULL) {
+ if (!active) {
+ /* unknown protocol from peer_ni,
+ * tell peer_ni my protocol.
+ */
+ conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
- if (*ksocknal_tunables.ksnd_protocol == 2)
- conn->ksnc_proto = &ksocknal_protocol_v2x;
- else if (*ksocknal_tunables.ksnd_protocol == 1)
- conn->ksnc_proto = &ksocknal_protocol_v1x;
+ if (*ksocknal_tunables.ksnd_protocol == 2)
+ conn->ksnc_proto = &ksocknal_protocol_v2x;
+ else if (*ksocknal_tunables.ksnd_protocol == 1)
+ conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
- hello->kshm_nips = 0;
- ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
- }
+ hello->kshm_nips = 0;
+ ksocknal_send_hello(ni, conn,
+ lnet_nid_to_nid4(&ni->ni_nid),
+ hello);
+ }
CERROR("Unknown protocol version (%d.x expected) from %pIS\n",
conn->ksnc_proto->pro_version, &conn->ksnc_peeraddr);
- return -EPROTO;
- }
+ return -EPROTO;
+ }
- proto_match = (conn->ksnc_proto == proto);
- conn->ksnc_proto = proto;
+ proto_match = (conn->ksnc_proto == proto);
+ conn->ksnc_proto = proto;
- /* receive the rest of hello message anyway */
- rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
- if (rc != 0) {
+ /* receive the rest of hello message anyway */
+ rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
+ if (rc != 0) {
CERROR("Error %d reading or checking hello from from %pIS\n",
rc, &conn->ksnc_peeraddr);
- LASSERT (rc < 0);
- return rc;
- }
+ LASSERT(rc < 0);
+ return rc;
+ }
- *incarnation = hello->kshm_src_incarnation;
+ *incarnation = hello->kshm_src_incarnation;
if (hello->kshm_src_nid == LNET_NID_ANY) {
CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pIS\n",
LNET_PID_USERFLAG;
LASSERT(conn->ksnc_peeraddr.ss_family == AF_INET);
recv_id.nid = LNET_MKNID(
- LNET_NIDNET(ni->ni_nid),
+ LNET_NID_NET(&ni->ni_nid),
ntohl(((struct sockaddr_in *)
&conn->ksnc_peeraddr)->sin_addr.s_addr));
} else {
recv_id.pid = hello->kshm_src_pid;
}
- if (!active) {
- *peerid = recv_id;
+ if (!active) {
+ *peerid = recv_id;
/* peer_ni determines type */
conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
return -EPROTO;
}
- if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
- /* Possible protocol mismatch or I lost the connection race */
- return proto_match ? EALREADY : EPROTO;
- }
+ if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
+ /* Possible protocol mismatch or I lost the connection race */
+ return proto_match ? EALREADY : EPROTO;
+ }
if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
CERROR("Mismatched types: me %d, %s ip %pIS %d\n",
ni = lnet_nid2ni_addref(cr.acr_nid);
if (ni == NULL || /* no matching net */
- ni->ni_nid != cr.acr_nid) { /* right NET, wrong NID! */
+ lnet_nid_to_nid4(&ni->ni_nid) != cr.acr_nid) {
+ /* right NET, wrong NID! */
if (ni != NULL)
lnet_ni_decref(ni);
LCONSOLE_ERROR_MSG(0x120,
BUILD_BUG_ON((int)sizeof(lnet_nid_t) != 8);
BUILD_BUG_ON((int)sizeof(lnet_pid_t) != 4);
+ /* Checks for struct lnet_nid */
+ BUILD_BUG_ON((int)sizeof(struct lnet_nid) != 20);
+ BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_size) != 0);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_size) != 1);
+ BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_type) != 1);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_type) != 1);
+ BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_num) != 2);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_num) != 2);
+ BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_addr) != 4);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_addr) != 16);
+
/* Checks for struct lnet_process_id_packed */
BUILD_BUG_ON((int)sizeof(struct lnet_process_id_packed) != 12);
BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, nid) != 0);
}
struct lnet_ni *
-lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
+lnet_nid2ni_locked(lnet_nid_t nid4, int cpt)
{
- struct lnet_net *net;
- struct lnet_ni *ni;
+ struct lnet_net *net;
+ struct lnet_ni *ni;
+ struct lnet_nid nid;
LASSERT(cpt != LNET_LOCK_EX);
+ lnet_nid4_to_nid(nid4, &nid);
list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
- if (ni->ni_nid == nid)
+ if (nid_same(&ni->ni_nid, &nid))
return ni;
}
}
ns = &pbuf->pb_info.pi_ni[i];
- ns->ns_nid = ni->ni_nid;
+ if (!nid_is_nid4(&ni->ni_nid))
+ continue;
+ ns->ns_nid = lnet_nid_to_nid4(&ni->ni_nid);
lnet_ni_lock(ni);
ns->ns_status = lnet_ni_get_status_locked(ni);
if ((i & (-i)) == i) {
CDEBUG(D_WARNING,
"Waiting for zombie LNI %s\n",
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nidstr(&ni->ni_nid));
}
schedule_timeout_uninterruptible(cfs_time_seconds(1));
if (!islo)
CDEBUG(D_LNI, "Removed LNI %s\n",
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nidstr(&ni->ni_nid));
lnet_ni_free(ni);
i = 2;
atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
- libcfs_nid2str(ni->ni_nid),
+ libcfs_nidstr(&ni->ni_nid),
ni->ni_net->net_tunables.lct_peer_tx_credits,
lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
ni->ni_net->net_tunables.lct_peer_rtr_credits,
size_t min_size = 0;
int i;
- if (!ni || !cfg_ni || !tun)
+ if (!ni || !cfg_ni || !tun || !nid_is_nid4(&ni->ni_nid))
return;
if (ni->ni_interface != NULL) {
sizeof(cfg_ni->lic_ni_intf));
}
- cfg_ni->lic_nid = ni->ni_nid;
+ cfg_ni->lic_nid = lnet_nid_to_nid4(&ni->ni_nid);
cfg_ni->lic_status = lnet_ni_get_status_locked(ni);
cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
size_t min_size, tunable_size = 0;
int i;
- if (!ni || !config)
+ if (!ni || !config || !nid_is_nid4(&ni->ni_nid))
return;
net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
ni->ni_interface,
sizeof(net_config->ni_interface));
- config->cfg_nid = ni->ni_nid;
+ config->cfg_nid = lnet_nid_to_nid4(&ni->ni_nid);
config->cfg_config_u.cfg_net.net_peer_timeout =
ni->ni_net->net_tunables.lct_peer_timeout;
config->cfg_config_u.cfg_net.net_max_tx_credits =
rc = lnet_udsp_apply_policies_on_ni(ni);
if (rc)
CERROR("Failed to apply UDSPs on ni %s\n",
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nidstr(&ni->ni_nid));
}
lnet_net_unlock(LNET_LOCK_EX);
lnet_net_lock(LNET_LOCK_EX);
list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
- if (ni->ni_nid == nid || all) {
+ if (all || (nid_is_nid4(&ni->ni_nid) &&
+ lnet_nid_to_nid4(&ni->ni_nid) == nid)) {
atomic_set(&ni->ni_healthv, value);
if (list_empty(&ni->ni_recovery) &&
value < LNET_MAX_HEALTH_VALUE) {
CERROR("manually adding local NI %s to recovery\n",
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nidstr(&ni->ni_nid));
list_add_tail(&ni->ni_recovery,
&the_lnet.ln_mt_localNIRecovq);
lnet_ni_addref_locked(ni, 0);
lnet_net_lock(LNET_LOCK_EX);
list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
- if (ni->ni_nid != nid && !all)
+ if (lnet_nid_to_nid4(&ni->ni_nid) != nid && !all)
continue;
if (LNET_NETTYP(net->net_id) == SOCKLND)
ni->ni_lnd_tunables.lnd_tun_u.lnd_sock.lnd_conns_per_peer = value;
lnet_net_lock(LNET_LOCK_EX);
list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
- list->rlst_nid_array[i] = ni->ni_nid;
+ if (!nid_is_nid4(&ni->ni_nid))
+ continue;
+ list->rlst_nid_array[i] = lnet_nid_to_nid4(&ni->ni_nid);
i++;
if (i >= LNET_MAX_SHOW_NUM_NID)
break;
cpt = lnet_net_lock_current();
list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
- if (ni->ni_nid == nid) {
+ if (lnet_nid_to_nid4(&ni->ni_nid) == nid) {
lnet_net_unlock(cpt);
return true;
}
list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+ if (!nid_is_nid4(&ni->ni_nid))
+ /* FIXME this needs to be handled */
+ continue;
if (index-- != 0)
continue;
- id->nid = ni->ni_nid;
+ id->nid = lnet_nid_to_nid4(&ni->ni_nid);
id->pid = the_lnet.ln_pid;
rc = 0;
break;
if (ni->ni_interface != NULL) {
LCONSOLE_ERROR_MSG(0x115, "%s: interface %s already set for net %s: rc = %d\n",
iface, ni->ni_interface,
- libcfs_net2str(LNET_NIDNET(ni->ni_nid)),
+ libcfs_net2str(LNET_NID_NET(&ni->ni_nid)),
-EINVAL);
return -EINVAL;
}
ni->ni_net = net;
/* LND will fill in the address part of the NID */
- ni->ni_nid = LNET_MKNID(net->net_id, 0);
+ ni->ni_nid.nid_type = LNET_NETTYP(net->net_id);
+ ni->ni_nid.nid_num = cpu_to_be16(LNET_NETNUM(net->net_id));
/* Store net namespace in which current ni is being created */
if (current->nsproxy && current->nsproxy->net_ns)
int rc;
LASSERT(!in_interrupt());
- LASSERT(ni->ni_nid == LNET_NID_LO_0 ||
+ LASSERT(nid_is_lo0(&ni->ni_nid) ||
(msg->msg_txcredit && msg->msg_peertxcredit));
rc = (ni->ni_net->net_lnd->lnd_send)(ni, priv, msg);
/* can't get here if we're sending to the loopback interface */
if (the_lnet.ln_loni)
- LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid);
+ LASSERT(lp->lpni_nid !=
+ lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid));
/* NB 'lp' is always the next hop */
if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
* preferred, then let's use it
*/
if (best_ni) {
- lpni_is_preferred = lnet_peer_is_pref_nid_locked(lpni,
- best_ni->ni_nid);
+ /* FIXME need to handle large-addr nid */
+ lpni_is_preferred = lnet_peer_is_pref_nid_locked(
+ lpni, lnet_nid_to_nid4(&best_ni->ni_nid));
CDEBUG(D_NET, "%s lpni_is_preferred = %d\n",
- libcfs_nid2str(best_ni->ni_nid),
+ libcfs_nidstr(&best_ni->ni_nid),
lpni_is_preferred);
} else {
lpni_is_preferred = false;
if (best_ni)
CDEBUG(D_NET, "compare ni %s [c:%d, d:%d, s:%d, p:%u, g:%u] with best_ni %s [c:%d, d:%d, s:%d, p:%u, g:%u]\n",
- libcfs_nid2str(ni->ni_nid), ni_credits, distance,
+ libcfs_nidstr(&ni->ni_nid), ni_credits, distance,
ni->ni_seq, ni_sel_prio, ni_dev_prio,
- (best_ni) ? libcfs_nid2str(best_ni->ni_nid)
+ (best_ni) ? libcfs_nidstr(&best_ni->ni_nid)
: "not selected", best_credits, shortest_distance,
(best_ni) ? best_ni->ni_seq : 0,
best_sel_prio, best_dev_prio);
}
CDEBUG(D_NET, "selected best_ni %s\n",
- (best_ni) ? libcfs_nid2str(best_ni->ni_nid) : "no selection");
+ (best_ni) ? libcfs_nidstr(&best_ni->ni_nid) : "no selection");
return best_ni;
}
/* No send credit hassles with LOLND */
lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
- msg->msg_hdr.dest_nid = cpu_to_le64(the_lnet.ln_loni->ni_nid);
+ msg->msg_hdr.dest_nid =
+ cpu_to_le64(lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid));
if (!msg->msg_routing)
msg->msg_hdr.src_nid =
- cpu_to_le64(the_lnet.ln_loni->ni_nid);
- msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
+ cpu_to_le64(lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid));
+ msg->msg_target.nid = lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid);
lnet_msg_commit(msg, cpt);
msg->msg_txni = the_lnet.ln_loni;
best_ni->ni_net->net_seq++;
CDEBUG(D_NET, "%s NI seq info: [%d:%d:%d:%u] %s LPNI seq info [%d:%d:%d:%u]\n",
- libcfs_nid2str(best_ni->ni_nid),
+ libcfs_nidstr(&best_ni->ni_nid),
best_ni->ni_seq, best_ni->ni_net->net_seq,
atomic_read(&best_ni->ni_tx_credits),
best_ni->ni_sel_priority,
* originator and set it here.
*/
if (!msg->msg_routing)
- msg->msg_hdr.src_nid = cpu_to_le64(msg->msg_txni->ni_nid);
+ msg->msg_hdr.src_nid =
+ cpu_to_le64(lnet_nid_to_nid4(&msg->msg_txni->ni_nid));
if (routing) {
msg->msg_target_is_router = 1;
if (!rc)
CDEBUG(D_NET, "TRACE: %s(%s:%s) -> %s(%s:%s) %s : %s try# %d\n",
libcfs_nid2str(msg->msg_hdr.src_nid),
- libcfs_nid2str(msg->msg_txni->ni_nid),
+ libcfs_nidstr(&msg->msg_txni->ni_nid),
libcfs_nid2str(sd->sd_src_nid),
libcfs_nid2str(msg->msg_hdr.dest_nid),
libcfs_nid2str(sd->sd_dst_nid),
if (!lnet_peer_is_multi_rail(lpni->lpni_peer_net->lpn_peer) &&
!lnet_msg_is_response(msg) && lpni->lpni_pref_nnids == 0) {
CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
- libcfs_nid2str(lni->ni_nid),
+ libcfs_nidstr(&lni->ni_nid),
libcfs_nid2str(lpni->lpni_nid));
- lnet_peer_ni_set_non_mr_pref_nid(lpni, lni->ni_nid);
+ lnet_peer_ni_set_non_mr_pref_nid(
+ lpni, lnet_nid_to_nid4(&lni->ni_nid));
}
}
}
if (sd->sd_best_lpni &&
- sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid)
+ sd->sd_best_lpni->lpni_nid ==
+ lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid))
return lnet_handle_lo_send(sd);
else if (sd->sd_best_lpni)
return lnet_handle_send(sd);
struct lnet_peer_ni *gwni = NULL;
bool route_found = false;
lnet_nid_t src_nid = (sd->sd_src_nid != LNET_NID_ANY) ? sd->sd_src_nid :
- (sd->sd_best_ni != NULL) ? sd->sd_best_ni->ni_nid :
- LNET_NID_ANY;
+ (sd->sd_best_ni != NULL)
+ ? lnet_nid_to_nid4(&sd->sd_best_ni->ni_nid)
+ : LNET_NID_ANY;
int best_lpn_healthv = 0;
__u32 best_lpn_sel_prio = LNET_MAX_SELECTION_PRIORITY;
* network
*/
if (sd->sd_best_lpni &&
- sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid) {
+ sd->sd_best_lpni->lpni_nid ==
+ lnet_nid_to_nid4(&the_lnet.ln_loni->ni_nid)) {
/*
* in case we initially started with a routed
* destination, let's reset to local
lnet_net_unlock(0);
CDEBUG(D_NET, "attempting to recover local ni: %s\n",
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nidstr(&ni->ni_nid));
lnet_ni_lock(ni);
if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
if (!ev_info) {
CERROR("out of memory. Can't recover %s\n",
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nidstr(&ni->ni_nid));
lnet_ni_lock(ni);
ni->ni_recovery_state &=
~LNET_NI_RECOVERY_PENDING;
* We'll unlink the mdh in this case below.
*/
LNetInvalidateMDHandle(&ni->ni_ping_mdh);
- nid = ni->ni_nid;
+ /* FIXME need to handle large-addr nid */
+ nid = lnet_nid_to_nid4(&ni->ni_nid);
/*
* remove the NI from the local queue and drop the
lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
msg->msg_receiving = 0;
- rc = lnet_send(ni->ni_nid, msg, msg->msg_from);
+ /* FIXME need to handle large-addr nid */
+ rc = lnet_send(lnet_nid_to_nid4(&ni->ni_nid), msg, msg->msg_from);
if (rc < 0) {
/* didn't get as far as lnet_ni_send() */
CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
- libcfs_nid2str(ni->ni_nid),
+ libcfs_nidstr(&ni->ni_nid),
libcfs_id2str(info.mi_id), rc);
lnet_finalize(msg, rc);
if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
CNETERR("%s: Dropping REPLY from %s for %s "
"MD %#llx.%#llx\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
+ libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
(md == NULL) ? "invalid" : "inactive",
hdr->msg.reply.dst_wmd.wh_interface_cookie,
hdr->msg.reply.dst_wmd.wh_object_cookie);
(md->md_options & LNET_MD_TRUNCATE) == 0) {
CNETERR("%s: Dropping REPLY from %s length %d "
"for MD %#llx would overflow (%d)\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
+ libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
mlength);
lnet_res_unlock(cpt);
}
CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
+ libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
lnet_msg_attach_md(msg, md, 0, mlength);
/* Don't moan; this is expected */
CDEBUG(D_NET,
"%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
+ libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
(md == NULL) ? "invalid" : "inactive",
hdr->msg.ack.dst_wmd.wh_interface_cookie,
hdr->msg.ack.dst_wmd.wh_object_cookie);
}
CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
+ libcfs_nidstr(&ni->ni_nid), libcfs_id2str(src),
hdr->msg.ack.dst_wmd.wh_object_cookie);
lnet_msg_attach_md(msg, md, 0, 0);
dest_pid = le32_to_cpu(hdr->dest_pid);
payload_length = le32_to_cpu(hdr->payload_length);
- for_me = (ni->ni_nid == dest_nid);
+ /* FIXME handle large-addr nids */
+ for_me = (lnet_nid_to_nid4(&ni->ni_nid) == dest_nid);
cpt = lnet_cpt_of_nid(from_nid, ni);
CDEBUG(D_NET, "TRACE: %s(%s) <- %s : %s - %s\n",
libcfs_nid2str(dest_nid),
- libcfs_nid2str(ni->ni_nid),
+ libcfs_nidstr(&ni->ni_nid),
libcfs_nid2str(src_nid),
lnet_msgtyp2str(type),
(for_me) ? "for me" : "routed");
* or malicious so we chop them off at the knees :) */
if (!for_me) {
- if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
+ if (LNET_NIDNET(dest_nid) == LNET_NID_NET(&ni->ni_nid)) {
/* should have gone direct */
CERROR("%s, src %s: Bad dest nid %s "
"(should have been sent direct)\n",
goto drop;
}
+ /* FIXME need to support large-addr nid */
if (!list_empty(&the_lnet.ln_drop_rules) &&
- lnet_drop_rule_match(hdr, ni->ni_nid, NULL)) {
+ lnet_drop_rule_match(hdr, lnet_nid_to_nid4(&ni->ni_nid), NULL)) {
CDEBUG(D_NET,
"%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
}
lnet_net_lock(cpt);
- lpni = lnet_nid2peerni_locked(from_nid, ni->ni_nid, cpt);
+ /* FIXME support large-addr nid */
+ lpni = lnet_nid2peerni_locked(from_nid, lnet_nid_to_nid4(&ni->ni_nid),
+ cpt);
if (IS_ERR(lpni)) {
lnet_net_unlock(cpt);
CERROR("%s, src %s: Dropping %s "
if (msg == NULL) {
CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
+ libcfs_nidstr(&ni->ni_nid), libcfs_id2str(peer_id));
goto drop;
}
if (getmd->md_threshold == 0) {
CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
+ libcfs_nidstr(&ni->ni_nid), libcfs_id2str(peer_id),
getmd);
lnet_res_unlock(cpt);
goto drop;
LASSERT(getmd->md_offset == 0);
CDEBUG(D_NET, "%s: Reply from %s md %p\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
+ libcfs_nidstr(&ni->ni_nid), libcfs_id2str(peer_id), getmd);
/* setup information for lnet_build_msg_event */
msg->msg_initiator = getmsg->msg_txpeer->lpni_peer_net->lpn_peer->lp_primary_nid;
cpt = lnet_net_lock_current();
while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
- if (ni->ni_nid == dstnid) {
+ /* FIXME support large-addr nid */
+ if (lnet_nid_to_nid4(&ni->ni_nid) == dstnid) {
if (srcnidp != NULL)
*srcnidp = dstnid;
if (orderp != NULL) {
return local_nid_dist_zero ? 0 : 1;
}
- if (!matched_dstnet && LNET_NIDNET(ni->ni_nid) == dstnet) {
+ if (!matched_dstnet && LNET_NID_NET(&ni->ni_nid) == dstnet) {
matched_dstnet = true;
/* We matched the destination net, but we may have
* additional local NIs to inspect.
* they may be overwritten if we match local NI above.
*/
if (srcnidp)
- *srcnidp = ni->ni_nid;
+ /* FIXME support large-addr nids */
+ *srcnidp = lnet_nid_to_nid4(&ni->ni_nid);
if (orderp) {
/* Check if ni was originally created in
net = lnet_get_net_locked(shortest->lr_lnet);
LASSERT(net);
ni = lnet_get_next_ni_locked(net, NULL);
- *srcnidp = ni->ni_nid;
+ /* FIXME support large-addr nids */
+ *srcnidp = lnet_nid_to_nid4(&ni->ni_nid);
}
if (orderp != NULL)
*orderp = order;
lnet_ni_set_next_ping(ni, now);
CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld health :%d\n",
- libcfs_nid2str(ni->ni_nid),
+ libcfs_nidstr(&ni->ni_nid),
ni->ni_ping_count,
ni->ni_next_ping,
atomic_read(&ni->ni_healthv));
* if we're sending to the LOLND then the msg_txpeer will not be
* set. So no need to sanity check it.
*/
- if (msg->msg_tx_committed && msg->msg_txni->ni_nid != LNET_NID_LO_0)
+ if (msg->msg_tx_committed &&
+ !nid_is_lo0(&msg->msg_txni->ni_nid))
LASSERT(msg->msg_txpeer);
else if (msg->msg_tx_committed &&
- msg->msg_txni->ni_nid == LNET_NID_LO_0)
+ nid_is_lo0(&msg->msg_txni->ni_nid))
lo = true;
if (hstatus != LNET_MSG_STATUS_OK &&
LASSERT(ni);
CDEBUG(D_NET, "health check: %s->%s: %s: %s\n",
- libcfs_nid2str(ni->ni_nid),
+ libcfs_nidstr(&ni->ni_nid),
(lo) ? "self" : libcfs_nid2str(lpni->lpni_nid),
lnet_msgtyp2str(msg->msg_type),
lnet_health_error2str(hstatus));
CDEBUG(D_NET, "src %s(%s)->dst %s: %s simulate health error: %s\n",
libcfs_nid2str(msg->msg_hdr.src_nid),
- libcfs_nid2str(msg->msg_txni->ni_nid),
+ libcfs_nidstr(&msg->msg_txni->ni_nid),
libcfs_nid2str(msg->msg_hdr.dest_nid),
lnet_msgtyp2str(msg->msg_type),
lnet_health_error2str(*hstatus));
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
struct sockaddr_in6 *sin6 = (void *)&locaddr;
+ int val = 0;
sin6->sin6_family = AF_INET6;
sin6->sin6_addr = in6addr_any;
+
+ /* Make sure we get both IPv4 and IPv6 connections.
+ * This is the default, but it can be overridden so we
+ * force it back.
+ */
+#ifdef HAVE_KERNEL_SETSOCKOPT
+ kernel_setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
+ (char *) &val, sizeof(val));
+#elif defined(_LINUX_SOCKPTR_H)
+ /* sockptr_t was introduced around
+ * v5.8-rc4-1952-ga7b75c5a8c41 and allows a
+ * kernel address to be passed to ->setsockopt
+ */
+ if (ipv6_only_sock(sock->sk)) {
+ sockptr_t optval = KERNEL_SOCKPTR(&val);
+
+ sock->ops->setsockopt(sock,
+ IPPROTO_IPV6, IPV6_V6ONLY,
+ optval, sizeof(val));
+ }
+#else
+ /* From v5.7-rc6-2614-g5a892ff2facb when
+ * kernel_setsockopt() was removed until
+ * sockptr_t (above) there is no clean way to
+ * pass kernel address to setsockopt. We could
+ * use get_fs()/set_fs(), but in this particular
+ * situation there is an easier way. It depends
+ * on the fact that at least for these few
+ * kernels a NULL address to ipv6_setsockopt()
+ * is treated like the address of a zero.
+ */
+ if (ipv6_only_sock(sock->sk) && !val) {
+ void *optval = NULL;
+
+ sock->ops->setsockopt(sock,
+ IPPROTO_IPV6, IPV6_V6ONLY,
+ optval, sizeof(val));
+ }
+#endif /* HAVE_KERNEL_SETSOCKOPT */
+
if (interface >= 0 && remaddr) {
struct sockaddr_in6 *rem = (void *)remaddr;
lnet_sock_listen(int local_port, int backlog, struct net *ns)
{
struct socket *sock;
- int val = 0;
int rc;
sock = lnet_sock_create(-1, NULL, local_port, ns);
return ERR_PTR(rc);
}
- /* Make sure we get both IPv4 and IPv6 connections.
- * This is the default, but it can be overridden so
- * we force it back.
- */
-#ifdef HAVE_KERNEL_SETSOCKOPT
- kernel_setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
- (char *) &val, sizeof(val));
-#elif defined(_LINUX_SOCKPTR_H)
- /* sockptr_t was introduced around v5.8-rc4-1952-ga7b75c5a8c41
- * and allows a kernel address to be passed to ->setsockopt
- */
- if (ipv6_only_sock(sock->sk)) {
- sockptr_t optval = KERNEL_SOCKPTR(&val);
- sock->ops->setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
- optval, sizeof(val));
- }
-#else
- /* From v5.7-rc6-2614-g5a892ff2facb when kernel_setsockopt()
- * was removed until sockptr_t (above) there is no clean
- * way to pass kernel address to setsockopt. We could use
- * get_fs()/set_fs(), but in this particular situation there
- * is an easier way.
- * It depends on the fact that at least for these few kernels
- * a NULL address to ipv6_setsockopt() is treated like the address
- * of a zero.
- */
- if (ipv6_only_sock(sock->sk) && !val) {
- void *optval = NULL;
- sock->ops->setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
- optval, sizeof(val));
- }
-#endif
-
rc = kernel_listen(sock, backlog);
if (rc == 0)
return sock;
LASSERT(!lntmsg->msg_routing);
LASSERT(!lntmsg->msg_target_is_router);
- return lnet_parse(ni, &lntmsg->msg_hdr, ni->ni_nid, lntmsg, 0);
+ return lnet_parse(ni, &lntmsg->msg_hdr,
+ lnet_nid_to_nid4(&ni->ni_nid), lntmsg, 0);
}
static int
list_del_init(&msg->msg_list);
ni = msg->msg_txni;
CDEBUG(D_NET, "TRACE: msg %p %s -> %s : %s\n", msg,
- libcfs_nid2str(ni->ni_nid),
+ libcfs_nidstr(&ni->ni_nid),
libcfs_nid2str(msg->msg_txpeer->lpni_nid),
lnet_msgtyp2str(msg->msg_type));
lnet_ni_send(ni, msg);
}
EXPORT_SYMBOL(libcfs_nid2str_r);
+char *
+libcfs_nidstr_r(const struct lnet_nid *nid, char *buf, size_t buf_size)
+{
+ __u32 nnum = be16_to_cpu(nid->nid_num);
+ __u32 lnd = nid->nid_type;
+ struct netstrfns *nf;
+
+ if (LNET_NID_IS_ANY(nid)) {
+ strncpy(buf, "<?>", buf_size);
+ buf[buf_size - 1] = '\0';
+ return buf;
+ }
+
+ nf = libcfs_lnd2netstrfns(lnd);
+ if (nf && nid_is_nid4(nid)) {
+ size_t addr_len;
+
+ nf->nf_addr2str(ntohl(nid->nid_addr[0]), buf, buf_size);
+ addr_len = strlen(buf);
+ if (nnum == 0)
+ snprintf(buf + addr_len, buf_size - addr_len, "@%s",
+ nf->nf_name);
+ else
+ snprintf(buf + addr_len, buf_size - addr_len, "@%s%u",
+ nf->nf_name, nnum);
+ } else {
+ int l = 0;
+ int words = DIV_ROUND_UP(NID_ADDR_BYTES(nid), 4);
+ int i;
+
+ for (i = 0; i < words && i < 4; i++)
+ l = snprintf(buf+l, buf_size-l, "%s%x",
+ i ? ":" : "", ntohl(nid->nid_addr[i]));
+ snprintf(buf+l, buf_size-l, "@<%u:%u>", lnd, nnum);
+ }
+
+ return buf;
+}
+EXPORT_SYMBOL(libcfs_nidstr_r);
+
static struct netstrfns *
libcfs_str2net_internal(const char *str, __u32 *net)
{
LASSERT(!in_interrupt());
CDEBUG(D_NET, "%s notifying %s: %s\n",
- (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
+ (ni == NULL) ? "userspace" : libcfs_nidstr(&ni->ni_nid),
libcfs_nid2str(nid), alive ? "up" : "down");
if (ni != NULL &&
- LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
+ LNET_NID_NET(&ni->ni_nid) != LNET_NIDNET(nid)) {
CWARN("Ignoring notification of %s %s by %s (different net)\n",
libcfs_nid2str(nid), alive ? "birth" : "death",
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nidstr(&ni->ni_nid));
return -EINVAL;
}
/* can't do predictions... */
if (when > now) {
- CWARN("Ignoring prediction from %s of %s %s "
- "%lld seconds in the future\n",
- (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
- libcfs_nid2str(nid), alive ? "up" : "down", when - now);
+ CWARN("Ignoring prediction from %s of %s %s %lld seconds in the future\n",
+ ni ? libcfs_nidstr(&ni->ni_nid) : "userspace",
+ libcfs_nid2str(nid), alive ? "up" : "down", when - now);
return -EINVAL;
}
s += scnprintf(s, tmpstr + tmpsiz - s,
"%-24s %6s %5lld %4d %4d %4d %5d %5d %5d\n",
- libcfs_nid2str(ni->ni_nid), stat,
+ libcfs_nidstr(&ni->ni_nid), stat,
last_alive, *ni->ni_refs[i],
ni->ni_net->net_tunables.lct_peer_tx_credits,
ni->ni_net->net_tunables.lct_peer_rtr_credits,
struct lnet_ud_nid_descr *ni_match = udi->udi_match;
__u32 priority = (udi->udi_revert) ? -1 : udi->udi_priority;
- rc = cfs_match_nid_net(ni->ni_nid,
+ rc = cfs_match_nid_net(
+ lnet_nid_to_nid4(&ni->ni_nid),
ni_match->ud_net_id.udn_net_type,
&ni_match->ud_net_id.udn_net_num_range,
&ni_match->ud_addr_range);
return 0;
CDEBUG(D_NET, "apply udsp on ni %s\n",
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nidstr(&ni->ni_nid));
/* Detected match. Set NIDs priority */
lnet_ni_set_sel_priority_locked(ni, priority);
if (LNET_NETTYP(net->net_id) != ni_action->ud_net_id.udn_net_type)
continue;
list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
- rc = cfs_match_nid_net(ni->ni_nid,
+ rc = cfs_match_nid_net(
+ lnet_nid_to_nid4(&ni->ni_nid),
ni_action->ud_net_id.udn_net_type,
&ni_action->ud_net_id.udn_net_num_range,
&ni_action->ud_addr_range);
}
}
CDEBUG(D_NET, "add nid %s as preferred for peer %s\n",
- libcfs_nid2str(ni->ni_nid),
+ libcfs_nidstr(&ni->ni_nid),
libcfs_nid2str(lpni->lpni_nid));
/* match. Add to pref NIDs */
- rc = lnet_peer_add_pref_nid(lpni, ni->ni_nid);
+ rc = lnet_peer_add_pref_nid(
+ lpni, lnet_nid_to_nid4(&ni->ni_nid));
lnet_net_lock(LNET_LOCK_EX);
/* success if EEXIST return */
if (rc && rc != -EEXIST) {
CERROR("Failed to add %s to %s pref nid list\n",
- libcfs_nid2str(ni->ni_nid),
+ libcfs_nidstr(&ni->ni_nid),
libcfs_nid2str(lpni->lpni_nid));
return rc;
}