*
*/
-#include "openibnal.h"
+#include "openiblnd.h"
-nal_t koibnal_api;
-ptl_handle_ni_t koibnal_ni;
-koib_data_t koibnal_data;
-koib_tunables_t koibnal_tunables;
+lnd_t the_kiblnd = {
+#ifdef USING_TSAPI
+ .lnd_type = CIBLND,
+#else
+ .lnd_type = OPENIBLND,
+#endif
+ .lnd_startup = kibnal_startup,
+ .lnd_shutdown = kibnal_shutdown,
+ .lnd_ctl = kibnal_ctl,
+ .lnd_send = kibnal_send,
+ .lnd_recv = kibnal_recv,
+ .lnd_eager_recv = kibnal_eager_recv,
+ .lnd_accept = kibnal_accept,
+};
-#ifdef CONFIG_SYSCTL
-#define OPENIBNAL_SYSCTL 202
+kib_data_t kibnal_data;
-#define OPENIBNAL_SYSCTL_TIMEOUT 1
-#define OPENIBNAL_SYSCTL_ZERO_COPY 2
+__u32
+kibnal_cksum (void *ptr, int nob)
+{
+ char *c = ptr;
+ __u32 sum = 0;
-static ctl_table koibnal_ctl_table[] = {
- {OPENIBNAL_SYSCTL_TIMEOUT, "timeout",
- &koibnal_tunables.koib_io_timeout, sizeof (int),
- 0644, NULL, &proc_dointvec},
- { 0 }
-};
+ while (nob-- > 0)
+ sum = ((sum << 1) | (sum >> 31)) + *c++;
-static ctl_table koibnal_top_ctl_table[] = {
- {OPENIBNAL_SYSCTL, "openibnal", NULL, 0, 0555, koibnal_ctl_table},
- { 0 }
-};
-#endif
+ /* ensure I don't return 0 (== no checksum) */
+ return (sum == 0) ? 1 : sum;
+}
void
-print_service(struct ib_common_attrib_service *service, char *tag, int rc)
+kibnal_init_msg(kib_msg_t *msg, int type, int body_nob)
{
- char name[32];
-
- if (service == NULL)
- {
- CWARN("tag : %s\n"
- "status : %d (NULL)\n", tag, rc);
- return;
- }
- strncpy (name, service->service_name, sizeof(name)-1);
- name[sizeof(name)-1] = 0;
-
- CWARN("tag : %s\n"
- "status : %d\n"
- "service id: "LPX64"\n"
- "name : %s\n"
- "NID : "LPX64"\n", tag, rc,
- service->service_id, name, service->service_data64[0]);
+ msg->ibm_type = type;
+ msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob;
}
void
-koibnal_service_setunset_done (tTS_IB_CLIENT_QUERY_TID tid, int status,
- struct ib_common_attrib_service *service, void *arg)
+kibnal_pack_msg(kib_msg_t *msg, int version, int credits,
+ lnet_nid_t dstnid, __u64 dststamp)
{
- *(int *)arg = status;
- up (&koibnal_data.koib_nid_signal);
+ /* CAVEAT EMPTOR! all message fields not set here should have been
+ * initialised previously. */
+ msg->ibm_magic = IBNAL_MSG_MAGIC;
+ msg->ibm_version = version;
+ /* ibm_type */
+ msg->ibm_credits = credits;
+ /* ibm_nob */
+ msg->ibm_cksum = 0;
+ msg->ibm_srcnid = lnet_ptlcompat_srcnid(kibnal_data.kib_ni->ni_nid,
+ dstnid);
+ msg->ibm_srcstamp = kibnal_data.kib_incarnation;
+ msg->ibm_dstnid = dstnid;
+ msg->ibm_dststamp = dststamp;
+
+ if (*kibnal_tunables.kib_cksum) {
+ /* NB ibm_cksum zero while computing cksum */
+ msg->ibm_cksum = kibnal_cksum(msg, msg->ibm_nob);
+ }
}
int
-koibnal_advertise (void)
+kibnal_unpack_msg(kib_msg_t *msg, int expected_version, int nob)
{
- __u64 tid;
- int rc;
- int rc2;
+ const int hdr_size = offsetof(kib_msg_t, ibm_u);
+ __u32 msg_cksum;
+ int msg_version;
+ int flip;
+ int msg_nob;
+
+ if (nob < 6) {
+ CERROR("Short message: %d\n", nob);
+ return -EPROTO;
+ }
- LASSERT (koibnal_data.koib_nid != PTL_NID_ANY);
+ if (msg->ibm_magic == IBNAL_MSG_MAGIC) {
+ flip = 0;
+ } else if (msg->ibm_magic == __swab32(IBNAL_MSG_MAGIC)) {
+ flip = 1;
+ } else {
+ CERROR("Bad magic: %08x\n", msg->ibm_magic);
+ return -EPROTO;
+ }
- memset (&koibnal_data.koib_service, 0,
- sizeof (koibnal_data.koib_service));
-
- koibnal_data.koib_service.service_id
- = koibnal_data.koib_cm_service_id;
+ msg_version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
+ if ((expected_version == 0) ?
+ (msg_version != IBNAL_MSG_VERSION &&
+ msg_version != IBNAL_MSG_VERSION_RDMAREPLYNOTRSRVD) :
+ (msg_version != expected_version)) {
+ CERROR("Bad version: %x\n", msg_version);
+ return -EPROTO;
+ }
- rc = ib_cached_gid_get(koibnal_data.koib_device,
- koibnal_data.koib_port,
- 0,
- koibnal_data.koib_service.service_gid);
- if (rc != 0) {
- CERROR ("Can't get port %d GID: %d\n",
- koibnal_data.koib_port, rc);
- return (rc);
+ if (nob < hdr_size) {
+ CERROR("Short message: %d\n", nob);
+ return -EPROTO;
}
-
- rc = ib_cached_pkey_get(koibnal_data.koib_device,
- koibnal_data.koib_port,
- 0,
- &koibnal_data.koib_service.service_pkey);
- if (rc != 0) {
- CERROR ("Can't get port %d PKEY: %d\n",
- koibnal_data.koib_port, rc);
- return (rc);
+
+ msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
+ if (msg_nob > nob) {
+ CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
+ return -EPROTO;
}
+
+ /* checksum must be computed with ibm_cksum zero and BEFORE anything
+ * gets flipped */
+ msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
+ msg->ibm_cksum = 0;
+ if (msg_cksum != 0 &&
+ msg_cksum != kibnal_cksum(msg, msg_nob)) {
+ CERROR("Bad checksum\n");
+ return -EPROTO;
+ }
+ msg->ibm_cksum = msg_cksum;
- koibnal_data.koib_service.service_lease = 0xffffffff;
-
- koibnal_set_service_keys(&koibnal_data.koib_service, koibnal_data.koib_nid);
-
- CDEBUG(D_NET, "Advertising service id "LPX64" %s:"LPX64"\n",
- koibnal_data.koib_service.service_id,
- koibnal_data.koib_service.service_name,
- *koibnal_service_nid_field(&koibnal_data.koib_service));
-
- rc = ib_service_set (koibnal_data.koib_device,
- koibnal_data.koib_port,
- &koibnal_data.koib_service,
- IB_SA_SERVICE_COMP_MASK_ID |
- IB_SA_SERVICE_COMP_MASK_GID |
- IB_SA_SERVICE_COMP_MASK_PKEY |
- IB_SA_SERVICE_COMP_MASK_LEASE |
- KOIBNAL_SERVICE_KEY_MASK,
- koibnal_tunables.koib_io_timeout * HZ,
- koibnal_service_setunset_done, &rc2, &tid);
-
- if (rc == 0) {
- down (&koibnal_data.koib_nid_signal);
- rc = rc2;
+ if (flip) {
+ /* leave magic unflipped as a clue to peer endianness */
+ msg->ibm_version = msg_version;
+ LASSERT (sizeof(msg->ibm_type) == 1);
+ LASSERT (sizeof(msg->ibm_credits) == 1);
+ msg->ibm_nob = msg_nob;
+ __swab64s(&msg->ibm_srcnid);
+ __swab64s(&msg->ibm_srcstamp);
+ __swab64s(&msg->ibm_dstnid);
+ __swab64s(&msg->ibm_dststamp);
}
- if (rc != 0)
- CERROR ("Error %d advertising SM service\n", rc);
+ if (msg->ibm_srcnid == LNET_NID_ANY) {
+ CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
+ return -EPROTO;
+ }
- return (rc);
+ switch (msg->ibm_type) {
+ default:
+ CERROR("Unknown message type %x\n", msg->ibm_type);
+ return -EPROTO;
+
+ case IBNAL_MSG_SVCQRY:
+ case IBNAL_MSG_NOOP:
+ break;
+
+ case IBNAL_MSG_SVCRSP:
+ if (msg_nob < hdr_size + sizeof(msg->ibm_u.svcrsp)) {
+ CERROR("Short SVCRSP: %d(%d)\n", msg_nob,
+ (int)(hdr_size + sizeof(msg->ibm_u.svcrsp)));
+ return -EPROTO;
+ }
+ if (flip) {
+ __swab64s(&msg->ibm_u.svcrsp.ibsr_svc_id);
+ __swab16s(&msg->ibm_u.svcrsp.ibsr_svc_pkey);
+ }
+ break;
+
+ case IBNAL_MSG_CONNREQ:
+ case IBNAL_MSG_CONNACK:
+ if (msg_nob < hdr_size + sizeof(msg->ibm_u.connparams)) {
+ CERROR("Short CONNREQ: %d(%d)\n", msg_nob,
+ (int)(hdr_size + sizeof(msg->ibm_u.connparams)));
+ return -EPROTO;
+ }
+ if (flip)
+ __swab32s(&msg->ibm_u.connparams.ibcp_queue_depth);
+ break;
+
+ case IBNAL_MSG_IMMEDIATE:
+ if (msg_nob < offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0])) {
+ CERROR("Short IMMEDIATE: %d(%d)\n", msg_nob,
+ (int)offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]));
+ return -EPROTO;
+ }
+ break;
+
+ case IBNAL_MSG_PUT_RDMA:
+ case IBNAL_MSG_GET_RDMA:
+ if (msg_nob < hdr_size + sizeof(msg->ibm_u.rdma)) {
+ CERROR("Short RDMA req: %d(%d)\n", msg_nob,
+ (int)(hdr_size + sizeof(msg->ibm_u.rdma)));
+ return -EPROTO;
+ }
+ if (flip) {
+ __swab32s(&msg->ibm_u.rdma.ibrm_desc.rd_key);
+ __swab32s(&msg->ibm_u.rdma.ibrm_desc.rd_nob);
+ __swab64s(&msg->ibm_u.rdma.ibrm_desc.rd_addr);
+ }
+ break;
+
+ case IBNAL_MSG_PUT_DONE:
+ case IBNAL_MSG_GET_DONE:
+ if (msg_nob < hdr_size + sizeof(msg->ibm_u.completion)) {
+ CERROR("Short RDMA completion: %d(%d)\n", msg_nob,
+ (int)(hdr_size + sizeof(msg->ibm_u.completion)));
+ return -EPROTO;
+ }
+ if (flip)
+ __swab32s(&msg->ibm_u.completion.ibcm_status);
+ break;
+ }
+ return 0;
}
int
-koibnal_unadvertise (int expect_success)
+kibnal_make_svcqry (kib_conn_t *conn)
{
- __u64 tid;
- int rc;
- int rc2;
+ kib_peer_t *peer = conn->ibc_peer;
+ int version = IBNAL_MSG_VERSION;
+ int msg_version;
+ kib_msg_t *msg;
+ struct socket *sock;
+ int rc;
+ int nob;
+
+ LASSERT (conn->ibc_connreq != NULL);
+ msg = &conn->ibc_connreq->cr_msg;
- LASSERT (koibnal_data.koib_nid != PTL_NID_ANY);
+ again:
+ kibnal_init_msg(msg, IBNAL_MSG_SVCQRY, 0);
+ kibnal_pack_msg(msg, version, 0, peer->ibp_nid, 0);
- memset (&koibnal_data.koib_service, 0,
- sizeof (koibnal_data.koib_service));
+ rc = lnet_connect(&sock, peer->ibp_nid,
+ 0, peer->ibp_ip, peer->ibp_port);
+ if (rc != 0)
+ return -ECONNABORTED;
+
+ rc = libcfs_sock_write(sock, msg, msg->ibm_nob,
+ lnet_acceptor_timeout());
+ if (rc != 0) {
+ CERROR("Error %d sending svcqry to %s at %u.%u.%u.%u/%d\n",
+ rc, libcfs_nid2str(peer->ibp_nid),
+ HIPQUAD(peer->ibp_ip), peer->ibp_port);
+ goto out;
+ }
- koibnal_set_service_keys(&koibnal_data.koib_service, koibnal_data.koib_nid);
+ /* The first 6 bytes are invariably MAGIC + proto version */
+ rc = libcfs_sock_read(sock, msg, 6, *kibnal_tunables.kib_timeout);
+ if (rc != 0) {
+ CERROR("Error %d receiving svcrsp from %s at %u.%u.%u.%u/%d\n",
+ rc, libcfs_nid2str(peer->ibp_nid),
+ HIPQUAD(peer->ibp_ip), peer->ibp_port);
+ goto out;
+ }
- CDEBUG(D_NET, "Unadvertising service %s:"LPX64"\n",
- koibnal_data.koib_service.service_name,
- *koibnal_service_nid_field(&koibnal_data.koib_service));
+ if (msg->ibm_magic != IBNAL_MSG_MAGIC &&
+ msg->ibm_magic != __swab32(IBNAL_MSG_MAGIC)) {
+ CERROR("Bad magic: %08x from %s at %u.%u.%u.%u/%d\n",
+ msg->ibm_magic, libcfs_nid2str(peer->ibp_nid),
+ HIPQUAD(peer->ibp_ip), peer->ibp_port);
+ rc = -EPROTO;
+ goto out;
+ }
- rc = ib_service_delete (koibnal_data.koib_device,
- koibnal_data.koib_port,
- &koibnal_data.koib_service,
- KOIBNAL_SERVICE_KEY_MASK,
- koibnal_tunables.koib_io_timeout * HZ,
- koibnal_service_setunset_done, &rc2, &tid);
+ msg_version = (msg->ibm_magic == IBNAL_MSG_MAGIC) ?
+ msg->ibm_version : __swab16(msg->ibm_version);
+ if (msg_version != version) {
+ if (version == IBNAL_MSG_VERSION) {
+ /* retry with previous version */
+ libcfs_sock_release(sock);
+ version = IBNAL_MSG_VERSION_RDMAREPLYNOTRSRVD;
+ goto again;
+ }
+
+ CERROR("Bad version %x from %s at %u.%u.%u.%u/%d\n",
+ msg_version, libcfs_nid2str(peer->ibp_nid),
+ HIPQUAD(peer->ibp_ip), peer->ibp_port);
+ rc = -EPROTO;
+ goto out;
+ }
+
+ /* Read in the rest of the message now we know the expected format */
+ nob = offsetof(kib_msg_t, ibm_u) + sizeof(kib_svcrsp_t);
+ rc = libcfs_sock_read(sock, ((char *)msg) + 6, nob - 6,
+ *kibnal_tunables.kib_timeout);
if (rc != 0) {
- CERROR ("Immediate error %d unadvertising NID "LPX64"\n",
- rc, koibnal_data.koib_nid);
- return (rc);
+ CERROR("Error %d receiving svcrsp from %s at %u.%u.%u.%u/%d\n",
+ rc, libcfs_nid2str(peer->ibp_nid),
+ HIPQUAD(peer->ibp_ip), peer->ibp_port);
+ goto out;
}
- down (&koibnal_data.koib_nid_signal);
+ rc = kibnal_unpack_msg(msg, version, nob);
+ if (rc != 0) {
+ CERROR("Error %d unpacking svcrsp from %s at %u.%u.%u.%u/%d\n",
+ rc, libcfs_nid2str(peer->ibp_nid),
+ HIPQUAD(peer->ibp_ip), peer->ibp_port);
+ goto out;
+ }
+
+ if (msg->ibm_type != IBNAL_MSG_SVCRSP) {
+ CERROR("Unexpected response type %d from %s at %u.%u.%u.%u/%d\n",
+ msg->ibm_type, libcfs_nid2str(peer->ibp_nid),
+ HIPQUAD(peer->ibp_ip), peer->ibp_port);
+ rc = -EPROTO;
+ goto out;
+ }
- if ((rc2 == 0) == !!expect_success)
- return (0);
+ if (!lnet_ptlcompat_matchnid(kibnal_data.kib_ni->ni_nid,
+ msg->ibm_dstnid) ||
+ msg->ibm_dststamp != kibnal_data.kib_incarnation) {
+ CERROR("Unexpected dst NID/stamp %s/"LPX64" from "
+ "%s at %u.%u.%u.%u/%d\n",
+ libcfs_nid2str(msg->ibm_dstnid), msg->ibm_dststamp,
+ libcfs_nid2str(peer->ibp_nid), HIPQUAD(peer->ibp_ip),
+ peer->ibp_port);
+ rc = -EPROTO;
+ goto out;
+ }
- if (expect_success)
- CERROR("Error %d unadvertising NID "LPX64"\n",
- rc, koibnal_data.koib_nid);
- else
- CWARN("Removed conflicting NID "LPX64"\n",
- koibnal_data.koib_nid);
+ if (!lnet_ptlcompat_matchnid(peer->ibp_nid, msg->ibm_srcnid)) {
+ CERROR("Unexpected src NID %s from %s at %u.%u.%u.%u/%d\n",
+ libcfs_nid2str(msg->ibm_srcnid),
+ libcfs_nid2str(peer->ibp_nid),
+ HIPQUAD(peer->ibp_ip), peer->ibp_port);
+ rc = -EPROTO;
+ goto out;
+ }
- return (rc);
+ conn->ibc_incarnation = msg->ibm_srcstamp;
+ conn->ibc_connreq->cr_svcrsp = msg->ibm_u.svcrsp;
+ conn->ibc_version = version;
+
+ out:
+ libcfs_sock_release(sock);
+ return rc;
}
-int
-koibnal_check_advert (void)
+void
+kibnal_handle_svcqry (struct socket *sock)
{
- __u64 tid;
- int rc;
- int rc2;
+ __u32 peer_ip;
+ unsigned int peer_port;
+ kib_msg_t *msg;
+ __u64 srcnid;
+ __u64 srcstamp;
+ int version;
+ int reject = 0;
+ int rc;
+
+ rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
+ if (rc != 0) {
+ CERROR("Can't get peer's IP: %d\n", rc);
+ return;
+ }
- static struct ib_common_attrib_service srv;
+ LIBCFS_ALLOC(msg, sizeof(*msg));
+ if (msg == NULL) {
+ CERROR("Can't allocate msgs for %u.%u.%u.%u/%d\n",
+ HIPQUAD(peer_ip), peer_port);
+ return;
+ }
+
+ rc = libcfs_sock_read(sock, &msg->ibm_magic, sizeof(msg->ibm_magic),
+ lnet_acceptor_timeout());
+ if (rc != 0) {
+ CERROR("Error %d receiving svcqry(1) from %u.%u.%u.%u/%d\n",
+ rc, HIPQUAD(peer_ip), peer_port);
+ goto out;
+ }
- memset (&srv, 0, sizeof (srv));
+ if (msg->ibm_magic != IBNAL_MSG_MAGIC &&
+ msg->ibm_magic != __swab32(IBNAL_MSG_MAGIC)) {
+ /* Unexpected magic! */
+ if (the_lnet.ln_ptlcompat == 0) {
+ if (msg->ibm_magic == LNET_PROTO_MAGIC ||
+ msg->ibm_magic == __swab32(LNET_PROTO_MAGIC)) {
+ /* future protocol version compatibility!
+ * When LNET unifies protocols over all LNDs,
+ * the first thing sent will be a version
+ * query. I send back a reply in my current
+ * protocol to tell her I'm "old" */
+ kibnal_init_msg(msg, 0, 0);
+ kibnal_pack_msg(msg, IBNAL_MSG_VERSION, 0,
+ LNET_NID_ANY, 0);
+ reject = 1;
+ goto reply;
+ }
- koibnal_set_service_keys(&srv, koibnal_data.koib_nid);
+ CERROR ("Bad magic(1) %#08x (%#08x expected) from "
+ "%u.%u.%u.%u/%d\n", msg->ibm_magic,
+ IBNAL_MSG_MAGIC, HIPQUAD(peer_ip), peer_port);
+ goto out;
+ }
- rc = ib_service_get (koibnal_data.koib_device,
- koibnal_data.koib_port,
- &srv,
- KOIBNAL_SERVICE_KEY_MASK,
- koibnal_tunables.koib_io_timeout * HZ,
- koibnal_service_setunset_done, &rc2,
- &tid);
+ /* When portals compatibility is set, I may be passed a new
+ * connection "blindly" by the acceptor, and I have to
+ * determine if my peer has sent an acceptor connection request
+ * or not. */
+ rc = lnet_accept(kibnal_data.kib_ni, sock, msg->ibm_magic);
+ if (rc != 0)
+ goto out;
+ /* It was an acceptor connection request!
+ * Now I should see my magic... */
+ rc = libcfs_sock_read(sock, &msg->ibm_magic,
+ sizeof(msg->ibm_magic),
+ lnet_acceptor_timeout());
+ if (rc != 0) {
+ CERROR("Error %d receiving svcqry(2) from %u.%u.%u.%u/%d\n",
+ rc, HIPQUAD(peer_ip), peer_port);
+ goto out;
+ }
+
+ if (msg->ibm_magic != IBNAL_MSG_MAGIC &&
+ msg->ibm_magic != __swab32(IBNAL_MSG_MAGIC)) {
+ CERROR ("Bad magic(2) %#08x (%#08x expected) from "
+ "%u.%u.%u.%u/%d\n", msg->ibm_magic,
+ IBNAL_MSG_MAGIC, HIPQUAD(peer_ip), peer_port);
+ goto out;
+ }
+ }
+
+ /* Now check version */
+
+ rc = libcfs_sock_read(sock, &msg->ibm_version, sizeof(msg->ibm_version),
+ lnet_acceptor_timeout());
if (rc != 0) {
- CERROR ("Immediate error %d checking SM service\n", rc);
- } else {
- down (&koibnal_data.koib_nid_signal);
- rc = rc2;
+ CERROR("Error %d receiving svcqry(3) from %u.%u.%u.%u/%d\n",
+ rc, HIPQUAD(peer_ip), peer_port);
+ goto out;
+ }
- if (rc != 0)
- CERROR ("Error %d checking SM service\n", rc);
+ version = (msg->ibm_magic == IBNAL_MSG_MAGIC) ?
+ msg->ibm_version : __swab16(msg->ibm_version);
+ /* Peer is a different protocol version: reply in my current protocol
+ * to tell her I'm "old" */
+ if (version != IBNAL_MSG_VERSION &&
+ version != IBNAL_MSG_VERSION_RDMAREPLYNOTRSRVD) {
+ kibnal_init_msg(msg, 0, 0);
+ kibnal_pack_msg(msg, IBNAL_MSG_VERSION, 0, LNET_NID_ANY, 0);
+ reject = 1;
+ goto reply;
+ }
+
+ /* Now read in all the rest */
+ rc = libcfs_sock_read(sock, &msg->ibm_type,
+ offsetof(kib_msg_t, ibm_u) -
+ offsetof(kib_msg_t, ibm_type),
+ lnet_acceptor_timeout());
+ if (rc != 0) {
+ CERROR("Error %d receiving svcqry(4) from %u.%u.%u.%u/%d\n",
+ rc, HIPQUAD(peer_ip), peer_port);
+ goto out;
+ }
+
+ rc = kibnal_unpack_msg(msg, version, offsetof(kib_msg_t, ibm_u));
+ if (rc != 0) {
+ CERROR("Error %d unpacking svcqry from %u.%u.%u.%u/%d\n",
+ rc, HIPQUAD(peer_ip), peer_port);
+ goto out;
+ }
+
+ if (msg->ibm_type != IBNAL_MSG_SVCQRY) {
+ CERROR("Unexpected message %d from %u.%u.%u.%u/%d\n",
+ msg->ibm_type, HIPQUAD(peer_ip), peer_port);
+ goto out;
+ }
+
+ if (!lnet_ptlcompat_matchnid(kibnal_data.kib_ni->ni_nid,
+ msg->ibm_dstnid)) {
+ CERROR("Unexpected dstnid %s: expected %s from %u.%u.%u.%u/%d\n",
+ libcfs_nid2str(msg->ibm_dstnid),
+ libcfs_nid2str(kibnal_data.kib_ni->ni_nid),
+ HIPQUAD(peer_ip), peer_port);
+ goto out;
}
- return (rc);
+ srcnid = msg->ibm_srcnid;
+ srcstamp = msg->ibm_srcstamp;
+
+ kibnal_init_msg(msg, IBNAL_MSG_SVCRSP, sizeof(msg->ibm_u.svcrsp));
+
+ msg->ibm_u.svcrsp.ibsr_svc_id = kibnal_data.kib_svc_id;
+ memcpy(msg->ibm_u.svcrsp.ibsr_svc_gid, kibnal_data.kib_svc_gid,
+ sizeof(kibnal_data.kib_svc_gid));
+ msg->ibm_u.svcrsp.ibsr_svc_pkey = kibnal_data.kib_svc_pkey;
+
+ kibnal_pack_msg(msg, version, 0, srcnid, srcstamp);
+
+ reply:
+ rc = libcfs_sock_write (sock, msg, msg->ibm_nob,
+ lnet_acceptor_timeout());
+ if (!reject && rc != 0) {
+ /* Only complain if we're not rejecting */
+ CERROR("Error %d replying to svcqry from %u.%u.%u.%u/%d\n",
+ rc, HIPQUAD(peer_ip), peer_port);
+ goto out;
+ }
+
+ out:
+ LIBCFS_FREE(msg, sizeof(*msg));
+}
+
+void
+kibnal_free_acceptsock (kib_acceptsock_t *as)
+{
+ libcfs_sock_release(as->ibas_sock);
+ LIBCFS_FREE(as, sizeof(*as));
}
int
-koibnal_set_mynid(ptl_nid_t nid)
+kibnal_accept(lnet_ni_t *ni, struct socket *sock)
{
- struct timeval tv;
- lib_ni_t *ni = &koibnal_lib.libnal_ni;
- int rc;
+ kib_acceptsock_t *as;
+ unsigned long flags;
- CDEBUG(D_IOCTL, "setting mynid to "LPX64" (old nid="LPX64")\n",
- nid, ni->ni_pid.nid);
+ LIBCFS_ALLOC(as, sizeof(*as));
+ if (as == NULL) {
+ CERROR("Out of Memory\n");
+ return -ENOMEM;
+ }
- do_gettimeofday(&tv);
+ as->ibas_sock = sock;
+
+ spin_lock_irqsave(&kibnal_data.kib_connd_lock, flags);
+
+ list_add_tail(&as->ibas_list, &kibnal_data.kib_connd_acceptq);
+ wake_up(&kibnal_data.kib_connd_waitq);
- down (&koibnal_data.koib_nid_mutex);
+ spin_unlock_irqrestore(&kibnal_data.kib_connd_lock, flags);
+ return 0;
+}
- if (nid == koibnal_data.koib_nid) {
- /* no change of NID */
- up (&koibnal_data.koib_nid_mutex);
- return (0);
- }
+int
+kibnal_start_ib_listener (void)
+{
+ int rc;
- CDEBUG(D_NET, "NID "LPX64"("LPX64")\n",
- koibnal_data.koib_nid, nid);
-
- if (koibnal_data.koib_nid != PTL_NID_ANY) {
+ LASSERT (kibnal_data.kib_listen_handle == NULL);
- koibnal_unadvertise (1);
+ kibnal_data.kib_svc_id = ib_cm_service_assign();
+ CDEBUG(D_NET, "svc id "LPX64"\n", kibnal_data.kib_svc_id);
- rc = ib_cm_listen_stop (koibnal_data.koib_listen_handle);
- if (rc != 0)
- CERROR ("Error %d stopping listener\n", rc);
+ rc = ib_cached_gid_get(kibnal_data.kib_device,
+ kibnal_data.kib_port, 0,
+ kibnal_data.kib_svc_gid);
+ if (rc != 0) {
+ CERROR("Can't get port %d GID: %d\n",
+ kibnal_data.kib_port, rc);
+ return rc;
}
- koibnal_data.koib_nid = ni->ni_pid.nid = nid;
- koibnal_data.koib_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
-
- /* Delete all existing peers and their connections after new
- * NID/incarnation set to ensure no old connections in our brave
- * new world. */
- koibnal_del_peer (PTL_NID_ANY, 0);
-
- rc = 0;
- if (koibnal_data.koib_nid != PTL_NID_ANY) {
- /* New NID installed */
-
- /* remove any previous advert (crashed node etc) */
- koibnal_unadvertise(0);
+ rc = ib_cached_pkey_get(kibnal_data.kib_device,
+ kibnal_data.kib_port, 0,
+ &kibnal_data.kib_svc_pkey);
+ if (rc != 0) {
+ CERROR ("Can't get port %d PKEY: %d\n",
+ kibnal_data.kib_port, rc);
+ return rc;
+ }
- /* Assign new service number */
- koibnal_data.koib_cm_service_id = ib_cm_service_assign();
- CDEBUG(D_NET, "service_id "LPX64"\n", koibnal_data.koib_cm_service_id);
+ rc = ib_cm_listen(kibnal_data.kib_svc_id,
+ TS_IB_CM_SERVICE_EXACT_MASK,
+ kibnal_passive_conn_callback, NULL,
+ &kibnal_data.kib_listen_handle);
+ if (rc != 0) {
+ kibnal_data.kib_listen_handle = NULL;
+ CERROR ("Can't create IB listener: %d\n", rc);
+ return rc;
+ }
- rc = ib_cm_listen(koibnal_data.koib_cm_service_id,
- TS_IB_CM_SERVICE_EXACT_MASK,
- koibnal_passive_conn_callback, NULL,
- &koibnal_data.koib_listen_handle);
- if (rc != 0) {
- CERROR ("ib_cm_listen error: %d\n", rc);
- goto out;
- }
-
- rc = koibnal_advertise();
+ LASSERT (kibnal_data.kib_listen_handle != NULL);
+ return 0;
+}
- koibnal_check_advert();
- }
+void
+kibnal_stop_ib_listener (void)
+{
+ int rc;
- out:
- if (rc != 0) {
- koibnal_data.koib_nid = PTL_NID_ANY;
- /* remove any peers that sprung up while I failed to
- * advertise myself */
- koibnal_del_peer (PTL_NID_ANY, 0);
- }
+ LASSERT (kibnal_data.kib_listen_handle != NULL);
- up (&koibnal_data.koib_nid_mutex);
- return (0);
+ rc = ib_cm_listen_stop (kibnal_data.kib_listen_handle);
+ if (rc != 0)
+ CERROR("Error stopping IB listener: %d\n", rc);
+
+ kibnal_data.kib_listen_handle = NULL;
}
-koib_peer_t *
-koibnal_create_peer (ptl_nid_t nid)
+int
+kibnal_create_peer (kib_peer_t **peerp, lnet_nid_t nid)
{
- koib_peer_t *peer;
+ kib_peer_t *peer;
+ unsigned long flags;
+ int rc;
- LASSERT (nid != PTL_NID_ANY);
+ LASSERT (nid != LNET_NID_ANY);
- PORTAL_ALLOC (peer, sizeof (*peer));
- if (peer == NULL)
- return (NULL);
+ LIBCFS_ALLOC(peer, sizeof (*peer));
+ if (peer == NULL) {
+ CERROR("Cannot allocate peer\n");
+ return -ENOMEM;
+ }
memset(peer, 0, sizeof(*peer)); /* zero flags etc */
INIT_LIST_HEAD (&peer->ibp_list); /* not in the peer table yet */
INIT_LIST_HEAD (&peer->ibp_conns);
INIT_LIST_HEAD (&peer->ibp_tx_queue);
+ INIT_LIST_HEAD (&peer->ibp_connd_list); /* not queued for connecting */
- peer->ibp_reconnect_time = jiffies;
- peer->ibp_reconnect_interval = OPENIBNAL_MIN_RECONNECT_INTERVAL;
+ peer->ibp_error = 0;
+ peer->ibp_last_alive = cfs_time_current();
+ peer->ibp_reconnect_interval = 0; /* OK to connect at any time */
- atomic_inc (&koibnal_data.koib_npeers);
- return (peer);
+ write_lock_irqsave(&kibnal_data.kib_global_lock, flags);
+
+ if (atomic_read(&kibnal_data.kib_npeers) >=
+ *kibnal_tunables.kib_concurrent_peers) {
+ rc = -EOVERFLOW; /* !! but at least it distinguishes */
+ } else if (kibnal_data.kib_nonewpeers) {
+ rc = -ESHUTDOWN; /* shutdown has started */
+ } else {
+ rc = 0;
+ /* npeers only grows with kib_global_lock held */
+ atomic_inc(&kibnal_data.kib_npeers);
+ }
+
+ write_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
+
+ if (rc != 0) {
+ CERROR("Can't create peer: %s\n",
+ (rc == -ESHUTDOWN) ? "shutting down" :
+ "too many peers");
+ LIBCFS_FREE(peer, sizeof(*peer));
+ } else {
+ *peerp = peer;
+ }
+
+ return rc;
}
void
-koibnal_destroy_peer (koib_peer_t *peer)
+kibnal_destroy_peer (kib_peer_t *peer)
{
- CDEBUG (D_NET, "peer "LPX64" %p deleted\n", peer->ibp_nid, peer);
+ CDEBUG (D_NET, "peer %s %p deleted\n",
+ libcfs_nid2str(peer->ibp_nid), peer);
LASSERT (atomic_read (&peer->ibp_refcount) == 0);
LASSERT (peer->ibp_persistence == 0);
- LASSERT (!koibnal_peer_active(peer));
+ LASSERT (!kibnal_peer_active(peer));
LASSERT (peer->ibp_connecting == 0);
+ LASSERT (peer->ibp_accepting == 0);
+ LASSERT (list_empty (&peer->ibp_connd_list));
LASSERT (list_empty (&peer->ibp_conns));
LASSERT (list_empty (&peer->ibp_tx_queue));
- PORTAL_FREE (peer, sizeof (*peer));
+ LIBCFS_FREE (peer, sizeof (*peer));
/* NB a peer's connections keep a reference on their peer until
* they are destroyed, so we can be assured that _all_ state to do
* with this peer has been cleaned up when its refcount drops to
* zero. */
- atomic_dec (&koibnal_data.koib_npeers);
-}
-
-void
-koibnal_put_peer (koib_peer_t *peer)
-{
- CDEBUG (D_OTHER, "putting peer[%p] -> "LPX64" (%d)\n",
- peer, peer->ibp_nid,
- atomic_read (&peer->ibp_refcount));
-
- LASSERT (atomic_read (&peer->ibp_refcount) > 0);
- if (!atomic_dec_and_test (&peer->ibp_refcount))
- return;
-
- koibnal_destroy_peer (peer);
+ atomic_dec(&kibnal_data.kib_npeers);
}
-koib_peer_t *
-koibnal_find_peer_locked (ptl_nid_t nid)
+kib_peer_t *
+kibnal_find_peer_locked (lnet_nid_t nid)
{
- struct list_head *peer_list = koibnal_nid2peerlist (nid);
+ struct list_head *peer_list = kibnal_nid2peerlist (nid);
struct list_head *tmp;
- koib_peer_t *peer;
+ kib_peer_t *peer;
list_for_each (tmp, peer_list) {
- peer = list_entry (tmp, koib_peer_t, ibp_list);
+ peer = list_entry (tmp, kib_peer_t, ibp_list);
LASSERT (peer->ibp_persistence != 0 || /* persistent peer */
peer->ibp_connecting != 0 || /* creating conns */
+ peer->ibp_accepting != 0 ||
!list_empty (&peer->ibp_conns)); /* active conn */
if (peer->ibp_nid != nid)
continue;
- CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n",
- peer, nid, atomic_read (&peer->ibp_refcount));
return (peer);
}
return (NULL);
}
-koib_peer_t *
-koibnal_get_peer (ptl_nid_t nid)
+kib_peer_t *
+kibnal_get_peer (lnet_nid_t nid)
{
- koib_peer_t *peer;
+ kib_peer_t *peer;
+ unsigned long flags;
- read_lock (&koibnal_data.koib_global_lock);
- peer = koibnal_find_peer_locked (nid);
+ read_lock_irqsave(&kibnal_data.kib_global_lock, flags);
+ peer = kibnal_find_peer_locked (nid);
if (peer != NULL) /* +1 ref for caller? */
- atomic_inc (&peer->ibp_refcount);
- read_unlock (&koibnal_data.koib_global_lock);
+ kibnal_peer_addref(peer);
+ read_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
return (peer);
}
void
-koibnal_unlink_peer_locked (koib_peer_t *peer)
+kibnal_unlink_peer_locked (kib_peer_t *peer)
{
LASSERT (peer->ibp_persistence == 0);
LASSERT (list_empty(&peer->ibp_conns));
- LASSERT (koibnal_peer_active(peer));
+ LASSERT (kibnal_peer_active(peer));
list_del_init (&peer->ibp_list);
/* lose peerlist's ref */
- koibnal_put_peer (peer);
+ kibnal_peer_decref(peer);
}
int
-koibnal_get_peer_info (int index, ptl_nid_t *nidp, int *persistencep)
+kibnal_get_peer_info (int index, lnet_nid_t *nidp, __u32 *ipp, int *portp,
+ int *persistencep)
{
- koib_peer_t *peer;
+ kib_peer_t *peer;
struct list_head *ptmp;
+ unsigned long flags;
int i;
- read_lock (&koibnal_data.koib_global_lock);
+ read_lock_irqsave(&kibnal_data.kib_global_lock, flags);
- for (i = 0; i < koibnal_data.koib_peer_hash_size; i++) {
+ for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
- list_for_each (ptmp, &koibnal_data.koib_peers[i]) {
+ list_for_each (ptmp, &kibnal_data.kib_peers[i]) {
- peer = list_entry (ptmp, koib_peer_t, ibp_list);
+ peer = list_entry (ptmp, kib_peer_t, ibp_list);
LASSERT (peer->ibp_persistence != 0 ||
peer->ibp_connecting != 0 ||
+ peer->ibp_accepting != 0 ||
!list_empty (&peer->ibp_conns));
if (index-- > 0)
continue;
*nidp = peer->ibp_nid;
+ *ipp = peer->ibp_ip;
+ *portp = peer->ibp_port;
*persistencep = peer->ibp_persistence;
- read_unlock (&koibnal_data.koib_global_lock);
+ read_unlock_irqrestore(&kibnal_data.kib_global_lock,
+ flags);
return (0);
}
}
- read_unlock (&koibnal_data.koib_global_lock);
+ read_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
return (-ENOENT);
}
int
-koibnal_add_persistent_peer (ptl_nid_t nid)
+kibnal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port)
{
unsigned long flags;
- koib_peer_t *peer;
- koib_peer_t *peer2;
+ kib_peer_t *peer;
+ kib_peer_t *peer2;
+ int rc;
- if (nid == PTL_NID_ANY)
+ if (nid == LNET_NID_ANY)
return (-EINVAL);
- peer = koibnal_create_peer (nid);
- if (peer == NULL)
- return (-ENOMEM);
+ rc = kibnal_create_peer (&peer, nid);
+ if (rc != 0)
+ return rc;
+
+ write_lock_irqsave (&kibnal_data.kib_global_lock, flags);
- write_lock_irqsave (&koibnal_data.koib_global_lock, flags);
+ /* I'm always called with a reference on kibnal_data.kib_ni
+ * so shutdown can't have started */
+ LASSERT (kibnal_data.kib_nonewpeers == 0);
- peer2 = koibnal_find_peer_locked (nid);
+ peer2 = kibnal_find_peer_locked (nid);
if (peer2 != NULL) {
- koibnal_put_peer (peer);
+ kibnal_peer_decref(peer);
peer = peer2;
} else {
/* peer table takes existing ref on peer */
list_add_tail (&peer->ibp_list,
- koibnal_nid2peerlist (nid));
+ kibnal_nid2peerlist (nid));
}
+ peer->ibp_ip = ip;
+ peer->ibp_port = port;
peer->ibp_persistence++;
- write_unlock_irqrestore (&koibnal_data.koib_global_lock, flags);
+ write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);
return (0);
}
void
-koibnal_del_peer_locked (koib_peer_t *peer, int single_share)
+kibnal_del_peer_locked (kib_peer_t *peer)
{
struct list_head *ctmp;
struct list_head *cnxt;
- koib_conn_t *conn;
+ kib_conn_t *conn;
- if (!single_share)
- peer->ibp_persistence = 0;
- else if (peer->ibp_persistence > 0)
- peer->ibp_persistence--;
+ peer->ibp_persistence = 0;
- if (peer->ibp_persistence != 0)
- return;
-
- list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, koib_conn_t, ibc_list);
+ if (list_empty(&peer->ibp_conns)) {
+ kibnal_unlink_peer_locked(peer);
+ } else {
+ list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
- koibnal_close_conn_locked (conn, 0);
+ kibnal_close_conn_locked (conn, 0);
+ }
+ /* NB peer is no longer persistent; closing its last conn
+ * unlinked it. */
}
-
- /* NB peer unlinks itself when last conn is closed */
+ /* NB peer now unlinked; might even be freed if the peer table had the
+ * last ref on it. */
}
int
-koibnal_del_peer (ptl_nid_t nid, int single_share)
+kibnal_del_peer (lnet_nid_t nid)
{
unsigned long flags;
+ CFS_LIST_HEAD (zombies);
struct list_head *ptmp;
struct list_head *pnxt;
- koib_peer_t *peer;
+ kib_peer_t *peer;
int lo;
int hi;
int i;
int rc = -ENOENT;
- write_lock_irqsave (&koibnal_data.koib_global_lock, flags);
+ write_lock_irqsave (&kibnal_data.kib_global_lock, flags);
- if (nid != PTL_NID_ANY)
- lo = hi = koibnal_nid2peerlist(nid) - koibnal_data.koib_peers;
+ if (nid != LNET_NID_ANY)
+ lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
else {
lo = 0;
- hi = koibnal_data.koib_peer_hash_size - 1;
+ hi = kibnal_data.kib_peer_hash_size - 1;
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe (ptmp, pnxt, &koibnal_data.koib_peers[i]) {
- peer = list_entry (ptmp, koib_peer_t, ibp_list);
+ list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {
+ peer = list_entry (ptmp, kib_peer_t, ibp_list);
LASSERT (peer->ibp_persistence != 0 ||
peer->ibp_connecting != 0 ||
+ peer->ibp_accepting != 0 ||
!list_empty (&peer->ibp_conns));
- if (!(nid == PTL_NID_ANY || peer->ibp_nid == nid))
+ if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
continue;
- koibnal_del_peer_locked (peer, single_share);
- rc = 0; /* matched something */
+ if (!list_empty(&peer->ibp_tx_queue)) {
+ LASSERT (list_empty(&peer->ibp_conns));
+
+ list_splice_init(&peer->ibp_tx_queue, &zombies);
+ }
- if (single_share)
- goto out;
+ kibnal_del_peer_locked (peer);
+ rc = 0; /* matched something */
}
}
- out:
- write_unlock_irqrestore (&koibnal_data.koib_global_lock, flags);
+
+ write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);
+
+ kibnal_txlist_done(&zombies, -EIO);
return (rc);
}
-koib_conn_t *
-koibnal_get_conn_by_idx (int index)
+kib_conn_t *
+kibnal_get_conn_by_idx (int index)
{
- koib_peer_t *peer;
+ kib_peer_t *peer;
struct list_head *ptmp;
- koib_conn_t *conn;
+ kib_conn_t *conn;
struct list_head *ctmp;
+ unsigned long flags;
int i;
- read_lock (&koibnal_data.koib_global_lock);
+ read_lock_irqsave(&kibnal_data.kib_global_lock, flags);
- for (i = 0; i < koibnal_data.koib_peer_hash_size; i++) {
- list_for_each (ptmp, &koibnal_data.koib_peers[i]) {
+ for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
+ list_for_each (ptmp, &kibnal_data.kib_peers[i]) {
- peer = list_entry (ptmp, koib_peer_t, ibp_list);
+ peer = list_entry (ptmp, kib_peer_t, ibp_list);
LASSERT (peer->ibp_persistence > 0 ||
peer->ibp_connecting != 0 ||
+ peer->ibp_accepting != 0 ||
!list_empty (&peer->ibp_conns));
list_for_each (ctmp, &peer->ibp_conns) {
if (index-- > 0)
continue;
- conn = list_entry (ctmp, koib_conn_t, ibc_list);
- CDEBUG(D_NET, "++conn[%p] state %d -> "LPX64" (%d)\n",
- conn, conn->ibc_state, conn->ibc_peer->ibp_nid,
- atomic_read (&conn->ibc_refcount));
- atomic_inc (&conn->ibc_refcount);
- read_unlock (&koibnal_data.koib_global_lock);
+ conn = list_entry (ctmp, kib_conn_t, ibc_list);
+ kibnal_conn_addref(conn);
+ read_unlock_irqrestore(&kibnal_data.kib_global_lock,
+ flags);
return (conn);
}
}
}
- read_unlock (&koibnal_data.koib_global_lock);
+ read_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
return (NULL);
}
-koib_conn_t *
-koibnal_create_conn (void)
+kib_conn_t *
+kibnal_create_conn (void)
{
- koib_conn_t *conn;
+ kib_conn_t *conn;
int i;
__u64 vaddr = 0;
__u64 vaddr_base;
struct ib_qp_attribute qp_attr;
} params;
- PORTAL_ALLOC (conn, sizeof (*conn));
+ LIBCFS_ALLOC (conn, sizeof (*conn));
if (conn == NULL) {
CERROR ("Can't allocate connection\n");
return (NULL);
/* zero flags, NULL pointers etc... */
memset (conn, 0, sizeof (*conn));
+ INIT_LIST_HEAD (&conn->ibc_tx_queue_nocred);
INIT_LIST_HEAD (&conn->ibc_tx_queue);
- INIT_LIST_HEAD (&conn->ibc_rdma_queue);
+ INIT_LIST_HEAD (&conn->ibc_tx_queue_rsrvd);
+ INIT_LIST_HEAD (&conn->ibc_active_txs);
spin_lock_init (&conn->ibc_lock);
- atomic_inc (&koibnal_data.koib_nconns);
+ atomic_inc (&kibnal_data.kib_nconns);
/* well not really, but I call destroy() on failure, which decrements */
- PORTAL_ALLOC (conn->ibc_rxs, OPENIBNAL_RX_MSGS * sizeof (koib_rx_t));
+ LIBCFS_ALLOC (conn->ibc_rxs, IBNAL_RX_MSGS * sizeof (kib_rx_t));
if (conn->ibc_rxs == NULL)
goto failed;
- memset (conn->ibc_rxs, 0, OPENIBNAL_RX_MSGS * sizeof(koib_rx_t));
+ memset (conn->ibc_rxs, 0, IBNAL_RX_MSGS * sizeof(kib_rx_t));
- rc = koibnal_alloc_pages(&conn->ibc_rx_pages,
- OPENIBNAL_RX_MSG_PAGES,
- IB_ACCESS_LOCAL_WRITE);
+ rc = kibnal_alloc_pages(&conn->ibc_rx_pages,
+ IBNAL_RX_MSG_PAGES,
+ IB_ACCESS_LOCAL_WRITE);
if (rc != 0)
goto failed;
- vaddr_base = vaddr = conn->ibc_rx_pages->oibp_vaddr;
+ vaddr_base = vaddr = conn->ibc_rx_pages->ibp_vaddr;
- for (i = ipage = page_offset = 0; i < OPENIBNAL_RX_MSGS; i++) {
- struct page *page = conn->ibc_rx_pages->oibp_pages[ipage];
- koib_rx_t *rx = &conn->ibc_rxs[i];
+ for (i = ipage = page_offset = 0; i < IBNAL_RX_MSGS; i++) {
+ struct page *page = conn->ibc_rx_pages->ibp_pages[ipage];
+ kib_rx_t *rx = &conn->ibc_rxs[i];
rx->rx_conn = conn;
rx->rx_vaddr = vaddr;
- rx->rx_msg = (koib_msg_t *)(((char *)page_address(page)) + page_offset);
+ rx->rx_msg = (kib_msg_t *)(((char *)page_address(page)) + page_offset);
- vaddr += OPENIBNAL_MSG_SIZE;
- LASSERT (vaddr <= vaddr_base + OPENIBNAL_RX_MSG_BYTES);
+ vaddr += IBNAL_MSG_SIZE;
+ LASSERT (vaddr <= vaddr_base + IBNAL_RX_MSG_BYTES);
- page_offset += OPENIBNAL_MSG_SIZE;
+ page_offset += IBNAL_MSG_SIZE;
LASSERT (page_offset <= PAGE_SIZE);
if (page_offset == PAGE_SIZE) {
page_offset = 0;
ipage++;
- LASSERT (ipage <= OPENIBNAL_RX_MSG_PAGES);
+ LASSERT (ipage <= IBNAL_RX_MSG_PAGES);
}
}
+ /* We can post up to IBLND_MSG_QUEUE_SIZE immediate/req messages and
+ * the same # of ack/nak/rdma+done messages */
+
params.qp_create = (struct ib_qp_create_param) {
.limit = {
- /* Sends have an optional RDMA */
- .max_outstanding_send_request = 2 * OPENIBNAL_MSG_QUEUE_SIZE,
- .max_outstanding_receive_request = OPENIBNAL_MSG_QUEUE_SIZE,
+ .max_outstanding_send_request = 3 * IBNAL_MSG_QUEUE_SIZE,
+ .max_outstanding_receive_request = IBNAL_RX_MSGS,
.max_send_gather_element = 1,
.max_receive_scatter_element = 1,
},
- .pd = koibnal_data.koib_pd,
- .send_queue = koibnal_data.koib_tx_cq,
- .receive_queue = koibnal_data.koib_rx_cq,
+ .pd = kibnal_data.kib_pd,
+ .send_queue = kibnal_data.kib_cq,
+ .receive_queue = kibnal_data.kib_cq,
.send_policy = IB_WQ_SIGNAL_SELECTABLE,
.receive_policy = IB_WQ_SIGNAL_SELECTABLE,
.rd_domain = 0,
}
/* Mark QP created */
- conn->ibc_state = OPENIBNAL_CONN_INIT_QP;
+ conn->ibc_state = IBNAL_CONN_INIT_QP;
params.qp_attr = (struct ib_qp_attribute) {
.state = IB_QP_STATE_INIT,
- .port = koibnal_data.koib_port,
+ .port = kibnal_data.kib_port,
.enable_rdma_read = 1,
.enable_rdma_write = 1,
.valid_fields = (IB_QP_ATTRIBUTE_STATE |
return (conn);
failed:
- koibnal_destroy_conn (conn);
+ kibnal_destroy_conn (conn);
return (NULL);
}
void
-koibnal_destroy_conn (koib_conn_t *conn)
+kibnal_destroy_conn (kib_conn_t *conn)
{
int rc;
LASSERT (atomic_read (&conn->ibc_refcount) == 0);
LASSERT (list_empty(&conn->ibc_tx_queue));
- LASSERT (list_empty(&conn->ibc_rdma_queue));
+ LASSERT (list_empty(&conn->ibc_tx_queue_rsrvd));
+ LASSERT (list_empty(&conn->ibc_tx_queue_nocred));
+ LASSERT (list_empty(&conn->ibc_active_txs));
LASSERT (conn->ibc_nsends_posted == 0);
LASSERT (conn->ibc_connreq == NULL);
switch (conn->ibc_state) {
- case OPENIBNAL_CONN_ZOMBIE:
+ case IBNAL_CONN_ZOMBIE:
/* called after connection sequence initiated */
- case OPENIBNAL_CONN_INIT_QP:
+ case IBNAL_CONN_INIT_QP:
rc = ib_qp_destroy(conn->ibc_qp);
if (rc != 0)
CERROR("Can't destroy QP: %d\n", rc);
/* fall through */
- case OPENIBNAL_CONN_INIT_NOTHING:
+ case IBNAL_CONN_INIT_NOTHING:
break;
default:
}
if (conn->ibc_rx_pages != NULL)
- koibnal_free_pages(conn->ibc_rx_pages);
+ kibnal_free_pages(conn->ibc_rx_pages);
if (conn->ibc_rxs != NULL)
- PORTAL_FREE(conn->ibc_rxs,
- OPENIBNAL_RX_MSGS * sizeof(koib_rx_t));
+ LIBCFS_FREE(conn->ibc_rxs,
+ IBNAL_RX_MSGS * sizeof(kib_rx_t));
if (conn->ibc_peer != NULL)
- koibnal_put_peer(conn->ibc_peer);
+ kibnal_peer_decref(conn->ibc_peer);
- PORTAL_FREE(conn, sizeof (*conn));
+ LIBCFS_FREE(conn, sizeof (*conn));
- atomic_dec(&koibnal_data.koib_nconns);
+ atomic_dec(&kibnal_data.kib_nconns);
- if (atomic_read (&koibnal_data.koib_nconns) == 0 &&
- koibnal_data.koib_shutdown) {
+ if (atomic_read (&kibnal_data.kib_nconns) == 0 &&
+ kibnal_data.kib_shutdown) {
/* I just nuked the last connection on shutdown; wake up
* everyone so they can exit. */
- wake_up_all(&koibnal_data.koib_sched_waitq);
- wake_up_all(&koibnal_data.koib_connd_waitq);
+ wake_up_all(&kibnal_data.kib_sched_waitq);
+ wake_up_all(&kibnal_data.kib_reaper_waitq);
}
}
-void
-koibnal_put_conn (koib_conn_t *conn)
-{
- unsigned long flags;
-
- CDEBUG (D_NET, "putting conn[%p] state %d -> "LPX64" (%d)\n",
- conn, conn->ibc_state, conn->ibc_peer->ibp_nid,
- atomic_read (&conn->ibc_refcount));
-
- LASSERT (atomic_read (&conn->ibc_refcount) > 0);
- if (!atomic_dec_and_test (&conn->ibc_refcount))
- return;
-
- /* last ref only goes on zombies */
- LASSERT (conn->ibc_state == OPENIBNAL_CONN_ZOMBIE);
-
- spin_lock_irqsave (&koibnal_data.koib_connd_lock, flags);
-
- list_add (&conn->ibc_list, &koibnal_data.koib_connd_conns);
- wake_up (&koibnal_data.koib_connd_waitq);
-
- spin_unlock_irqrestore (&koibnal_data.koib_connd_lock, flags);
-}
-
int
-koibnal_close_peer_conns_locked (koib_peer_t *peer, int why)
+kibnal_close_peer_conns_locked (kib_peer_t *peer, int why)
{
- koib_conn_t *conn;
+ kib_conn_t *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry (ctmp, koib_conn_t, ibc_list);
+ conn = list_entry (ctmp, kib_conn_t, ibc_list);
count++;
- koibnal_close_conn_locked (conn, why);
+ kibnal_close_conn_locked (conn, why);
}
return (count);
}
int
-koibnal_close_stale_conns_locked (koib_peer_t *peer, __u64 incarnation)
+kibnal_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation)
{
- koib_conn_t *conn;
+ kib_conn_t *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry (ctmp, koib_conn_t, ibc_list);
+ conn = list_entry (ctmp, kib_conn_t, ibc_list);
if (conn->ibc_incarnation == incarnation)
continue;
- CDEBUG(D_NET, "Closing stale conn nid:"LPX64" incarnation:"LPX64"("LPX64")\n",
- peer->ibp_nid, conn->ibc_incarnation, incarnation);
+ CDEBUG(D_NET, "Closing stale conn %p nid: %s"
+ " incarnation:"LPX64"("LPX64")\n", conn,
+ libcfs_nid2str(peer->ibp_nid),
+ conn->ibc_incarnation, incarnation);
count++;
- koibnal_close_conn_locked (conn, -ESTALE);
+ kibnal_close_conn_locked (conn, -ESTALE);
}
return (count);
}
int
-koibnal_close_matching_conns (ptl_nid_t nid)
+kibnal_close_matching_conns (lnet_nid_t nid)
{
unsigned long flags;
- koib_peer_t *peer;
+ kib_peer_t *peer;
struct list_head *ptmp;
struct list_head *pnxt;
int lo;
int i;
int count = 0;
- write_lock_irqsave (&koibnal_data.koib_global_lock, flags);
+ write_lock_irqsave (&kibnal_data.kib_global_lock, flags);
- if (nid != PTL_NID_ANY)
- lo = hi = koibnal_nid2peerlist(nid) - koibnal_data.koib_peers;
+ if (nid != LNET_NID_ANY)
+ lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
else {
lo = 0;
- hi = koibnal_data.koib_peer_hash_size - 1;
+ hi = kibnal_data.kib_peer_hash_size - 1;
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe (ptmp, pnxt, &koibnal_data.koib_peers[i]) {
+ list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {
- peer = list_entry (ptmp, koib_peer_t, ibp_list);
+ peer = list_entry (ptmp, kib_peer_t, ibp_list);
LASSERT (peer->ibp_persistence != 0 ||
peer->ibp_connecting != 0 ||
+ peer->ibp_accepting != 0 ||
!list_empty (&peer->ibp_conns));
- if (!(nid == PTL_NID_ANY || nid == peer->ibp_nid))
+ if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
continue;
- count += koibnal_close_peer_conns_locked (peer, 0);
+ count += kibnal_close_peer_conns_locked (peer, 0);
}
}
- write_unlock_irqrestore (&koibnal_data.koib_global_lock, flags);
+ write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);
/* wildcards always succeed */
- if (nid == PTL_NID_ANY)
+ if (nid == LNET_NID_ANY)
return (0);
return (count == 0 ? -ENOENT : 0);
}
int
-koibnal_cmd(struct portals_cfg *pcfg, void * private)
+kibnal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
- int rc = -EINVAL;
+ struct libcfs_ioctl_data *data = arg;
+ int rc = -EINVAL;
- LASSERT (pcfg != NULL);
+ LASSERT (ni == kibnal_data.kib_ni);
- switch(pcfg->pcfg_command) {
- case NAL_CMD_GET_PEER: {
- ptl_nid_t nid = 0;
+ switch(cmd) {
+ case IOC_LIBCFS_GET_PEER: {
+ lnet_nid_t nid = 0;
+ __u32 ip = 0;
+ int port = 0;
int share_count = 0;
- rc = koibnal_get_peer_info(pcfg->pcfg_count,
- &nid, &share_count);
- pcfg->pcfg_nid = nid;
- pcfg->pcfg_size = 0;
- pcfg->pcfg_id = 0;
- pcfg->pcfg_misc = 0;
- pcfg->pcfg_count = 0;
- pcfg->pcfg_wait = share_count;
+ rc = kibnal_get_peer_info(data->ioc_count,
+ &nid, &ip, &port, &share_count);
+ data->ioc_nid = nid;
+ data->ioc_count = share_count;
+ data->ioc_u32[0] = ip;
+ data->ioc_u32[1] = port;
break;
}
- case NAL_CMD_ADD_PEER: {
- rc = koibnal_add_persistent_peer (pcfg->pcfg_nid);
+ case IOC_LIBCFS_ADD_PEER: {
+ rc = kibnal_add_persistent_peer (data->ioc_nid,
+ data->ioc_u32[0], /* IP */
+ data->ioc_u32[1]); /* port */
break;
}
- case NAL_CMD_DEL_PEER: {
- rc = koibnal_del_peer (pcfg->pcfg_nid,
- /* flags == single_share */
- pcfg->pcfg_flags != 0);
+ case IOC_LIBCFS_DEL_PEER: {
+ rc = kibnal_del_peer (data->ioc_nid);
break;
}
- case NAL_CMD_GET_CONN: {
- koib_conn_t *conn = koibnal_get_conn_by_idx (pcfg->pcfg_count);
+ case IOC_LIBCFS_GET_CONN: {
+ kib_conn_t *conn = kibnal_get_conn_by_idx (data->ioc_count);
if (conn == NULL)
rc = -ENOENT;
else {
rc = 0;
- pcfg->pcfg_nid = conn->ibc_peer->ibp_nid;
- pcfg->pcfg_id = 0;
- pcfg->pcfg_misc = 0;
- pcfg->pcfg_flags = 0;
- koibnal_put_conn (conn);
+ data->ioc_nid = conn->ibc_peer->ibp_nid;
+ kibnal_conn_decref(conn);
}
break;
}
- case NAL_CMD_CLOSE_CONNECTION: {
- rc = koibnal_close_matching_conns (pcfg->pcfg_nid);
+ case IOC_LIBCFS_CLOSE_CONNECTION: {
+ rc = kibnal_close_matching_conns (data->ioc_nid);
break;
}
- case NAL_CMD_REGISTER_MYNID: {
- if (pcfg->pcfg_nid == PTL_NID_ANY)
+ case IOC_LIBCFS_REGISTER_MYNID: {
+ /* Ignore if this is a noop */
+ if (data->ioc_nid == ni->ni_nid) {
+ rc = 0;
+ } else {
+ CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
+ libcfs_nid2str(data->ioc_nid),
+ libcfs_nid2str(ni->ni_nid));
rc = -EINVAL;
- else
- rc = koibnal_set_mynid (pcfg->pcfg_nid);
+ }
break;
}
}
}
void
-koibnal_free_pages (koib_pages_t *p)
+kibnal_free_pages (kib_pages_t *p)
{
- int npages = p->oibp_npages;
+ int npages = p->ibp_npages;
int rc;
int i;
- if (p->oibp_mapped) {
- rc = ib_memory_deregister(p->oibp_handle);
+ if (p->ibp_mapped) {
+ rc = ib_memory_deregister(p->ibp_handle);
if (rc != 0)
CERROR ("Deregister error: %d\n", rc);
}
for (i = 0; i < npages; i++)
- if (p->oibp_pages[i] != NULL)
- __free_page(p->oibp_pages[i]);
+ if (p->ibp_pages[i] != NULL)
+ __free_page(p->ibp_pages[i]);
- PORTAL_FREE (p, offsetof(koib_pages_t, oibp_pages[npages]));
+ LIBCFS_FREE (p, offsetof(kib_pages_t, ibp_pages[npages]));
}
int
-koibnal_alloc_pages (koib_pages_t **pp, int npages, int access)
+kibnal_alloc_pages (kib_pages_t **pp, int npages, int access)
{
- koib_pages_t *p;
+ kib_pages_t *p;
struct ib_physical_buffer *phys_pages;
int i;
int rc;
- PORTAL_ALLOC(p, offsetof(koib_pages_t, oibp_pages[npages]));
+ LIBCFS_ALLOC(p, offsetof(kib_pages_t, ibp_pages[npages]));
if (p == NULL) {
CERROR ("Can't allocate buffer %d\n", npages);
return (-ENOMEM);
}
- memset (p, 0, offsetof(koib_pages_t, oibp_pages[npages]));
- p->oibp_npages = npages;
+ memset (p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
+ p->ibp_npages = npages;
for (i = 0; i < npages; i++) {
- p->oibp_pages[i] = alloc_page (GFP_KERNEL);
- if (p->oibp_pages[i] == NULL) {
+ p->ibp_pages[i] = alloc_page (GFP_KERNEL);
+ if (p->ibp_pages[i] == NULL) {
CERROR ("Can't allocate page %d of %d\n", i, npages);
- koibnal_free_pages(p);
+ kibnal_free_pages(p);
return (-ENOMEM);
}
}
- PORTAL_ALLOC(phys_pages, npages * sizeof(*phys_pages));
+ LIBCFS_ALLOC(phys_pages, npages * sizeof(*phys_pages));
if (phys_pages == NULL) {
CERROR ("Can't allocate physarray for %d pages\n", npages);
- koibnal_free_pages(p);
+ kibnal_free_pages(p);
return (-ENOMEM);
}
for (i = 0; i < npages; i++) {
phys_pages[i].size = PAGE_SIZE;
phys_pages[i].address =
- koibnal_page2phys(p->oibp_pages[i]);
+ lnet_page2phys(p->ibp_pages[i]);
}
- p->oibp_vaddr = 0;
- rc = ib_memory_register_physical(koibnal_data.koib_pd,
+ p->ibp_vaddr = 0;
+ rc = ib_memory_register_physical(kibnal_data.kib_pd,
phys_pages, npages,
- &p->oibp_vaddr,
+ &p->ibp_vaddr,
npages * PAGE_SIZE, 0,
access,
- &p->oibp_handle,
- &p->oibp_lkey,
- &p->oibp_rkey);
+ &p->ibp_handle,
+ &p->ibp_lkey,
+ &p->ibp_rkey);
- PORTAL_FREE(phys_pages, npages * sizeof(*phys_pages));
+ LIBCFS_FREE(phys_pages, npages * sizeof(*phys_pages));
if (rc != 0) {
CERROR ("Error %d mapping %d pages\n", rc, npages);
- koibnal_free_pages(p);
+ kibnal_free_pages(p);
return (rc);
}
- p->oibp_mapped = 1;
+ p->ibp_mapped = 1;
*pp = p;
return (0);
}
int
-koibnal_setup_tx_descs (void)
+kibnal_setup_tx_descs (void)
{
int ipage = 0;
int page_offset = 0;
__u64 vaddr;
__u64 vaddr_base;
struct page *page;
- koib_tx_t *tx;
+ kib_tx_t *tx;
int i;
int rc;
/* pre-mapped messages are not bigger than 1 page */
- LASSERT (OPENIBNAL_MSG_SIZE <= PAGE_SIZE);
+ LASSERT (IBNAL_MSG_SIZE <= PAGE_SIZE);
/* No fancy arithmetic when we do the buffer calculations */
- LASSERT (PAGE_SIZE % OPENIBNAL_MSG_SIZE == 0);
+ LASSERT (PAGE_SIZE % IBNAL_MSG_SIZE == 0);
- rc = koibnal_alloc_pages(&koibnal_data.koib_tx_pages,
- OPENIBNAL_TX_MSG_PAGES,
- 0); /* local read access only */
+ rc = kibnal_alloc_pages(&kibnal_data.kib_tx_pages,
+ IBNAL_TX_MSG_PAGES(),
+ 0); /* local read access only */
if (rc != 0)
return (rc);
- vaddr = vaddr_base = koibnal_data.koib_tx_pages->oibp_vaddr;
+ vaddr = vaddr_base = kibnal_data.kib_tx_pages->ibp_vaddr;
- for (i = 0; i < OPENIBNAL_TX_MSGS; i++) {
- page = koibnal_data.koib_tx_pages->oibp_pages[ipage];
- tx = &koibnal_data.koib_tx_descs[i];
+ for (i = 0; i < IBNAL_TX_MSGS(); i++) {
+ page = kibnal_data.kib_tx_pages->ibp_pages[ipage];
+ tx = &kibnal_data.kib_tx_descs[i];
memset (tx, 0, sizeof(*tx)); /* zero flags etc */
- tx->tx_msg = (koib_msg_t *)(((char *)page_address(page)) + page_offset);
+ tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) + page_offset);
tx->tx_vaddr = vaddr;
- tx->tx_isnblk = (i >= OPENIBNAL_NTX);
- tx->tx_mapped = KOIB_TX_UNMAPPED;
+ tx->tx_mapped = KIB_TX_UNMAPPED;
CDEBUG(D_NET, "Tx[%d] %p->%p - "LPX64"\n",
i, tx, tx->tx_msg, tx->tx_vaddr);
- if (tx->tx_isnblk)
- list_add (&tx->tx_list,
- &koibnal_data.koib_idle_nblk_txs);
- else
- list_add (&tx->tx_list,
- &koibnal_data.koib_idle_txs);
+ list_add (&tx->tx_list, &kibnal_data.kib_idle_txs);
- vaddr += OPENIBNAL_MSG_SIZE;
- LASSERT (vaddr <= vaddr_base + OPENIBNAL_TX_MSG_BYTES);
+ vaddr += IBNAL_MSG_SIZE;
+ LASSERT (vaddr <= vaddr_base + IBNAL_TX_MSG_BYTES());
- page_offset += OPENIBNAL_MSG_SIZE;
+ page_offset += IBNAL_MSG_SIZE;
LASSERT (page_offset <= PAGE_SIZE);
if (page_offset == PAGE_SIZE) {
page_offset = 0;
ipage++;
- LASSERT (ipage <= OPENIBNAL_TX_MSG_PAGES);
+ LASSERT (ipage <= IBNAL_TX_MSG_PAGES());
}
}
}
void
-koibnal_api_shutdown (nal_t *nal)
+kibnal_shutdown (lnet_ni_t *ni)
{
- int i;
- int rc;
-
- if (nal->nal_refct != 0) {
- /* This module got the first ref */
- PORTAL_MODULE_UNUSE;
- return;
- }
+ int i;
+ int rc;
+ unsigned long flags;
CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
- atomic_read (&portal_kmemory));
+ atomic_read (&libcfs_kmemory));
- LASSERT(nal == &koibnal_api);
+ LASSERT(ni == kibnal_data.kib_ni);
+ LASSERT(ni->ni_data == &kibnal_data);
- switch (koibnal_data.koib_init) {
+ switch (kibnal_data.kib_init) {
default:
- CERROR ("Unexpected state %d\n", koibnal_data.koib_init);
+ CERROR ("Unexpected state %d\n", kibnal_data.kib_init);
LBUG();
- case OPENIBNAL_INIT_ALL:
- /* stop calls to nal_cmd */
- libcfs_nal_cmd_unregister(OPENIBNAL);
- /* No new peers */
+ case IBNAL_INIT_ALL:
+ /* Prevent new peers from being created */
+ write_lock_irqsave(&kibnal_data.kib_global_lock, flags);
+ kibnal_data.kib_nonewpeers = 1;
+ write_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
- /* resetting my NID to unadvertises me, removes my
- * listener and nukes all current peers */
- koibnal_set_mynid (PTL_NID_ANY);
+ kibnal_stop_ib_listener();
+
+ /* Remove all existing peers from the peer table */
+ kibnal_del_peer(LNET_NID_ANY);
+
+ /* Wait for pending conn reqs to be handled */
+ i = 2;
+ spin_lock_irqsave(&kibnal_data.kib_connd_lock, flags);
+ while (!list_empty(&kibnal_data.kib_connd_acceptq)) {
+ spin_unlock_irqrestore(&kibnal_data.kib_connd_lock,
+ flags);
+ i++;
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
+ "waiting for conn reqs to clean up\n");
+ cfs_pause(cfs_time_seconds(1));
+
+ spin_lock_irqsave(&kibnal_data.kib_connd_lock, flags);
+ }
+ spin_unlock_irqrestore(&kibnal_data.kib_connd_lock, flags);
/* Wait for all peer state to clean up */
i = 2;
- while (atomic_read (&koibnal_data.koib_npeers) != 0) {
+ while (atomic_read(&kibnal_data.kib_npeers) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d peers to close down\n",
- atomic_read (&koibnal_data.koib_npeers));
- set_current_state (TASK_INTERRUPTIBLE);
- schedule_timeout (HZ);
+ atomic_read(&kibnal_data.kib_npeers));
+ cfs_pause(cfs_time_seconds(1));
}
/* fall through */
- case OPENIBNAL_INIT_TX_CQ:
- rc = ib_cq_destroy (koibnal_data.koib_tx_cq);
- if (rc != 0)
- CERROR ("Destroy tx CQ error: %d\n", rc);
- /* fall through */
-
- case OPENIBNAL_INIT_RX_CQ:
- rc = ib_cq_destroy (koibnal_data.koib_rx_cq);
+ case IBNAL_INIT_CQ:
+ rc = ib_cq_destroy (kibnal_data.kib_cq);
if (rc != 0)
- CERROR ("Destroy rx CQ error: %d\n", rc);
+ CERROR ("Destroy CQ error: %d\n", rc);
/* fall through */
- case OPENIBNAL_INIT_TXD:
- koibnal_free_pages (koibnal_data.koib_tx_pages);
+ case IBNAL_INIT_TXD:
+ kibnal_free_pages (kibnal_data.kib_tx_pages);
/* fall through */
-#if OPENIBNAL_FMR
- case OPENIBNAL_INIT_FMR:
- rc = ib_fmr_pool_destroy (koibnal_data.koib_fmr_pool);
+#if IBNAL_FMR
+ case IBNAL_INIT_FMR:
+ rc = ib_fmr_pool_destroy (kibnal_data.kib_fmr_pool);
if (rc != 0)
CERROR ("Destroy FMR pool error: %d\n", rc);
/* fall through */
#endif
- case OPENIBNAL_INIT_PD:
- rc = ib_pd_destroy(koibnal_data.koib_pd);
+ case IBNAL_INIT_PD:
+ rc = ib_pd_destroy(kibnal_data.kib_pd);
if (rc != 0)
CERROR ("Destroy PD error: %d\n", rc);
/* fall through */
- case OPENIBNAL_INIT_LIB:
- lib_fini(&koibnal_lib);
- /* fall through */
-
- case OPENIBNAL_INIT_DATA:
+ case IBNAL_INIT_DATA:
/* Module refcount only gets to zero when all peers
* have been closed so all lists must be empty */
- LASSERT (atomic_read (&koibnal_data.koib_npeers) == 0);
- LASSERT (koibnal_data.koib_peers != NULL);
- for (i = 0; i < koibnal_data.koib_peer_hash_size; i++) {
- LASSERT (list_empty (&koibnal_data.koib_peers[i]));
+ LASSERT (atomic_read(&kibnal_data.kib_npeers) == 0);
+ LASSERT (kibnal_data.kib_peers != NULL);
+ for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
+ LASSERT (list_empty (&kibnal_data.kib_peers[i]));
}
- LASSERT (atomic_read (&koibnal_data.koib_nconns) == 0);
- LASSERT (list_empty (&koibnal_data.koib_sched_rxq));
- LASSERT (list_empty (&koibnal_data.koib_sched_txq));
- LASSERT (list_empty (&koibnal_data.koib_connd_conns));
- LASSERT (list_empty (&koibnal_data.koib_connd_peers));
+ LASSERT (atomic_read (&kibnal_data.kib_nconns) == 0);
+ LASSERT (list_empty (&kibnal_data.kib_sched_rxq));
+ LASSERT (list_empty (&kibnal_data.kib_sched_txq));
+ LASSERT (list_empty (&kibnal_data.kib_reaper_conns));
+ LASSERT (list_empty (&kibnal_data.kib_connd_peers));
+ LASSERT (list_empty (&kibnal_data.kib_connd_acceptq));
/* flag threads to terminate; wake and wait for them to die */
- koibnal_data.koib_shutdown = 1;
- wake_up_all (&koibnal_data.koib_sched_waitq);
- wake_up_all (&koibnal_data.koib_connd_waitq);
+ kibnal_data.kib_shutdown = 1;
+ wake_up_all (&kibnal_data.kib_sched_waitq);
+ wake_up_all (&kibnal_data.kib_reaper_waitq);
+ wake_up_all (&kibnal_data.kib_connd_waitq);
i = 2;
- while (atomic_read (&koibnal_data.koib_nthreads) != 0) {
+ while (atomic_read (&kibnal_data.kib_nthreads) != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
- atomic_read (&koibnal_data.koib_nthreads));
- set_current_state (TASK_INTERRUPTIBLE);
- schedule_timeout (HZ);
+ atomic_read (&kibnal_data.kib_nthreads));
+ cfs_pause(cfs_time_seconds(1));
}
/* fall through */
- case OPENIBNAL_INIT_NOTHING:
+ case IBNAL_INIT_NOTHING:
break;
}
- if (koibnal_data.koib_tx_descs != NULL)
- PORTAL_FREE (koibnal_data.koib_tx_descs,
- OPENIBNAL_TX_MSGS * sizeof(koib_tx_t));
+ if (kibnal_data.kib_tx_descs != NULL)
+ LIBCFS_FREE (kibnal_data.kib_tx_descs,
+ IBNAL_TX_MSGS() * sizeof(kib_tx_t));
- if (koibnal_data.koib_peers != NULL)
- PORTAL_FREE (koibnal_data.koib_peers,
+ if (kibnal_data.kib_peers != NULL)
+ LIBCFS_FREE (kibnal_data.kib_peers,
sizeof (struct list_head) *
- koibnal_data.koib_peer_hash_size);
+ kibnal_data.kib_peer_hash_size);
CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
- atomic_read (&portal_kmemory));
- printk(KERN_INFO "Lustre: OpenIB NAL unloaded (final mem %d)\n",
- atomic_read(&portal_kmemory));
+ atomic_read (&libcfs_kmemory));
+
+ kibnal_data.kib_init = IBNAL_INIT_NOTHING;
+ PORTAL_MODULE_UNUSE;
+}
+
+int
+kibnal_get_ipoibidx(void)
+{
+ /* NB single threaded! */
+ static struct ib_port_properties port_props;
+
+ int ipoibidx = 0;
+ int devidx;
+ int port;
+ int rc;
+ struct ib_device *device;
+
+ for (devidx = 0; devidx <= kibnal_data.kib_hca_idx; devidx++) {
+ device = ib_device_get_by_index(devidx);
+
+ if (device == NULL) {
+ CERROR("Can't get IB device %d\n", devidx);
+ return -1;
+ }
+
+ for (port = 1; port <= 2; port++) {
+ if (devidx == kibnal_data.kib_hca_idx &&
+ port == kibnal_data.kib_port)
+ return ipoibidx;
+
+ rc = ib_port_properties_get(device, port,
+ &port_props);
+ if (rc == 0)
+ ipoibidx++;
+ }
+ }
- koibnal_data.koib_init = OPENIBNAL_INIT_NOTHING;
+ LBUG();
+ return -1;
}
int
-koibnal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
- ptl_ni_limits_t *requested_limits,
- ptl_ni_limits_t *actual_limits)
+kibnal_startup (lnet_ni_t *ni)
{
- ptl_process_id_t process_id;
- int pkmem = atomic_read(&portal_kmemory);
+ char ipif_name[32];
+ __u32 ip;
+ __u32 netmask;
+ int up;
+ struct timeval tv;
int rc;
+ int hca;
+ int port;
int i;
+ int nob;
+
+ LASSERT (ni->ni_lnd == &the_kiblnd);
- LASSERT (nal == &koibnal_api);
+ /* Only 1 instance supported */
+ if (kibnal_data.kib_init != IBNAL_INIT_NOTHING) {
+ CERROR ("Only 1 instance supported\n");
+ return -EPERM;
+ }
- if (nal->nal_refct != 0) {
- if (actual_limits != NULL)
- *actual_limits = koibnal_lib.libnal_ni.ni_actual_limits;
- /* This module got the first ref */
- PORTAL_MODULE_USE;
- return (PTL_OK);
+ if (*kibnal_tunables.kib_credits > *kibnal_tunables.kib_ntx) {
+ CERROR ("Can't set credits(%d) > ntx(%d)\n",
+ *kibnal_tunables.kib_credits,
+ *kibnal_tunables.kib_ntx);
+ return -EINVAL;
}
- LASSERT (koibnal_data.koib_init == OPENIBNAL_INIT_NOTHING);
+ memset (&kibnal_data, 0, sizeof (kibnal_data)); /* zero pointers, flags etc */
- memset (&koibnal_data, 0, sizeof (koibnal_data)); /* zero pointers, flags etc */
+ ni->ni_maxtxcredits = *kibnal_tunables.kib_credits;
+ ni->ni_peertxcredits = *kibnal_tunables.kib_peercredits;
- init_MUTEX (&koibnal_data.koib_nid_mutex);
- init_MUTEX_LOCKED (&koibnal_data.koib_nid_signal);
- koibnal_data.koib_nid = PTL_NID_ANY;
+ CLASSERT (LNET_MAX_INTERFACES > 1);
- rwlock_init(&koibnal_data.koib_global_lock);
- koibnal_data.koib_peer_hash_size = OPENIBNAL_PEER_HASH_SIZE;
- PORTAL_ALLOC (koibnal_data.koib_peers,
- sizeof (struct list_head) * koibnal_data.koib_peer_hash_size);
- if (koibnal_data.koib_peers == NULL) {
+ kibnal_data.kib_hca_idx = 0; /* default: first HCA */
+ kibnal_data.kib_port = 0; /* any port */
+
+ if (ni->ni_interfaces[0] != NULL) {
+ /* hca.port specified in 'networks=openib(h.p)' */
+ if (ni->ni_interfaces[1] != NULL) {
+ CERROR("Multiple interfaces not supported\n");
+ return -EPERM;
+ }
+
+ nob = strlen(ni->ni_interfaces[0]);
+ i = sscanf(ni->ni_interfaces[0], "%d.%d%n", &hca, &port, &nob);
+ if (i >= 2 && nob == strlen(ni->ni_interfaces[0])) {
+ kibnal_data.kib_hca_idx = hca;
+ kibnal_data.kib_port = port;
+ } else {
+ nob = strlen(ni->ni_interfaces[0]);
+ i = sscanf(ni->ni_interfaces[0], "%d%n", &hca, &nob);
+
+ if (i >= 1 && nob == strlen(ni->ni_interfaces[0])) {
+ kibnal_data.kib_hca_idx = hca;
+ } else {
+ CERROR("Can't parse interface '%s'\n",
+ ni->ni_interfaces[0]);
+ return -EINVAL;
+ }
+ }
+ }
+
+ kibnal_data.kib_ni = ni;
+ ni->ni_data = &kibnal_data;
+
+ do_gettimeofday(&tv);
+ kibnal_data.kib_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+
+ PORTAL_MODULE_USE;
+
+ rwlock_init(&kibnal_data.kib_global_lock);
+
+ kibnal_data.kib_peer_hash_size = IBNAL_PEER_HASH_SIZE;
+ LIBCFS_ALLOC (kibnal_data.kib_peers,
+ sizeof (struct list_head) * kibnal_data.kib_peer_hash_size);
+ if (kibnal_data.kib_peers == NULL) {
goto failed;
}
- for (i = 0; i < koibnal_data.koib_peer_hash_size; i++)
- INIT_LIST_HEAD(&koibnal_data.koib_peers[i]);
+ for (i = 0; i < kibnal_data.kib_peer_hash_size; i++)
+ INIT_LIST_HEAD(&kibnal_data.kib_peers[i]);
+
+ spin_lock_init (&kibnal_data.kib_reaper_lock);
+ INIT_LIST_HEAD (&kibnal_data.kib_reaper_conns);
+ init_waitqueue_head (&kibnal_data.kib_reaper_waitq);
- spin_lock_init (&koibnal_data.koib_connd_lock);
- INIT_LIST_HEAD (&koibnal_data.koib_connd_peers);
- INIT_LIST_HEAD (&koibnal_data.koib_connd_conns);
- init_waitqueue_head (&koibnal_data.koib_connd_waitq);
+ spin_lock_init (&kibnal_data.kib_connd_lock);
+ INIT_LIST_HEAD (&kibnal_data.kib_connd_acceptq);
+ INIT_LIST_HEAD (&kibnal_data.kib_connd_peers);
+ init_waitqueue_head (&kibnal_data.kib_connd_waitq);
- spin_lock_init (&koibnal_data.koib_sched_lock);
- INIT_LIST_HEAD (&koibnal_data.koib_sched_txq);
- INIT_LIST_HEAD (&koibnal_data.koib_sched_rxq);
- init_waitqueue_head (&koibnal_data.koib_sched_waitq);
+ spin_lock_init (&kibnal_data.kib_sched_lock);
+ INIT_LIST_HEAD (&kibnal_data.kib_sched_txq);
+ INIT_LIST_HEAD (&kibnal_data.kib_sched_rxq);
+ init_waitqueue_head (&kibnal_data.kib_sched_waitq);
- spin_lock_init (&koibnal_data.koib_tx_lock);
- INIT_LIST_HEAD (&koibnal_data.koib_idle_txs);
- INIT_LIST_HEAD (&koibnal_data.koib_idle_nblk_txs);
- init_waitqueue_head(&koibnal_data.koib_idle_tx_waitq);
+ spin_lock_init (&kibnal_data.kib_tx_lock);
+ INIT_LIST_HEAD (&kibnal_data.kib_idle_txs);
- PORTAL_ALLOC (koibnal_data.koib_tx_descs,
- OPENIBNAL_TX_MSGS * sizeof(koib_tx_t));
- if (koibnal_data.koib_tx_descs == NULL) {
+ LIBCFS_ALLOC (kibnal_data.kib_tx_descs,
+ IBNAL_TX_MSGS() * sizeof(kib_tx_t));
+ if (kibnal_data.kib_tx_descs == NULL) {
CERROR ("Can't allocate tx descs\n");
goto failed;
}
/* lists/ptrs/locks initialised */
- koibnal_data.koib_init = OPENIBNAL_INIT_DATA;
+ kibnal_data.kib_init = IBNAL_INIT_DATA;
/*****************************************************/
- process_id.pid = requested_pid;
- process_id.nid = koibnal_data.koib_nid;
-
- rc = lib_init(&koibnal_lib, nal, process_id,
- requested_limits, actual_limits);
- if (rc != PTL_OK) {
- CERROR("lib_init failed: error %d\n", rc);
- goto failed;
+ for (i = 0; i < IBNAL_N_SCHED; i++) {
+ rc = kibnal_thread_start (kibnal_scheduler,
+ (void *)((unsigned long)i));
+ if (rc != 0) {
+ CERROR("Can't spawn openibnal scheduler[%d]: %d\n",
+ i, rc);
+ goto failed;
+ }
}
- /* lib interface initialised */
- koibnal_data.koib_init = OPENIBNAL_INIT_LIB;
- /*****************************************************/
+ /* must have at least 2 connds to remain responsive to svcqry while
+ * connecting */
+ if (*kibnal_tunables.kib_n_connd < 2)
+ *kibnal_tunables.kib_n_connd = 2;
- for (i = 0; i < OPENIBNAL_N_SCHED; i++) {
- rc = koibnal_thread_start (koibnal_scheduler, (void *)i);
+
+ for (i = 0; i < *kibnal_tunables.kib_n_connd; i++) {
+ rc = kibnal_thread_start (kibnal_connd,
+ (void *)((unsigned long)i));
if (rc != 0) {
- CERROR("Can't spawn openibnal scheduler[%d]: %d\n",
+ CERROR("Can't spawn openibnal connd[%d]: %d\n",
i, rc);
goto failed;
}
}
- rc = koibnal_thread_start (koibnal_connd, NULL);
+ rc = kibnal_thread_start (kibnal_reaper, NULL);
if (rc != 0) {
- CERROR ("Can't spawn openibnal connd: %d\n", rc);
+ CERROR ("Can't spawn openibnal reaper: %d\n", rc);
goto failed;
}
- koibnal_data.koib_device = ib_device_get_by_index(0);
- if (koibnal_data.koib_device == NULL) {
- CERROR ("Can't open ib device 0\n");
+ kibnal_data.kib_device = ib_device_get_by_index(kibnal_data.kib_hca_idx);
+ if (kibnal_data.kib_device == NULL) {
+ CERROR ("Can't open ib device %d\n",
+ kibnal_data.kib_hca_idx);
goto failed;
}
- rc = ib_device_properties_get(koibnal_data.koib_device,
- &koibnal_data.koib_device_props);
+ rc = ib_device_properties_get(kibnal_data.kib_device,
+ &kibnal_data.kib_device_props);
if (rc != 0) {
CERROR ("Can't get device props: %d\n", rc);
goto failed;
}
CDEBUG(D_NET, "Max Initiator: %d Max Responder %d\n",
- koibnal_data.koib_device_props.max_initiator_per_qp,
- koibnal_data.koib_device_props.max_responder_per_qp);
-
- koibnal_data.koib_port = 0;
- for (i = 1; i <= 2; i++) {
- rc = ib_port_properties_get(koibnal_data.koib_device, i,
- &koibnal_data.koib_port_props);
- if (rc == 0) {
- koibnal_data.koib_port = i;
- break;
+ kibnal_data.kib_device_props.max_initiator_per_qp,
+ kibnal_data.kib_device_props.max_responder_per_qp);
+
+ if (kibnal_data.kib_port != 0) {
+ rc = ib_port_properties_get(kibnal_data.kib_device,
+ kibnal_data.kib_port,
+ &kibnal_data.kib_port_props);
+ if (rc != 0) {
+ CERROR("Error %d open port %d on HCA %d\n", rc,
+ kibnal_data.kib_port,
+ kibnal_data.kib_hca_idx);
+ goto failed;
+ }
+ } else {
+ for (i = 1; i <= 2; i++) {
+ rc = ib_port_properties_get(kibnal_data.kib_device, i,
+ &kibnal_data.kib_port_props);
+ if (rc == 0) {
+ kibnal_data.kib_port = i;
+ break;
+ }
+ }
+ if (kibnal_data.kib_port == 0) {
+ CERROR ("Can't find a port\n");
+ goto failed;
}
}
- if (koibnal_data.koib_port == 0) {
- CERROR ("Can't find a port\n");
+
+ i = kibnal_get_ipoibidx();
+ if (i < 0)
+ goto failed;
+
+ snprintf(ipif_name, sizeof(ipif_name), "%s%d",
+ *kibnal_tunables.kib_ipif_basename, i);
+ if (strlen(ipif_name) == sizeof(ipif_name) - 1) {
+ CERROR("IPoIB interface name %s truncated\n", ipif_name);
+ return -EINVAL;
+ }
+
+ rc = libcfs_ipif_query(ipif_name, &up, &ip, &netmask);
+ if (rc != 0) {
+ CERROR("Can't query IPoIB interface %s: %d\n", ipif_name, rc);
+ goto failed;
+ }
+
+ if (!up) {
+ CERROR("Can't query IPoIB interface %s: it's down\n", ipif_name);
goto failed;
}
+
+ ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ip);
- rc = ib_pd_create(koibnal_data.koib_device,
- NULL, &koibnal_data.koib_pd);
+ rc = ib_pd_create(kibnal_data.kib_device,
+ NULL, &kibnal_data.kib_pd);
if (rc != 0) {
CERROR ("Can't create PD: %d\n", rc);
goto failed;
}
/* flag PD initialised */
- koibnal_data.koib_init = OPENIBNAL_INIT_PD;
+ kibnal_data.kib_init = IBNAL_INIT_PD;
/*****************************************************/
-#if OPENIBNAL_FMR
+#if IBNAL_FMR
{
- const int pool_size = OPENIBNAL_NTX + OPENIBNAL_NTX_NBLK;
+ const int pool_size = *kibnal_tunables.kib_ntx;
struct ib_fmr_pool_param params = {
- .max_pages_per_fmr = PTL_MTU/PAGE_SIZE,
+ .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
.access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ),
.flush_arg = NULL,
.cache = 1,
};
- rc = ib_fmr_pool_create(koibnal_data.koib_pd, &params,
- &koibnal_data.koib_fmr_pool);
+ rc = ib_fmr_pool_create(kibnal_data.kib_pd, &params,
+ &kibnal_data.kib_fmr_pool);
if (rc != 0) {
CERROR ("Can't create FMR pool size %d: %d\n",
pool_size, rc);
}
/* flag FMR pool initialised */
- koibnal_data.koib_init = OPENIBNAL_INIT_FMR;
+ kibnal_data.kib_init = IBNAL_INIT_FMR;
#endif
/*****************************************************/
- rc = koibnal_setup_tx_descs();
+ rc = kibnal_setup_tx_descs();
if (rc != 0) {
CERROR ("Can't register tx descs: %d\n", rc);
goto failed;
}
/* flag TX descs initialised */
- koibnal_data.koib_init = OPENIBNAL_INIT_TXD;
+ kibnal_data.kib_init = IBNAL_INIT_TXD;
/*****************************************************/
{
struct ib_cq_callback callback = {
- .context = OPENIBNAL_CALLBACK_CTXT,
+ .context = IBNAL_CALLBACK_CTXT,
.policy = IB_CQ_PROVIDER_REARM,
.function = {
- .entry = koibnal_rx_callback,
+ .entry = kibnal_callback,
},
.arg = NULL,
};
- int nentries = OPENIBNAL_RX_CQ_ENTRIES;
+ int nentries = IBNAL_CQ_ENTRIES();
- rc = ib_cq_create (koibnal_data.koib_device,
+ rc = ib_cq_create (kibnal_data.kib_device,
&nentries, &callback, NULL,
- &koibnal_data.koib_rx_cq);
+ &kibnal_data.kib_cq);
if (rc != 0) {
- CERROR ("Can't create RX CQ: %d\n", rc);
+ CERROR ("Can't create CQ: %d\n", rc);
goto failed;
}
/* I only want solicited events */
- rc = ib_cq_request_notification(koibnal_data.koib_rx_cq, 1);
+ rc = ib_cq_request_notification(kibnal_data.kib_cq, 1);
LASSERT (rc == 0);
}
-
- /* flag RX CQ initialised */
- koibnal_data.koib_init = OPENIBNAL_INIT_RX_CQ;
- /*****************************************************/
- {
- struct ib_cq_callback callback = {
- .context = OPENIBNAL_CALLBACK_CTXT,
- .policy = IB_CQ_PROVIDER_REARM,
- .function = {
- .entry = koibnal_tx_callback,
- },
- .arg = NULL,
- };
- int nentries = OPENIBNAL_TX_CQ_ENTRIES;
-
- rc = ib_cq_create (koibnal_data.koib_device,
- &nentries, &callback, NULL,
- &koibnal_data.koib_tx_cq);
- if (rc != 0) {
- CERROR ("Can't create RX CQ: %d\n", rc);
- goto failed;
- }
-
- /* I only want solicited events */
- rc = ib_cq_request_notification(koibnal_data.koib_tx_cq, 1);
- LASSERT (rc == 0);
- }
-
- /* flag TX CQ initialised */
- koibnal_data.koib_init = OPENIBNAL_INIT_TX_CQ;
+ /* flag CQ initialised */
+ kibnal_data.kib_init = IBNAL_INIT_CQ;
/*****************************************************/
-
- rc = libcfs_nal_cmd_register(OPENIBNAL, &koibnal_cmd, NULL);
- if (rc != 0) {
- CERROR ("Can't initialise command interface (rc = %d)\n", rc);
- goto failed;
- }
+ rc = kibnal_start_ib_listener();
+ if (rc != 0)
+ goto failed;
+
/* flag everything initialised */
- koibnal_data.koib_init = OPENIBNAL_INIT_ALL;
+ kibnal_data.kib_init = IBNAL_INIT_ALL;
/*****************************************************/
- printk(KERN_INFO "Lustre: OpenIB NAL loaded "
- "(initial mem %d)\n", pkmem);
-
- return (PTL_OK);
+ return 0;
failed:
- koibnal_api_shutdown (&koibnal_api);
- return (PTL_FAIL);
+ kibnal_shutdown(ni);
+ return -ENETDOWN;
}
void __exit
-koibnal_module_fini (void)
+kibnal_module_fini (void)
{
-#ifdef CONFIG_SYSCTL
- if (koibnal_tunables.koib_sysctl != NULL)
- unregister_sysctl_table (koibnal_tunables.koib_sysctl);
-#endif
- PtlNIFini(koibnal_ni);
-
- ptl_unregister_nal(OPENIBNAL);
+ lnet_unregister_lnd(&the_kiblnd);
+ kibnal_tunables_fini();
}
int __init
-koibnal_module_init (void)
+kibnal_module_init (void)
{
int rc;
- /* the following must be sizeof(int) for proc_dointvec() */
- LASSERT(sizeof (koibnal_tunables.koib_io_timeout) == sizeof (int));
-
- koibnal_api.nal_ni_init = koibnal_api_startup;
- koibnal_api.nal_ni_fini = koibnal_api_shutdown;
-
- /* Initialise dynamic tunables to defaults once only */
- koibnal_tunables.koib_io_timeout = OPENIBNAL_IO_TIMEOUT;
-
- rc = ptl_register_nal(OPENIBNAL, &koibnal_api);
- if (rc != PTL_OK) {
- CERROR("Can't register OPENIBNAL: %d\n", rc);
- return (-ENOMEM); /* or something... */
- }
-
- /* Pure gateways want the NAL started up at module load time... */
- rc = PtlNIInit(OPENIBNAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &koibnal_ni);
- if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
- ptl_unregister_nal(OPENIBNAL);
- return (-ENODEV);
- }
+ rc = kibnal_tunables_init();
+ if (rc != 0)
+ return rc;
-#ifdef CONFIG_SYSCTL
- /* Press on regardless even if registering sysctl doesn't work */
- koibnal_tunables.koib_sysctl =
- register_sysctl_table (koibnal_top_ctl_table, 0);
-#endif
+ lnet_register_lnd(&the_kiblnd);
+
return (0);
}
MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
-MODULE_DESCRIPTION("Kernel OpenIB NAL v0.01");
+#ifdef USING_TSAPI
+MODULE_DESCRIPTION("Kernel Cisco IB LND v1.00");
+#else
+MODULE_DESCRIPTION("Kernel OpenIB(gen1) LND v1.00");
+#endif
MODULE_LICENSE("GPL");
-module_init(koibnal_module_init);
-module_exit(koibnal_module_fini);
+module_init(kibnal_module_init);
+module_exit(kibnal_module_fini);