X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fo2iblnd%2Fo2iblnd.c;h=152173b293b45feb22cc8292258c1a650afe6d8d;hp=dde4cfd759b372256f15f5dd03d45ab52f59f343;hb=HEAD;hpb=546993d587c5fc380e9745eae98f863e02e68575 diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c index dde4cfd..7a7ca38 100644 --- a/lnet/klnds/o2iblnd/o2iblnd.c +++ b/lnet/klnds/o2iblnd/o2iblnd.c @@ -27,7 +27,6 @@ */ /* * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. * * lnet/klnds/o2iblnd/o2iblnd.c * @@ -35,11 +34,12 @@ */ #include +#include #include #include "o2iblnd.h" -static struct lnet_lnd the_o2iblnd; +static const struct lnet_lnd the_o2iblnd; struct kib_data kiblnd_data; @@ -129,51 +129,51 @@ kiblnd_msgtype2size(int type) } } -static int kiblnd_unpack_rd(struct kib_msg *msg, int flip) +static int kiblnd_unpack_rd(struct kib_msg *msg, bool flip) { struct kib_rdma_desc *rd; - int nob; - int n; - int i; + int nob; + int n; + int i; - LASSERT (msg->ibm_type == IBLND_MSG_GET_REQ || - msg->ibm_type == IBLND_MSG_PUT_ACK); + LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ || + msg->ibm_type == IBLND_MSG_PUT_ACK); - rd = msg->ibm_type == IBLND_MSG_GET_REQ ? - &msg->ibm_u.get.ibgm_rd : - &msg->ibm_u.putack.ibpam_rd; + rd = msg->ibm_type == IBLND_MSG_GET_REQ ? + &msg->ibm_u.get.ibgm_rd : + &msg->ibm_u.putack.ibpam_rd; - if (flip) { - __swab32s(&rd->rd_key); - __swab32s(&rd->rd_nfrags); - } + if (flip) { + __swab32s(&rd->rd_key); + __swab32s(&rd->rd_nfrags); + } - n = rd->rd_nfrags; + n = rd->rd_nfrags; - if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) { - CERROR("Bad nfrags: %d, should be 0 < n <= %d\n", - n, IBLND_MAX_RDMA_FRAGS); - return 1; - } + if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) { + CERROR("Bad nfrags: %d, should be 0 < n <= %d\n", + n, IBLND_MAX_RDMA_FRAGS); + return 1; + } nob = offsetof(struct kib_msg, ibm_u) + - kiblnd_rd_msg_size(rd, msg->ibm_type, n); + kiblnd_rd_msg_size(rd, msg->ibm_type, n); - if (msg->ibm_nob < nob) { - CERROR("Short %s: %d(%d)\n", - kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob); - return 1; - } + if (msg->ibm_nob < nob) { + CERROR("Short %s: %d(%d)\n", + kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob); + return 1; + } - if (!flip) - return 0; + if (!flip) + return 0; - for (i = 0; i < n; i++) { - __swab32s(&rd->rd_frags[i].rf_nob); - __swab64s(&rd->rd_frags[i].rf_addr); - } + for (i = 0; i < n; i++) { + __swab32s(&rd->rd_frags[i].rf_nob); + __swab64s(&rd->rd_frags[i].rf_addr); + } - return 0; + return 0; } void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version, @@ -181,134 +181,136 @@ void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version, { struct kib_net *net = ni->ni_data; - /* CAVEAT EMPTOR! all message fields not set here should have been - * initialised previously. */ - msg->ibm_magic = IBLND_MSG_MAGIC; - msg->ibm_version = version; - /* ibm_type */ - msg->ibm_credits = credits; - /* ibm_nob */ - msg->ibm_cksum = 0; - msg->ibm_srcnid = ni->ni_nid; - msg->ibm_srcstamp = net->ibn_incarnation; - msg->ibm_dstnid = dstnid; - msg->ibm_dststamp = dststamp; - - if (*kiblnd_tunables.kib_cksum) { - /* NB ibm_cksum zero while computing cksum */ - msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob); - } + /* CAVEAT EMPTOR! all message fields not set here should have been + * initialised previously. 
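kiblnd_pack_msg() fills every header field, leaves ibm_cksum at zero, and only then sums the message, so the receiver can re-zero that field and hash exactly the bytes the sender hashed; a zero checksum on the wire means "not computed" and is accepted. A minimal sketch of that seal/verify pattern, assuming <linux/types.h>; wire_hdr and sum32() are illustrative stand-ins, not the LND's kiblnd_cksum():

struct wire_hdr {
	__u32	wh_cksum;	/* always summed with this field zeroed */
	__u32	wh_payload;
};

static __u32 sum32(const void *p, int nob)	/* placeholder digest */
{
	const __u8 *c = p;
	__u32 sum = 0;

	while (nob-- > 0)
		sum = ((sum << 1) | (sum >> 31)) + *c++;
	return sum;
}

static void wire_hdr_seal(struct wire_hdr *h)
{
	h->wh_cksum = 0;			/* NB zero while summing */
	h->wh_cksum = sum32(h, sizeof(*h));
}

static bool wire_hdr_verify(struct wire_hdr *h)
{
	__u32 given = h->wh_cksum;

	h->wh_cksum = 0;			/* reproduce the sender's view */
	return given == 0 || given == sum32(h, sizeof(*h));
}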
+ */ + msg->ibm_magic = IBLND_MSG_MAGIC; + msg->ibm_version = version; + /* ibm_type */ + msg->ibm_credits = credits; + /* ibm_nob */ + msg->ibm_cksum = 0; + msg->ibm_srcnid = lnet_nid_to_nid4(&ni->ni_nid); + msg->ibm_srcstamp = net->ibn_incarnation; + msg->ibm_dstnid = dstnid; + msg->ibm_dststamp = dststamp; + + if (*kiblnd_tunables.kib_cksum) { + /* NB ibm_cksum zero while computing cksum */ + msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob); + } } int kiblnd_unpack_msg(struct kib_msg *msg, int nob) { const int hdr_size = offsetof(struct kib_msg, ibm_u); - __u32 msg_cksum; - __u16 version; - int msg_nob; - int flip; - - /* 6 bytes are enough to have received magic + version */ - if (nob < 6) { - CERROR("Short message: %d\n", nob); - return -EPROTO; - } + __u32 msg_cksum; + __u16 version; + int msg_nob; + bool flip; - if (msg->ibm_magic == IBLND_MSG_MAGIC) { - flip = 0; - } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) { - flip = 1; - } else { - CERROR("Bad magic: %08x\n", msg->ibm_magic); - return -EPROTO; - } + /* 6 bytes are enough to have received magic + version */ + if (nob < 6) { + CERROR("Short message: %d\n", nob); + return -EPROTO; + } - version = flip ? __swab16(msg->ibm_version) : msg->ibm_version; - if (version != IBLND_MSG_VERSION && - version != IBLND_MSG_VERSION_1) { - CERROR("Bad version: %x\n", version); - return -EPROTO; - } + if (msg->ibm_magic == IBLND_MSG_MAGIC) { + flip = false; + } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) { + flip = true; + } else { + CERROR("Bad magic: %08x\n", msg->ibm_magic); + return -EPROTO; + } - if (nob < hdr_size) { - CERROR("Short message: %d\n", nob); - return -EPROTO; - } + version = flip ? __swab16(msg->ibm_version) : msg->ibm_version; + if (version != IBLND_MSG_VERSION && + version != IBLND_MSG_VERSION_1) { + CERROR("Bad version: %x\n", version); + return -EPROTO; + } - msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob; - if (msg_nob > nob) { - CERROR("Short message: got %d, wanted %d\n", nob, msg_nob); - return -EPROTO; - } + if (nob < hdr_size) { + CERROR("Short message: %d\n", nob); + return -EPROTO; + } - /* checksum must be computed with ibm_cksum zero and BEFORE anything - * gets flipped */ - msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum; - msg->ibm_cksum = 0; - if (msg_cksum != 0 && - msg_cksum != kiblnd_cksum(msg, msg_nob)) { - CERROR("Bad checksum\n"); - return -EPROTO; - } + msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob; + if (msg_nob > nob) { + CERROR("Short message: got %d, wanted %d\n", nob, msg_nob); + return -EPROTO; + } - msg->ibm_cksum = msg_cksum; - - if (flip) { - /* leave magic unflipped as a clue to peer_ni endianness */ - msg->ibm_version = version; - CLASSERT (sizeof(msg->ibm_type) == 1); - CLASSERT (sizeof(msg->ibm_credits) == 1); - msg->ibm_nob = msg_nob; - __swab64s(&msg->ibm_srcnid); - __swab64s(&msg->ibm_srcstamp); - __swab64s(&msg->ibm_dstnid); - __swab64s(&msg->ibm_dststamp); - } + /* checksum must be computed with ibm_cksum zero and BEFORE anything + * gets flipped + */ + msg_cksum = flip ? 
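kiblnd_unpack_msg() reads only magic + version before trusting anything else (hence the 6-byte minimum), and a byte-swapped magic is taken as proof that the peer runs the opposite byte order, after which every multi-byte header field gets swabbed. A sketch of that probe, assuming <linux/swab.h> and <linux/errno.h>; MY_MAGIC is an illustrative constant, not IBLND_MSG_MAGIC:

#define MY_MAGIC 0x0bad0db1	/* any value that is not its own byte-swap */

static int probe_byte_order(__u32 magic, bool *flip)
{
	if (magic == MY_MAGIC)
		*flip = false;			/* same byte order as us */
	else if (magic == __swab32(MY_MAGIC))
		*flip = true;			/* peer is opposite-endian */
	else
		return -EPROTO;			/* not this protocol at all */
	return 0;
}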
__swab32(msg->ibm_cksum) : msg->ibm_cksum; + msg->ibm_cksum = 0; + if (msg_cksum != 0 && + msg_cksum != kiblnd_cksum(msg, msg_nob)) { + CERROR("Bad checksum\n"); + return -EPROTO; + } - if (msg->ibm_srcnid == LNET_NID_ANY) { - CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid)); - return -EPROTO; - } + msg->ibm_cksum = msg_cksum; - if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) { - CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type), - msg_nob, kiblnd_msgtype2size(msg->ibm_type)); - return -EPROTO; - } + if (flip) { + /* leave magic unflipped as a clue to peer_ni endianness */ + msg->ibm_version = version; + BUILD_BUG_ON(sizeof(msg->ibm_type) != 1); + BUILD_BUG_ON(sizeof(msg->ibm_credits) != 1); + msg->ibm_nob = msg_nob; + __swab64s(&msg->ibm_srcnid); + __swab64s(&msg->ibm_srcstamp); + __swab64s(&msg->ibm_dstnid); + __swab64s(&msg->ibm_dststamp); + } - switch (msg->ibm_type) { - default: - CERROR("Unknown message type %x\n", msg->ibm_type); - return -EPROTO; + if (msg->ibm_srcnid == LNET_NID_ANY) { + CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid)); + return -EPROTO; + } - case IBLND_MSG_NOOP: - case IBLND_MSG_IMMEDIATE: - case IBLND_MSG_PUT_REQ: - break; + if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) { + CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type), + msg_nob, kiblnd_msgtype2size(msg->ibm_type)); + return -EPROTO; + } - case IBLND_MSG_PUT_ACK: - case IBLND_MSG_GET_REQ: - if (kiblnd_unpack_rd(msg, flip)) - return -EPROTO; - break; + switch (msg->ibm_type) { + default: + CERROR("Unknown message type %x\n", msg->ibm_type); + return -EPROTO; - case IBLND_MSG_PUT_NAK: - case IBLND_MSG_PUT_DONE: - case IBLND_MSG_GET_DONE: - if (flip) - __swab32s(&msg->ibm_u.completion.ibcm_status); - break; + case IBLND_MSG_NOOP: + case IBLND_MSG_IMMEDIATE: + case IBLND_MSG_PUT_REQ: + break; - case IBLND_MSG_CONNREQ: - case IBLND_MSG_CONNACK: - if (flip) { - __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth); - __swab16s(&msg->ibm_u.connparams.ibcp_max_frags); - __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size); - } - break; - } - return 0; + case IBLND_MSG_PUT_ACK: + case IBLND_MSG_GET_REQ: + if (kiblnd_unpack_rd(msg, flip)) + return -EPROTO; + break; + + case IBLND_MSG_PUT_NAK: + case IBLND_MSG_PUT_DONE: + case IBLND_MSG_GET_DONE: + if (flip) + __swab32s(&msg->ibm_u.completion.ibcm_status); + break; + + case IBLND_MSG_CONNREQ: + case IBLND_MSG_CONNACK: + if (flip) { + __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth); + __swab16s(&msg->ibm_u.connparams.ibcp_max_frags); + __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size); + } + break; + } + return 0; } int @@ -324,10 +326,10 @@ kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp, LASSERT(nid != LNET_NID_ANY); LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni)); - if (peer_ni == NULL) { - CERROR("Cannot allocate peer_ni\n"); - return -ENOMEM; - } + if (!peer_ni) { + CERROR("Cannot allocate peer_ni\n"); + return -ENOMEM; + } peer_ni->ibp_ni = ni; peer_ni->ibp_nid = nid; @@ -335,9 +337,11 @@ kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp, peer_ni->ibp_last_alive = 0; peer_ni->ibp_max_frags = IBLND_MAX_RDMA_FRAGS; peer_ni->ibp_queue_depth = ni->ni_net->net_tunables.lct_peer_tx_credits; - atomic_set(&peer_ni->ibp_refcount, 1); /* 1 ref for caller */ + peer_ni->ibp_queue_depth_mod = 0; /* try to use the default */ + kref_init(&peer_ni->ibp_kref); + atomic_set(&peer_ni->ibp_nconns, 0); - INIT_LIST_HEAD(&peer_ni->ibp_list); /* not in the peer_ni table yet */ 
+ INIT_HLIST_NODE(&peer_ni->ibp_list); INIT_LIST_HEAD(&peer_ni->ibp_conns); INIT_LIST_HEAD(&peer_ni->ibp_tx_queue); @@ -356,12 +360,13 @@ kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp, } void -kiblnd_destroy_peer(struct kib_peer_ni *peer_ni) +kiblnd_destroy_peer(struct kref *kref) { + struct kib_peer_ni *peer_ni = container_of(kref, struct kib_peer_ni, + ibp_kref); struct kib_net *net = peer_ni->ibp_ni->ni_data; LASSERT(net != NULL); - LASSERT (atomic_read(&peer_ni->ibp_refcount) == 0); LASSERT(!kiblnd_peer_active(peer_ni)); LASSERT(kiblnd_peer_idle(peer_ni)); LASSERT(list_empty(&peer_ni->ibp_tx_queue)); @@ -371,22 +376,22 @@ kiblnd_destroy_peer(struct kib_peer_ni *peer_ni) /* NB a peer_ni's connections keep a reference on their peer_ni until * they are destroyed, so we can be assured that _all_ state to do * with this peer_ni has been cleaned up when its refcount drops to - * zero. */ - atomic_dec(&net->ibn_npeers); + * zero. + */ + if (atomic_dec_and_test(&net->ibn_npeers)) + wake_up_var(&net->ibn_npeers); } struct kib_peer_ni * kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid) { /* the caller is responsible for accounting the additional reference - * that this creates */ - struct list_head *peer_list = kiblnd_nid2peerlist(nid); - struct list_head *tmp; - struct kib_peer_ni *peer_ni; - - list_for_each(tmp, peer_list) { + * that this creates + */ + struct kib_peer_ni *peer_ni; - peer_ni = list_entry(tmp, struct kib_peer_ni, ibp_list); + hash_for_each_possible(kiblnd_data.kib_peers, peer_ni, + ibp_list, nid) { LASSERT(!kiblnd_peer_idle(peer_ni)); /* @@ -396,12 +401,12 @@ kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid) * created. */ if (peer_ni->ibp_nid != nid || - peer_ni->ibp_ni->ni_nid != ni->ni_nid) + !nid_same(&peer_ni->ibp_ni->ni_nid, &ni->ni_nid)) continue; CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d) version: %x\n", peer_ni, libcfs_nid2str(nid), - atomic_read(&peer_ni->ibp_refcount), + kref_read(&peer_ni->ibp_kref), peer_ni->ibp_version); return peer_ni; } @@ -413,43 +418,129 @@ kiblnd_unlink_peer_locked(struct kib_peer_ni *peer_ni) { LASSERT(list_empty(&peer_ni->ibp_conns)); - LASSERT (kiblnd_peer_active(peer_ni)); - list_del_init(&peer_ni->ibp_list); - /* lose peerlist's ref */ - kiblnd_peer_decref(peer_ni); + LASSERT(kiblnd_peer_active(peer_ni)); + hlist_del_init(&peer_ni->ibp_list); + /* lose peerlist's ref */ + kiblnd_peer_decref(peer_ni); +} + + +static void +kiblnd_debug_rx(struct kib_rx *rx) +{ + CDEBUG(D_CONSOLE, " %p msg_type %x cred %d\n", + rx, rx->rx_msg->ibm_type, + rx->rx_msg->ibm_credits); +} + +static void +kiblnd_debug_tx(struct kib_tx *tx) +{ + CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lld " + "cookie %#llx msg %s%s type %x cred %d\n", + tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting, + tx->tx_status, ktime_to_ns(tx->tx_deadline), tx->tx_cookie, + tx->tx_lntmsg[0] == NULL ? "-" : "!", + tx->tx_lntmsg[1] == NULL ? 
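kiblnd_destroy_peer() is now a kref release callback: the peer is recovered with container_of(), and when the owning net's live-peer count drops to zero the thread sleeping in shutdown is kicked with wake_up_var(). A hedged sketch of the same lifecycle, assuming <linux/kref.h> and a kernel with wait_var_event()/wake_up_var() (<linux/wait_bit.h>, v4.17+); 'widget' and its fields are illustrative:

struct widget {
	struct kref	 w_ref;
	atomic_t	*w_population;	/* shared count of live widgets */
};

static void widget_free(struct kref *kref)
{
	struct widget *w = container_of(kref, struct widget, w_ref);
	atomic_t *population = w->w_population;

	kfree(w);				/* nothing touches w below */
	if (atomic_dec_and_test(population))
		wake_up_var(population);	/* last one out wakes shutdown */
}

static inline void widget_put(struct widget *w)
{
	kref_put(&w->w_ref, widget_free);
}

Shutdown can then sleep in wait_var_event(population, !atomic_read(population)) instead of polling, which is presumably what the net teardown path does on ibn_npeers.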
"-" : "!", + tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits); } +static void +kiblnd_debug_conn(struct kib_conn *conn) +{ + struct list_head *tmp; + int i; + + spin_lock(&conn->ibc_lock); + + CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s:\n", + atomic_read(&conn->ibc_refcount), conn, + conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid)); + CDEBUG(D_CONSOLE, " state %d nposted %d/%d cred %d o_cred %d " + " r_cred %d\n", conn->ibc_state, conn->ibc_noops_posted, + conn->ibc_nsends_posted, conn->ibc_credits, + conn->ibc_outstanding_credits, conn->ibc_reserved_credits); + CDEBUG(D_CONSOLE, " comms_err %d\n", conn->ibc_comms_error); + + CDEBUG(D_CONSOLE, " early_rxs:\n"); + list_for_each(tmp, &conn->ibc_early_rxs) + kiblnd_debug_rx(list_entry(tmp, struct kib_rx, rx_list)); + + CDEBUG(D_CONSOLE, " tx_noops:\n"); + list_for_each(tmp, &conn->ibc_tx_noops) + kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); + + CDEBUG(D_CONSOLE, " tx_queue_nocred:\n"); + list_for_each(tmp, &conn->ibc_tx_queue_nocred) + kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); + + CDEBUG(D_CONSOLE, " tx_queue_rsrvd:\n"); + list_for_each(tmp, &conn->ibc_tx_queue_rsrvd) + kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); + + CDEBUG(D_CONSOLE, " tx_queue:\n"); + list_for_each(tmp, &conn->ibc_tx_queue) + kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); + + CDEBUG(D_CONSOLE, " active_txs:\n"); + list_for_each(tmp, &conn->ibc_active_txs) + kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); + + CDEBUG(D_CONSOLE, " rxs:\n"); + for (i = 0; i < IBLND_RX_MSGS(conn); i++) + kiblnd_debug_rx(&conn->ibc_rxs[i]); + + spin_unlock(&conn->ibc_lock); +} + +static void +kiblnd_dump_peer_debug_info(struct kib_peer_ni *peer_ni) +{ + struct kib_conn *conn; + struct kib_conn *cnxt; + int count = 0; + + CDEBUG(D_CONSOLE, "[last_alive, races, reconnected, error]: %lld, %d, %d, %d\n", + peer_ni->ibp_last_alive, + peer_ni->ibp_races, + peer_ni->ibp_reconnected, + peer_ni->ibp_error); + list_for_each_entry_safe(conn, cnxt, &peer_ni->ibp_conns, + ibc_list) { + CDEBUG(D_CONSOLE, "Conn %d:\n", count); + kiblnd_debug_conn(conn); + count++; + } +} + + static int -kiblnd_get_peer_info(struct lnet_ni *ni, int index, +kiblnd_get_peer_info(struct lnet_ni *ni, lnet_nid_t nid, int index, lnet_nid_t *nidp, int *count) { struct kib_peer_ni *peer_ni; - struct list_head *ptmp; int i; unsigned long flags; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { - - list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { + hash_for_each(kiblnd_data.kib_peers, i, peer_ni, ibp_list) { + LASSERT(!kiblnd_peer_idle(peer_ni)); - peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list); - LASSERT(!kiblnd_peer_idle(peer_ni)); + if (peer_ni->ibp_ni != ni) + continue; - if (peer_ni->ibp_ni != ni) - continue; + if (peer_ni->ibp_nid == nid) + kiblnd_dump_peer_debug_info(peer_ni); - if (index-- > 0) - continue; + if (index-- > 0) + continue; - *nidp = peer_ni->ibp_nid; - *count = atomic_read(&peer_ni->ibp_refcount); + *nidp = peer_ni->ibp_nid; + *count = kref_read(&peer_ni->ibp_kref); - read_unlock_irqrestore(&kiblnd_data.kib_global_lock, - flags); - return 0; - } + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + return 0; } read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); @@ -459,18 +550,15 @@ kiblnd_get_peer_info(struct lnet_ni *ni, int index, static void kiblnd_del_peer_locked(struct kib_peer_ni *peer_ni) { - struct list_head *ctmp; - struct list_head 
*cnxt; + struct kib_conn *cnxt; struct kib_conn *conn; if (list_empty(&peer_ni->ibp_conns)) { kiblnd_unlink_peer_locked(peer_ni); } else { - list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) { - conn = list_entry(ctmp, struct kib_conn, ibc_list); - + list_for_each_entry_safe(conn, cnxt, &peer_ni->ibp_conns, + ibc_list) kiblnd_close_conn_locked(conn, 0); - } /* NB closing peer_ni's last conn unlinked it. */ } /* NB peer_ni now unlinked; might even be freed if the peer_ni table had the @@ -481,27 +569,27 @@ static int kiblnd_del_peer(struct lnet_ni *ni, lnet_nid_t nid) { LIST_HEAD(zombies); - struct list_head *ptmp; - struct list_head *pnxt; - struct kib_peer_ni *peer_ni; - int lo; - int hi; - int i; - unsigned long flags; - int rc = -ENOENT; + struct hlist_node *pnxt; + struct kib_peer_ni *peer_ni; + int lo; + int hi; + int i; + unsigned long flags; + int rc = -ENOENT; write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (nid != LNET_NID_ANY) { - lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; - } else { - lo = 0; - hi = kiblnd_data.kib_peer_hash_size - 1; - } + if (nid != LNET_NID_ANY) { + lo = hash_min(nid, HASH_BITS(kiblnd_data.kib_peers)); + hi = lo; + } else { + lo = 0; + hi = HASH_SIZE(kiblnd_data.kib_peers) - 1; + } for (i = lo; i <= hi; i++) { - list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { - peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list); + hlist_for_each_entry_safe(peer_ni, pnxt, + &kiblnd_data.kib_peers[i], ibp_list) { LASSERT(!kiblnd_peer_idle(peer_ni)); if (peer_ni->ibp_ni != ni) @@ -532,143 +620,45 @@ kiblnd_del_peer(struct lnet_ni *ni, lnet_nid_t nid) static struct kib_conn * kiblnd_get_conn_by_idx(struct lnet_ni *ni, int index) { - struct kib_peer_ni *peer_ni; - struct list_head *ptmp; + struct kib_peer_ni *peer_ni; struct kib_conn *conn; - struct list_head *ctmp; - int i; - unsigned long flags; + int i; + unsigned long flags; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { - list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { + hash_for_each(kiblnd_data.kib_peers, i, peer_ni, ibp_list) { + LASSERT(!kiblnd_peer_idle(peer_ni)); - peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list); - LASSERT(!kiblnd_peer_idle(peer_ni)); + if (peer_ni->ibp_ni != ni) + continue; - if (peer_ni->ibp_ni != ni) + list_for_each_entry(conn, &peer_ni->ibp_conns, + ibc_list) { + if (index-- > 0) continue; - list_for_each(ctmp, &peer_ni->ibp_conns) { - if (index-- > 0) - continue; - - conn = list_entry(ctmp, struct kib_conn, ibc_list); - kiblnd_conn_addref(conn); - read_unlock_irqrestore(&kiblnd_data.kib_global_lock, - flags); - return conn; - } + kiblnd_conn_addref(conn); + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, + flags); + return conn; } } read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - return NULL; -} - -static void -kiblnd_debug_rx(struct kib_rx *rx) -{ - CDEBUG(D_CONSOLE, " %p msg_type %x cred %d\n", - rx, rx->rx_msg->ibm_type, - rx->rx_msg->ibm_credits); -} - -static void -kiblnd_debug_tx(struct kib_tx *tx) -{ - CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lld " - "cookie %#llx msg %s%s type %x cred %d\n", - tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting, - tx->tx_status, ktime_to_ns(tx->tx_deadline), tx->tx_cookie, - tx->tx_lntmsg[0] == NULL ? "-" : "!", - tx->tx_lntmsg[1] == NULL ? 
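kiblnd_find_peer_locked(), kiblnd_get_peer_info() and kiblnd_del_peer() show the peer table's move from open-coded bucket lists to <linux/hashtable.h>: entries carry an hlist_node, a keyed lookup walks only its bucket with hash_for_each_possible(), full scans use hash_for_each(), and the old lo..hi walk survives as hash_min() for one NID versus 0..HASH_SIZE()-1 for LNET_NID_ANY. A minimal sketch of that API with illustrative names:

#include <linux/hashtable.h>

static DEFINE_HASHTABLE(peer_tbl, 9);		/* 2^9 buckets */

struct peer {
	struct hlist_node	p_hlink;	/* cf. ibp_list as hlist_node */
	u64			p_nid;		/* the hash key */
};

static struct peer *peer_lookup(u64 nid)
{
	struct peer *p;

	hash_for_each_possible(peer_tbl, p, p_hlink, nid)
		if (p->p_nid == nid)	/* a bucket can hold other keys */
			return p;
	return NULL;
}

static void peer_del(u64 nid, bool all)
{
	struct hlist_node *next;
	struct peer *p;
	unsigned int lo, hi, i;

	if (!all) {
		lo = hash_min(nid, HASH_BITS(peer_tbl));
		hi = lo;			/* only the key's bucket */
	} else {
		lo = 0;
		hi = HASH_SIZE(peer_tbl) - 1;	/* every bucket */
	}

	for (i = lo; i <= hi; i++)
		hlist_for_each_entry_safe(p, next, &peer_tbl[i], p_hlink)
			if (all || p->p_nid == nid)
				hash_del(&p->p_hlink);	/* safe vs. removal */
}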
"-" : "!", - tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits); -} - -void -kiblnd_debug_conn(struct kib_conn *conn) -{ - struct list_head *tmp; - int i; - - spin_lock(&conn->ibc_lock); - - CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s:\n", - atomic_read(&conn->ibc_refcount), conn, - conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid)); - CDEBUG(D_CONSOLE, " state %d nposted %d/%d cred %d o_cred %d " - " r_cred %d\n", conn->ibc_state, conn->ibc_noops_posted, - conn->ibc_nsends_posted, conn->ibc_credits, - conn->ibc_outstanding_credits, conn->ibc_reserved_credits); - CDEBUG(D_CONSOLE, " comms_err %d\n", conn->ibc_comms_error); - - CDEBUG(D_CONSOLE, " early_rxs:\n"); - list_for_each(tmp, &conn->ibc_early_rxs) - kiblnd_debug_rx(list_entry(tmp, struct kib_rx, rx_list)); - - CDEBUG(D_CONSOLE, " tx_noops:\n"); - list_for_each(tmp, &conn->ibc_tx_noops) - kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); - - CDEBUG(D_CONSOLE, " tx_queue_nocred:\n"); - list_for_each(tmp, &conn->ibc_tx_queue_nocred) - kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); - - CDEBUG(D_CONSOLE, " tx_queue_rsrvd:\n"); - list_for_each(tmp, &conn->ibc_tx_queue_rsrvd) - kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); - - CDEBUG(D_CONSOLE, " tx_queue:\n"); - list_for_each(tmp, &conn->ibc_tx_queue) - kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); - - CDEBUG(D_CONSOLE, " active_txs:\n"); - list_for_each(tmp, &conn->ibc_active_txs) - kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); - - CDEBUG(D_CONSOLE, " rxs:\n"); - for (i = 0; i < IBLND_RX_MSGS(conn); i++) - kiblnd_debug_rx(&conn->ibc_rxs[i]); - - spin_unlock(&conn->ibc_lock); -} - -int -kiblnd_translate_mtu(int value) -{ - switch (value) { - default: - return -1; - case 0: - return 0; - case 256: - return IB_MTU_256; - case 512: - return IB_MTU_512; - case 1024: - return IB_MTU_1024; - case 2048: - return IB_MTU_2048; - case 4096: - return IB_MTU_4096; - } + return NULL; } static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) { - int mtu; - /* XXX There is no path record for iWARP, set by netdev->change_mtu? */ if (cmid->route.path_rec == NULL) return; - mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu); - LASSERT (mtu >= 0); - if (mtu != 0) - cmid->route.path_rec->mtu = mtu; + if (*kiblnd_tunables.kib_ib_mtu) + cmid->route.path_rec->mtu = + ib_mtu_int_to_enum(*kiblnd_tunables.kib_ib_mtu); } static int @@ -686,8 +676,12 @@ kiblnd_get_completion_vector(struct kib_conn *conn, int cpt) mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt); - /* hash NID to CPU id in this partition... */ - ibp_nid = conn->ibc_peer->ibp_nid; + /* hash NID to CPU id in this partition... 
when targeting a single peer + * with multiple QPs, to engage more cores in CQ processing to a single + * peer, use ibp_nconns to salt the value the comp_vector value + */ + ibp_nid = conn->ibc_peer->ibp_nid + + atomic_read(&conn->ibc_peer->ibp_nconns); off = do_div(ibp_nid, cpumask_weight(*mask)); for_each_cpu(i, *mask) { if (off-- == 0) @@ -736,10 +730,9 @@ static unsigned int kiblnd_send_wrs(struct kib_conn *conn) */ int ret; int multiplier = 1 + conn->ibc_max_frags; - enum kib_dev_caps dev_caps = conn->ibc_hdev->ibh_dev->ibd_dev_caps; /* FastReg needs two extra WRs for map and invalidate */ - if (dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED) + if (IS_FAST_REG_DEV(conn->ibc_hdev->ibh_dev)) multiplier += 2; /* account for a maximum of ibc_queue_depth in-flight transfers */ @@ -772,9 +765,9 @@ kiblnd_create_conn(struct kib_peer_ni *peer_ni, struct rdma_cm_id *cmid, rwlock_t *glock = &kiblnd_data.kib_global_lock; struct kib_net *net = peer_ni->ibp_ni->ni_data; struct kib_dev *dev; - struct ib_qp_init_attr *init_qp_attr; + struct ib_qp_init_attr init_qp_attr = {}; struct kib_sched_info *sched; -#ifdef HAVE_IB_CQ_INIT_ATTR +#ifdef HAVE_OFED_IB_CQ_INIT_ATTR struct ib_cq_init_attr cq_attr = {}; #endif struct kib_conn *conn; @@ -803,19 +796,11 @@ kiblnd_create_conn(struct kib_peer_ni *peer_ni, struct rdma_cm_id *cmid, */ cpt = sched->ibs_cpt; - LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt, - sizeof(*init_qp_attr)); - if (init_qp_attr == NULL) { - CERROR("Can't allocate qp_attr for %s\n", - libcfs_nid2str(peer_ni->ibp_nid)); - goto failed_0; - } - LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn)); if (conn == NULL) { CERROR("Can't allocate connection for %s\n", libcfs_nid2str(peer_ni->ibp_nid)); - goto failed_1; + goto failed_0; } conn->ibc_state = IBLND_CONN_INIT; @@ -872,7 +857,7 @@ kiblnd_create_conn(struct kib_peer_ni *peer_ni, struct rdma_cm_id *cmid, write_unlock_irqrestore(glock, flags); -#ifdef HAVE_IB_CQ_INIT_ATTR +#ifdef HAVE_OFED_IB_CQ_INIT_ATTR cq_attr.cqe = IBLND_CQ_ENTRIES(conn); cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt); cq = ib_create_cq(cmid->device, @@ -904,40 +889,57 @@ kiblnd_create_conn(struct kib_peer_ni *peer_ni, struct rdma_cm_id *cmid, goto failed_2; } - init_qp_attr->event_handler = kiblnd_qp_event; - init_qp_attr->qp_context = conn; - init_qp_attr->cap.max_send_sge = *kiblnd_tunables.kib_wrq_sge; - init_qp_attr->cap.max_recv_sge = 1; - init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR; - init_qp_attr->qp_type = IB_QPT_RC; - init_qp_attr->send_cq = cq; - init_qp_attr->recv_cq = cq; - /* - * kiblnd_send_wrs() can change the connection's queue depth if - * the maximum work requests for the device is maxed out - */ - init_qp_attr->cap.max_send_wr = kiblnd_send_wrs(conn); - init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn); + init_qp_attr.event_handler = kiblnd_qp_event; + init_qp_attr.qp_context = conn; + init_qp_attr.cap.max_send_sge = *kiblnd_tunables.kib_wrq_sge; + init_qp_attr.cap.max_recv_sge = 1; + init_qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; + init_qp_attr.qp_type = IB_QPT_RC; + init_qp_attr.send_cq = cq; + init_qp_attr.recv_cq = cq; + + if (peer_ni->ibp_queue_depth_mod && + peer_ni->ibp_queue_depth_mod < peer_ni->ibp_queue_depth) { + conn->ibc_queue_depth = peer_ni->ibp_queue_depth_mod; + CDEBUG(D_NET, "Use reduced queue depth %u (from %u)\n", + peer_ni->ibp_queue_depth_mod, + peer_ni->ibp_queue_depth); + } + + do { + /* kiblnd_send_wrs() can change the connection's queue depth if + * the maximum work requests for the device is 
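kiblnd_get_completion_vector() spreads the CQs of one CPT across its CPUs by hashing the peer NID, and the new ibp_nconns salt makes connections 0, 1, 2, ... to the same peer land on different vectors. A sketch of the pick, relying on do_div()'s usual semantics (divides the u64 in place, returns the remainder); names are illustrative:

static int pick_comp_vector(u64 nid, int nth_conn, const cpumask_t *mask)
{
	u64 key = nid + nth_conn;	/* the per-connection salt */
	unsigned int off;
	int cpu;

	off = do_div(key, cpumask_weight(mask));	/* off = key % weight */
	for_each_cpu(cpu, mask)
		if (off-- == 0)
			return cpu;	/* the off-th CPU of the partition */
	return 0;			/* unreachable for a non-empty mask */
}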
maxed out + */ + init_qp_attr.cap.max_send_wr = kiblnd_send_wrs(conn); + init_qp_attr.cap.max_recv_wr = IBLND_RECV_WRS(conn); + rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, + &init_qp_attr); + if (rc != -ENOMEM || conn->ibc_queue_depth < 2) + break; + conn->ibc_queue_depth--; + } while (rc); - rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr); if (rc) { CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d, " "send_sge: %d, recv_sge: %d\n", - rc, init_qp_attr->cap.max_send_wr, - init_qp_attr->cap.max_recv_wr, - init_qp_attr->cap.max_send_sge, - init_qp_attr->cap.max_recv_sge); + rc, init_qp_attr.cap.max_send_wr, + init_qp_attr.cap.max_recv_wr, + init_qp_attr.cap.max_send_sge, + init_qp_attr.cap.max_recv_sge); goto failed_2; } conn->ibc_sched = sched; - if (conn->ibc_queue_depth != peer_ni->ibp_queue_depth) + if (!peer_ni->ibp_queue_depth_mod && + conn->ibc_queue_depth != peer_ni->ibp_queue_depth) { CWARN("peer %s - queue depth reduced from %u to %u" " to allow for qp creation\n", libcfs_nid2str(peer_ni->ibp_nid), peer_ni->ibp_queue_depth, conn->ibc_queue_depth); + peer_ni->ibp_queue_depth_mod = conn->ibc_queue_depth; + } LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt, IBLND_RX_MSGS(conn) * sizeof(struct kib_rx)); @@ -953,8 +955,6 @@ kiblnd_create_conn(struct kib_peer_ni *peer_ni, struct rdma_cm_id *cmid, kiblnd_map_rx_descs(conn); - LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); - /* 1 ref for caller and each rxmsg */ atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn)); conn->ibc_nrx = IBLND_RX_MSGS(conn); @@ -994,14 +994,13 @@ kiblnd_create_conn(struct kib_peer_ni *peer_ni, struct rdma_cm_id *cmid, conn->ibc_state = state; /* 1 more conn */ + atomic_inc(&peer_ni->ibp_nconns); atomic_inc(&net->ibn_nconns); return conn; failed_2: kiblnd_destroy_conn(conn); LIBCFS_FREE(conn, sizeof(*conn)); - failed_1: - LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); failed_0: return NULL; } @@ -1010,8 +1009,7 @@ void kiblnd_destroy_conn(struct kib_conn *conn) { struct rdma_cm_id *cmid = conn->ibc_cmid; - struct kib_peer_ni *peer_ni = conn->ibc_peer; - int rc; + struct kib_peer_ni *peer_ni = conn->ibc_peer; LASSERT (!in_interrupt()); LASSERT (atomic_read(&conn->ibc_refcount) == 0); @@ -1042,11 +1040,8 @@ kiblnd_destroy_conn(struct kib_conn *conn) if (cmid != NULL && cmid->qp != NULL) rdma_destroy_qp(cmid); - if (conn->ibc_cq != NULL) { - rc = ib_destroy_cq(conn->ibc_cq); - if (rc != 0) - CWARN("Error destroying CQ: %d\n", rc); - } + if (conn->ibc_cq) + ib_destroy_cq(conn->ibc_cq); kiblnd_txlist_done(&conn->ibc_zombie_txs, -ECONNABORTED, LNET_MSG_STATUS_OK); @@ -1054,10 +1049,8 @@ kiblnd_destroy_conn(struct kib_conn *conn) if (conn->ibc_rx_pages != NULL) kiblnd_unmap_rx_descs(conn); - if (conn->ibc_rxs != NULL) { - LIBCFS_FREE(conn->ibc_rxs, - IBLND_RX_MSGS(conn) * sizeof(struct kib_rx)); - } + if (conn->ibc_rxs != NULL) + CFS_FREE_PTR_ARRAY(conn->ibc_rxs, IBLND_RX_MSGS(conn)); if (conn->ibc_connvars != NULL) LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars)); @@ -1071,6 +1064,7 @@ kiblnd_destroy_conn(struct kib_conn *conn) kiblnd_peer_decref(peer_ni); rdma_destroy_id(cmid); + atomic_dec(&peer_ni->ibp_nconns); atomic_dec(&net->ibn_nconns); } } @@ -1079,13 +1073,11 @@ int kiblnd_close_peer_conns_locked(struct kib_peer_ni *peer_ni, int why) { struct kib_conn *conn; - struct list_head *ctmp; - struct list_head *cnxt; - int count = 0; - - list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) { - conn = list_entry(ctmp, struct kib_conn, ibc_list); + struct kib_conn 
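rdma_create_qp() is now retried: each -ENOMEM steps the connection's queue depth down by one (recomputing max_send_wr/max_recv_wr through kiblnd_send_wrs()) until creation succeeds, a different error appears, or the depth bottoms out; the depth that finally worked is remembered in ibp_queue_depth_mod so later connections to that peer start there. The shrink-to-fit loop in isolation; try_create() stands in for rdma_create_qp():

static int create_shrink_to_fit(unsigned int *depth,
				int (*try_create)(unsigned int depth))
{
	int rc;

	do {
		rc = try_create(*depth);	/* sizes WRs from *depth */
		if (rc != -ENOMEM || *depth < 2)
			break;			/* done, hard error, or floor */
		(*depth)--;			/* too big: ask for less */
	} while (rc);

	return rc;			/* 0, or the last errno seen */
}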
*cnxt; + int count = 0; + list_for_each_entry_safe(conn, cnxt, &peer_ni->ibp_conns, + ibc_list) { CDEBUG(D_NET, "Closing conn -> %s, " "version: %x, reason: %d\n", libcfs_nid2str(peer_ni->ibp_nid), @@ -1103,13 +1095,11 @@ kiblnd_close_stale_conns_locked(struct kib_peer_ni *peer_ni, int version, __u64 incarnation) { struct kib_conn *conn; - struct list_head *ctmp; - struct list_head *cnxt; - int count = 0; - - list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) { - conn = list_entry(ctmp, struct kib_conn, ibc_list); + struct kib_conn *cnxt; + int count = 0; + list_for_each_entry_safe(conn, cnxt, &peer_ni->ibp_conns, + ibc_list) { if (conn->ibc_version == version && conn->ibc_incarnation == incarnation) continue; @@ -1130,28 +1120,27 @@ kiblnd_close_stale_conns_locked(struct kib_peer_ni *peer_ni, static int kiblnd_close_matching_conns(struct lnet_ni *ni, lnet_nid_t nid) { - struct kib_peer_ni *peer_ni; - struct list_head *ptmp; - struct list_head *pnxt; - int lo; - int hi; - int i; - unsigned long flags; - int count = 0; + struct kib_peer_ni *peer_ni; + struct hlist_node *pnxt; + int lo; + int hi; + int i; + unsigned long flags; + int count = 0; write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (nid != LNET_NID_ANY) - lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; - else { + if (nid != LNET_NID_ANY) { + lo = hash_min(nid, HASH_BITS(kiblnd_data.kib_peers)); + hi = lo; + } else { lo = 0; - hi = kiblnd_data.kib_peer_hash_size - 1; + hi = HASH_SIZE(kiblnd_data.kib_peers) - 1; } for (i = lo; i <= hi; i++) { - list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { - - peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list); + hlist_for_each_entry_safe(peer_ni, pnxt, + &kiblnd_data.kib_peers[i], ibp_list) { LASSERT(!kiblnd_peer_idle(peer_ni)); if (peer_ni->ibp_ni != ni) @@ -1184,7 +1173,7 @@ kiblnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg) lnet_nid_t nid = 0; int count = 0; - rc = kiblnd_get_peer_info(ni, data->ioc_count, + rc = kiblnd_get_peer_info(ni, data->ioc_nid, data->ioc_count, &nid, &count); data->ioc_nid = nid; data->ioc_count = count; @@ -1227,34 +1216,192 @@ kiblnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg) return rc; } -static void -kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when) +static const struct ln_key_list kiblnd_tunables_keys = { + .lkl_maxattr = LNET_NET_O2IBLND_TUNABLES_ATTR_MAX, + .lkl_list = { + [LNET_NET_O2IBLND_TUNABLES_ATTR_HIW_PEER_CREDITS] = { + .lkp_value = "peercredits_hiw", + .lkp_data_type = NLA_U32 + }, + [LNET_NET_O2IBLND_TUNABLES_ATTR_MAP_ON_DEMAND] = { + .lkp_value = "map_on_demand", + .lkp_data_type = NLA_FLAG + }, + [LNET_NET_O2IBLND_TUNABLES_ATTR_CONCURRENT_SENDS] = { + .lkp_value = "concurrent_sends", + .lkp_data_type = NLA_U32 + }, + [LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_POOL_SIZE] = { + .lkp_value = "fmr_pool_size", + .lkp_data_type = NLA_U32 + }, + [LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_FLUSH_TRIGGER] = { + .lkp_value = "fmr_flush_trigger", + .lkp_data_type = NLA_U32 + }, + [LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_CACHE] = { + .lkp_value = "fmr_cache", + .lkp_data_type = NLA_U32 + }, + [LNET_NET_O2IBLND_TUNABLES_ATTR_NTX] = { + .lkp_value = "ntx", + .lkp_data_type = NLA_U16 + }, + [LNET_NET_O2IBLND_TUNABLES_ATTR_CONNS_PER_PEER] = { + .lkp_value = "conns_per_peer", + .lkp_data_type = NLA_U16 + }, + [LNET_NET_O2IBLND_TUNABLES_ATTR_LND_TIMEOUT] = { + .lkp_value = "timeout", + .lkp_data_type = NLA_U32, + }, + [LNET_NET_O2IBLND_TUNABLES_ATTR_LND_TOS] = { + .lkp_value = "tos", + .lkp_data_type = NLA_S16, 
+ }, + }, +}; + +static int +kiblnd_nl_get(int cmd, struct sk_buff *msg, int type, void *data) +{ + struct lnet_ioctl_config_o2iblnd_tunables *tuns; + struct lnet_ni *ni = data; + + if (!ni || !msg) + return -EINVAL; + + if (cmd != LNET_CMD_NETS || type != LNET_NET_LOCAL_NI_ATTR_LND_TUNABLES) + return -EOPNOTSUPP; + + tuns = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib; + nla_put_u32(msg, LNET_NET_O2IBLND_TUNABLES_ATTR_HIW_PEER_CREDITS, + tuns->lnd_peercredits_hiw); + if (tuns->lnd_map_on_demand) { + nla_put_flag(msg, + LNET_NET_O2IBLND_TUNABLES_ATTR_MAP_ON_DEMAND); + } + nla_put_u32(msg, LNET_NET_O2IBLND_TUNABLES_ATTR_CONCURRENT_SENDS, + tuns->lnd_concurrent_sends); + nla_put_u32(msg, LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_POOL_SIZE, + tuns->lnd_fmr_pool_size); + nla_put_u32(msg, LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_FLUSH_TRIGGER, + tuns->lnd_fmr_flush_trigger); + nla_put_u32(msg, LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_CACHE, + tuns->lnd_fmr_cache); + nla_put_u16(msg, LNET_NET_O2IBLND_TUNABLES_ATTR_NTX, tuns->lnd_ntx); + nla_put_u16(msg, LNET_NET_O2IBLND_TUNABLES_ATTR_CONNS_PER_PEER, + tuns->lnd_conns_per_peer); + nla_put_u32(msg, LNET_NET_O2IBLND_TUNABLES_ATTR_LND_TIMEOUT, + kiblnd_timeout()); + nla_put_s16(msg, LNET_NET_O2IBLND_TUNABLES_ATTR_LND_TOS, + tuns->lnd_tos); + + return 0; +} + +static inline void +kiblnd_nl_set_default(int cmd, int type, void *data) { - time64_t last_alive = 0; - time64_t now = ktime_get_seconds(); - rwlock_t *glock = &kiblnd_data.kib_global_lock; - struct kib_peer_ni *peer_ni; - unsigned long flags; + struct lnet_lnd_tunables *tunables = data; + struct lnet_ioctl_config_o2iblnd_tunables *lt; + struct lnet_ioctl_config_o2iblnd_tunables *df; - read_lock_irqsave(glock, flags); + lt = &tunables->lnd_tun_u.lnd_o2ib; + df = &kib_default_tunables; + switch (type) { + case LNET_NET_O2IBLND_TUNABLES_ATTR_HIW_PEER_CREDITS: + lt->lnd_peercredits_hiw = df->lnd_peercredits_hiw; + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_MAP_ON_DEMAND: + lt->lnd_map_on_demand = df->lnd_map_on_demand; + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_CONCURRENT_SENDS: + lt->lnd_concurrent_sends = df->lnd_concurrent_sends; + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_POOL_SIZE: + lt->lnd_fmr_pool_size = df->lnd_fmr_pool_size; + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_FLUSH_TRIGGER: + lt->lnd_fmr_flush_trigger = df->lnd_fmr_flush_trigger; + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_CACHE: + lt->lnd_fmr_cache = df->lnd_fmr_cache; + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_NTX: + lt->lnd_ntx = df->lnd_ntx; + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_LND_TIMEOUT: + lt->lnd_timeout = df->lnd_timeout; + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_CONNS_PER_PEER: + lt->lnd_conns_per_peer = df->lnd_conns_per_peer; + fallthrough; + default: + break; + } - peer_ni = kiblnd_find_peer_locked(ni, nid); - if (peer_ni != NULL) - last_alive = peer_ni->ibp_last_alive; +} - read_unlock_irqrestore(glock, flags); +static int +kiblnd_nl_set(int cmd, struct nlattr *attr, int type, void *data) +{ + struct lnet_lnd_tunables *tunables = data; + int rc = 0; + s64 num; - if (last_alive != 0) - *when = last_alive; + if (cmd != LNET_CMD_NETS) + return -EOPNOTSUPP; - /* peer_ni is not persistent in hash, trigger peer_ni creation - * and connection establishment with a NULL tx */ - if (peer_ni == NULL) - kiblnd_launch_tx(ni, NULL, nid); + if (!attr) { + kiblnd_nl_set_default(cmd, type, data); + return 0; + } + + if (nla_type(attr) != LN_SCALAR_ATTR_INT_VALUE) + return -EINVAL; + + switch (type) { + case 
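kiblnd_nl_get() exports each tunable with the nla_put_*() variant matching its declared lkp_data_type (u32/u16/s16, plus a bare flag for map_on_demand). nla_put_*() can fail with -EMSGSIZE once the skb runs out of tailroom, so a stricter fill would propagate that; a hedged sketch of such a variant over two of the attributes above, assuming <net/netlink.h>:

static int fill_tunables(struct sk_buff *msg, u32 credits_hiw,
			 bool map_on_demand)
{
	if (nla_put_u32(msg, LNET_NET_O2IBLND_TUNABLES_ATTR_HIW_PEER_CREDITS,
			credits_hiw))
		return -EMSGSIZE;	/* no room left in the skb */

	if (map_on_demand &&
	    nla_put_flag(msg, LNET_NET_O2IBLND_TUNABLES_ATTR_MAP_ON_DEMAND))
		return -EMSGSIZE;	/* flag attrs carry no payload */

	return 0;
}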
LNET_NET_O2IBLND_TUNABLES_ATTR_HIW_PEER_CREDITS: + tunables->lnd_tun_u.lnd_o2ib.lnd_peercredits_hiw = nla_get_s64(attr); + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_MAP_ON_DEMAND: + tunables->lnd_tun_u.lnd_o2ib.lnd_map_on_demand = nla_get_s64(attr); + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_CONCURRENT_SENDS: + tunables->lnd_tun_u.lnd_o2ib.lnd_concurrent_sends = nla_get_s64(attr); + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_POOL_SIZE: + tunables->lnd_tun_u.lnd_o2ib.lnd_fmr_pool_size = nla_get_s64(attr); + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_FLUSH_TRIGGER: + tunables->lnd_tun_u.lnd_o2ib.lnd_fmr_flush_trigger = nla_get_s64(attr); + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_CACHE: + tunables->lnd_tun_u.lnd_o2ib.lnd_fmr_cache = nla_get_s64(attr); + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_NTX: + tunables->lnd_tun_u.lnd_o2ib.lnd_ntx = nla_get_s64(attr); + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_LND_TIMEOUT: + tunables->lnd_tun_u.lnd_o2ib.lnd_timeout = nla_get_s64(attr); + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_CONNS_PER_PEER: + num = nla_get_s64(attr); + if (num >= 0 && num < 128) + tunables->lnd_tun_u.lnd_o2ib.lnd_conns_per_peer = num; + else + rc = -ERANGE; + break; + case LNET_NET_O2IBLND_TUNABLES_ATTR_LND_TOS: + num = nla_get_s64(attr); + tunables->lnd_tun_u.lnd_o2ib.lnd_tos = num; + fallthrough; + default: + break; + } - CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago\n", - libcfs_nid2str(nid), peer_ni, - last_alive ? now - last_alive : -1); + return rc; } static void @@ -1402,8 +1549,7 @@ kiblnd_current_hdev(struct kib_dev *dev) if (i++ % 50 == 0) CDEBUG(D_NET, "%s: Wait for failover\n", dev->ibd_ifname); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1) / 100); + schedule_timeout_interruptible(cfs_time_seconds(1) / 100); read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); } @@ -1433,11 +1579,11 @@ kiblnd_map_tx_pool(struct kib_tx_pool *tpo) dev = net->ibn_dev; - /* pre-mapped messages are not bigger than 1 page */ - CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE); + /* pre-mapped messages are not bigger than 1 page */ + BUILD_BUG_ON(IBLND_MSG_SIZE > PAGE_SIZE); - /* No fancy arithmetic when we do the buffer calculations */ - CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0); + /* No fancy arithmetic when we do the buffer calculations */ + BUILD_BUG_ON(PAGE_SIZE % IBLND_MSG_SIZE != 0); tpo->tpo_hdev = kiblnd_current_hdev(dev); @@ -1474,16 +1620,19 @@ kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo) { LASSERT(fpo->fpo_map_count == 0); +#ifdef HAVE_OFED_FMR_POOL_API if (fpo->fpo_is_fmr && fpo->fmr.fpo_fmr_pool) { ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool); - } else { + } else +#endif /* HAVE_OFED_FMR_POOL_API */ + { struct kib_fast_reg_descriptor *frd, *tmp; int i = 0; list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list, frd_list) { list_del(&frd->frd_list); -#ifndef HAVE_IB_MAP_MR_SG +#ifndef HAVE_OFED_IB_MAP_MR_SG ib_free_fast_reg_page_list(frd->frd_frpl); #endif ib_dereg_mr(frd->frd_mr); @@ -1530,11 +1679,12 @@ kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables, return max(IBLND_FMR_POOL_FLUSH, size); } +#ifdef HAVE_OFED_FMR_POOL_API static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo) { struct ib_fmr_pool_param param = { - .max_pages_per_fmr = LNET_MAX_IOV, + .max_pages_per_fmr = IBLND_MAX_RDMA_FRAGS, .page_shift = PAGE_SHIFT, .access = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE), @@ -1558,6 +1708,7 @@ static int kiblnd_alloc_fmr_pool(struct 
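kiblnd_nl_set() receives every scalar as a signed 64-bit netlink attribute and narrows it into the typed tunable, which is why conns_per_peer gets an explicit range check before landing in its u16. That guard in isolation; set_conns_per_peer() is an illustrative helper, not part of the LND:

static int set_conns_per_peer(struct nlattr *attr, u16 *out)
{
	s64 num;

	if (nla_type(attr) != LN_SCALAR_ATTR_INT_VALUE)
		return -EINVAL;		/* expect LNet's scalar encoding */

	num = nla_get_s64(attr);
	if (num < 0 || num >= 128)
		return -ERANGE;		/* policy range, well inside u16 */

	*out = num;
	return 0;
}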
kib_fmr_poolset *fps, return rc; } +#endif /* HAVE_OFED_FMR_POOL_API */ static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo, @@ -1566,7 +1717,9 @@ static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fast_reg_descriptor *frd, *tmp; int i, rc; +#ifdef HAVE_OFED_FMR_POOL_API fpo->fpo_is_fmr = false; +#endif INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list); fpo->fast_reg.fpo_pool_size = 0; @@ -1580,9 +1733,9 @@ static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, } frd->frd_mr = NULL; -#ifndef HAVE_IB_MAP_MR_SG +#ifndef HAVE_OFED_IB_MAP_MR_SG frd->frd_frpl = ib_alloc_fast_reg_page_list(fpo->fpo_hdev->ibh_ibdev, - LNET_MAX_IOV); + IBLND_MAX_RDMA_FRAGS); if (IS_ERR(frd->frd_frpl)) { rc = PTR_ERR(frd->frd_frpl); CERROR("Failed to allocate ib_fast_reg_page_list: %d\n", @@ -1592,9 +1745,9 @@ static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, } #endif -#ifdef HAVE_IB_ALLOC_FAST_REG_MR +#ifdef HAVE_OFED_IB_ALLOC_FAST_REG_MR frd->frd_mr = ib_alloc_fast_reg_mr(fpo->fpo_hdev->ibh_pd, - LNET_MAX_IOV); + IBLND_MAX_RDMA_FRAGS); #else /* * it is expected to get here if this is an MLX-5 card. @@ -1612,7 +1765,7 @@ static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, #else IB_MR_TYPE_MEM_REG, #endif - LNET_MAX_IOV); + IBLND_MAX_RDMA_FRAGS); if ((*kiblnd_tunables.kib_use_fastreg_gaps == 1) && (dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT)) CWARN("using IB_MR_TYPE_SG_GAPS, expect a performance drop\n"); @@ -1624,9 +1777,7 @@ static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, goto out_middle; } - /* There appears to be a bug in MLX5 code where you must - * invalidate the rkey of a new FastReg pool before first - * using it. Thus, I am marking the FRD invalid here. */ + /* indicate that the local invalidate needs to be generated */ frd->frd_valid = false; list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list); @@ -1638,7 +1789,7 @@ static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, out_middle: if (frd->frd_mr) ib_dereg_mr(frd->frd_mr); -#ifndef HAVE_IB_MAP_MR_SG +#ifndef HAVE_OFED_IB_MAP_MR_SG if (frd->frd_frpl) ib_free_fast_reg_page_list(frd->frd_frpl); #endif @@ -1648,7 +1799,7 @@ out: list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list, frd_list) { list_del(&frd->frd_list); -#ifndef HAVE_IB_MAP_MR_SG +#ifndef HAVE_OFED_IB_MAP_MR_SG ib_free_fast_reg_page_list(frd->frd_frpl); #endif ib_dereg_mr(frd->frd_mr); @@ -1673,9 +1824,11 @@ static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps, fpo->fpo_hdev = kiblnd_current_hdev(dev); +#ifdef HAVE_OFED_FMR_POOL_API if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED) rc = kiblnd_alloc_fmr_pool(fps, fpo); else +#endif /* HAVE_OFED_FMR_POOL_API */ rc = kiblnd_alloc_freg_pool(fps, fpo, dev->ibd_dev_caps); if (rc) goto out_fpo; @@ -1695,22 +1848,21 @@ out_fpo: static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps, struct list_head *zombies) { + struct kib_fmr_pool *fpo; + if (fps->fps_net == NULL) /* intialized? 
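kiblnd_alloc_freg_pool() prebuilds a free list of FastReg descriptors, one ib_alloc_mr() each, now sized by IBLND_MAX_RDMA_FRAGS (what a single RDMA can actually fragment into) rather than LNET_MAX_IOV. A hedged sketch of the fill loop, assuming <rdma/ib_verbs.h> and <linux/slab.h>; struct frd here is illustrative, not the LND's kib_fast_reg_descriptor:

struct frd {
	struct list_head	 link;
	struct ib_mr		*mr;
	bool			 valid;		/* rkey safe to post? */
};

static int freg_pool_fill(struct ib_pd *pd, struct list_head *free_list,
			  int count, u32 max_frags)
{
	while (count-- > 0) {
		struct frd *d = kzalloc(sizeof(*d), GFP_KERNEL);

		if (!d)
			return -ENOMEM;		/* caller unwinds the list */

		d->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_frags);
		if (IS_ERR(d->mr)) {
			int rc = PTR_ERR(d->mr);

			kfree(d);
			return rc;
		}
		d->valid = false;	/* force an invalidate before reuse */
		list_add_tail(&d->link, free_list);
	}
	return 0;
}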
*/ return; spin_lock(&fps->fps_lock); - while (!list_empty(&fps->fps_pool_list)) { - struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next, - struct kib_fmr_pool, - fpo_list); - + while ((fpo = list_first_entry_or_null(&fps->fps_pool_list, + struct kib_fmr_pool, + fpo_list)) != NULL) { fpo->fpo_failed = 1; - list_del(&fpo->fpo_list); if (fpo->fpo_map_count == 0) - list_add(&fpo->fpo_list, zombies); + list_move(&fpo->fpo_list, zombies); else - list_add(&fpo->fpo_list, &fps->fps_failed_pool_list); + list_move(&fpo->fpo_list, &fps->fps_failed_pool_list); } spin_unlock(&fps->fps_lock); @@ -1763,6 +1915,7 @@ kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, time64_t now) return now >= fpo->fpo_deadline; } +#if defined(HAVE_OFED_FMR_POOL_API) || !defined(HAVE_OFED_IB_MAP_MR_SG) static int kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd) { @@ -1784,6 +1937,7 @@ kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd) return npages; } +#endif void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status) @@ -1793,12 +1947,13 @@ kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status) struct kib_fmr_poolset *fps; time64_t now = ktime_get_seconds(); struct kib_fmr_pool *tmp; - int rc; if (!fpo) return; fps = fpo->fpo_owner; + +#ifdef HAVE_OFED_FMR_POOL_API if (fpo->fpo_is_fmr) { if (fmr->fmr_pfmr) { ib_fmr_pool_unmap(fmr->fmr_pfmr); @@ -1806,18 +1961,19 @@ kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status) } if (status) { - rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool); + int rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool); LASSERT(!rc); } - } else { + } else +#endif /* HAVE_OFED_FMR_POOL_API */ + { struct kib_fast_reg_descriptor *frd = fmr->fmr_frd; - if (frd) { - frd->frd_valid = false; + frd->frd_posted = false; + fmr->fmr_frd = NULL; spin_lock(&fps->fps_lock); list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list); spin_unlock(&fps->fps_lock); - fmr->fmr_frd = NULL; } } fmr->fmr_pool = NULL; @@ -1846,11 +2002,13 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx, struct kib_fmr *fmr) { struct kib_fmr_pool *fpo; - __u64 *pages = tx->tx_pages; __u64 version; bool is_rx = (rd != tx->tx_rd); - bool tx_pages_mapped = 0; +#ifdef HAVE_OFED_FMR_POOL_API + __u64 *pages = tx->tx_pages; + bool tx_pages_mapped = false; int npages = 0; +#endif int rc; again: @@ -1860,6 +2018,8 @@ again: fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE; fpo->fpo_map_count++; +#ifdef HAVE_OFED_FMR_POOL_API + fmr->fmr_pfmr = NULL; if (fpo->fpo_is_fmr) { struct ib_pool_fmr *pfmr; @@ -1867,24 +2027,26 @@ again: if (!tx_pages_mapped) { npages = kiblnd_map_tx_pages(tx, rd); - tx_pages_mapped = 1; + tx_pages_mapped = true; } pfmr = kib_fmr_pool_map(fpo->fmr.fpo_fmr_pool, pages, npages, iov); if (likely(!IS_ERR(pfmr))) { fmr->fmr_key = is_rx ? 
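kiblnd_fail_fmr_poolset() drains the live pool list with the list_first_entry_or_null() + list_move() idiom: still-busy pools are parked on the failed list and reaped when their map count drops, idle ones go straight to the zombie list. The same drain in miniature, with an illustrative struct pool:

struct pool {
	struct list_head	link;
	int			failed;
	int			busy;	/* cf. fpo_map_count */
};

static void fail_pools(struct list_head *pools, struct list_head *zombies,
		       struct list_head *failed)
{
	struct pool *po;

	while ((po = list_first_entry_or_null(pools, struct pool,
					      link)) != NULL) {
		po->failed = 1;
		if (po->busy == 0)
			list_move(&po->link, zombies);	/* reap now */
		else
			list_move(&po->link, failed);	/* reap on last unmap */
	}
}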
pfmr->fmr->rkey - : pfmr->fmr->lkey; + : pfmr->fmr->lkey; fmr->fmr_frd = NULL; fmr->fmr_pfmr = pfmr; fmr->fmr_pool = fpo; return 0; } rc = PTR_ERR(pfmr); - } else { + } else +#endif /* HAVE_OFED_FMR_POOL_API */ + { if (!list_empty(&fpo->fast_reg.fpo_pool_list)) { struct kib_fast_reg_descriptor *frd; -#ifdef HAVE_IB_MAP_MR_SG +#ifdef HAVE_OFED_IB_MAP_MR_SG struct ib_reg_wr *wr; int n; #else @@ -1893,13 +2055,14 @@ again: #endif struct ib_mr *mr; - frd = list_first_entry(&fpo->fast_reg.fpo_pool_list, - struct kib_fast_reg_descriptor, - frd_list); + frd = list_first_entry( + &fpo->fast_reg.fpo_pool_list, + struct kib_fast_reg_descriptor, + frd_list); list_del(&frd->frd_list); spin_unlock(&fps->fps_lock); -#ifndef HAVE_IB_MAP_MR_SG +#ifndef HAVE_OFED_IB_MAP_MR_SG frpl = frd->frd_frpl; #endif mr = frd->frd_mr; @@ -1908,6 +2071,7 @@ again: struct ib_rdma_wr *inv_wr; __u32 key = is_rx ? mr->rkey : mr->lkey; + frd->frd_valid = true; inv_wr = &frd->frd_inv_wr; memset(inv_wr, 0, sizeof(*inv_wr)); @@ -1920,17 +2084,17 @@ again: ib_update_fast_reg_key(mr, key); } -#ifdef HAVE_IB_MAP_MR_SG -#ifdef HAVE_IB_MAP_MR_SG_5ARGS +#ifdef HAVE_OFED_IB_MAP_MR_SG +#ifdef HAVE_OFED_IB_MAP_MR_SG_5ARGS n = ib_map_mr_sg(mr, tx->tx_frags, - tx->tx_nfrags, NULL, PAGE_SIZE); + rd->rd_nfrags, NULL, PAGE_SIZE); #else n = ib_map_mr_sg(mr, tx->tx_frags, - tx->tx_nfrags, PAGE_SIZE); -#endif - if (unlikely(n != tx->tx_nfrags)) { - CERROR("Failed to map mr %d/%d " - "elements\n", n, tx->tx_nfrags); + rd->rd_nfrags, PAGE_SIZE); +#endif /* HAVE_OFED_IB_MAP_MR_SG_5ARGS */ + if (unlikely(n != rd->rd_nfrags)) { + CERROR("Failed to map mr %d/%d elements\n", + n, rd->rd_nfrags); return n < 0 ? n : -EINVAL; } @@ -1945,15 +2109,15 @@ again: wr->key = is_rx ? mr->rkey : mr->lkey; wr->access = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE); -#else +#else /* HAVE_OFED_IB_MAP_MR_SG */ if (!tx_pages_mapped) { npages = kiblnd_map_tx_pages(tx, rd); - tx_pages_mapped = 1; + tx_pages_mapped = true; } LASSERT(npages <= frpl->max_page_list_len); memcpy(frpl->page_list, pages, - sizeof(*pages) * npages); + sizeof(*pages) * npages); /* Prepare FastReg WR */ wr = &frd->frd_fastreg_wr; @@ -1968,16 +2132,16 @@ again: wr->wr.wr.fast_reg.page_shift = PAGE_SHIFT; wr->wr.wr.fast_reg.length = nob; wr->wr.wr.fast_reg.rkey = - is_rx ? mr->rkey : mr->lkey; + is_rx ? mr->rkey : mr->lkey; wr->wr.wr.fast_reg.access_flags = - (IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE); -#endif + (IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE); +#endif /* HAVE_OFED_IB_MAP_MR_SG */ fmr->fmr_key = is_rx ? 
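The FastReg branch of kiblnd_fmr_pool_map() is also fixed to map rd->rd_nfrags, the fragments this descriptor really carries, instead of tx->tx_nfrags. On kernels with the 5-argument ib_map_mr_sg() the step is: load the sg list into the MR's page table, refresh the 8-bit key byte so stale rkeys fault, then chain an IB_WR_REG_MR work request. A hedged sketch of that step alone; build_reg_wr() is illustrative:

static int build_reg_wr(struct ib_mr *mr, struct scatterlist *sg,
			int nfrags, struct ib_reg_wr *wr)
{
	int n;

	n = ib_map_mr_sg(mr, sg, nfrags, NULL, PAGE_SIZE);
	if (n != nfrags)		/* every fragment must be consumed */
		return n < 0 ? n : -EINVAL;

	/* bump the key byte so a peer holding the old rkey gets fenced */
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	memset(wr, 0, sizeof(*wr));
	wr->wr.opcode = IB_WR_REG_MR;
	wr->mr = mr;
	wr->key = mr->rkey;
	wr->access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
	return 0;
}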
mr->rkey : mr->lkey; fmr->fmr_frd = frd; - fmr->fmr_pfmr = NULL; fmr->fmr_pool = fpo; + frd->frd_posted = false; return 0; } spin_unlock(&fps->fps_lock); @@ -2002,7 +2166,7 @@ again: spin_unlock(&fps->fps_lock); CDEBUG(D_NET, "Another thread is allocating new " "FMR pool, waiting for her to complete\n"); - schedule(); + wait_var_event(fps, !fps->fps_increasing); goto again; } @@ -2020,6 +2184,7 @@ again: rc = kiblnd_create_fmr_pool(fps, &fpo); spin_lock(&fps->fps_lock); fps->fps_increasing = 0; + wake_up_var(fps); if (rc == 0) { fps->fps_version++; list_add_tail(&fpo->fpo_list, &fps->fps_pool_list); @@ -2057,8 +2222,9 @@ kiblnd_destroy_pool_list(struct list_head *head) { struct kib_pool *pool; - while (!list_empty(head)) { - pool = list_entry(head->next, struct kib_pool, po_list); + while ((pool = list_first_entry_or_null(head, + struct kib_pool, + po_list)) != NULL) { list_del(&pool->po_list); LASSERT(pool->po_owner != NULL); @@ -2069,20 +2235,20 @@ kiblnd_destroy_pool_list(struct list_head *head) static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies) { + struct kib_pool *po; + if (ps->ps_net == NULL) /* intialized? */ return; spin_lock(&ps->ps_lock); - while (!list_empty(&ps->ps_pool_list)) { - struct kib_pool *po = list_entry(ps->ps_pool_list.next, - struct kib_pool, po_list); - + while ((po = list_first_entry_or_null(&ps->ps_pool_list, + struct kib_pool, + po_list)) != NULL) { po->po_failed = 1; - list_del(&po->po_list); if (po->po_allocated == 0) - list_add(&po->po_list, zombies); + list_move(&po->po_list, zombies); else - list_add(&po->po_list, &ps->ps_failed_pool_list); + list_move(&po->po_list, &ps->ps_failed_pool_list); } spin_unlock(&ps->ps_lock); } @@ -2110,15 +2276,15 @@ kiblnd_init_poolset(struct kib_poolset *ps, int cpt, memset(ps, 0, sizeof(struct kib_poolset)); ps->ps_cpt = cpt; - ps->ps_net = net; - ps->ps_pool_create = po_create; - ps->ps_pool_destroy = po_destroy; - ps->ps_node_init = nd_init; - ps->ps_node_fini = nd_fini; - ps->ps_pool_size = size; - if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name)) - >= sizeof(ps->ps_name)) - return -E2BIG; + ps->ps_net = net; + ps->ps_pool_create = po_create; + ps->ps_pool_destroy = po_destroy; + ps->ps_node_init = nd_init; + ps->ps_node_fini = nd_fini; + ps->ps_pool_size = size; + rc = strscpy(ps->ps_name, name, sizeof(ps->ps_name)); + if (rc < 0) + return rc; spin_lock_init(&ps->ps_lock); INIT_LIST_HEAD(&ps->ps_pool_list); INIT_LIST_HEAD(&ps->ps_failed_pool_list); @@ -2208,13 +2374,11 @@ again: /* another thread is allocating a new pool */ spin_unlock(&ps->ps_lock); trips++; - CDEBUG(D_NET, "Another thread is allocating new " - "%s pool, waiting %d HZs for her to complete." - "trips = %d\n", + CDEBUG(D_NET, + "Another thread is allocating new %s pool, waiting %d jiffies for her to complete. 
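The old behaviour of calling schedule() in a loop while another thread grew the pool is replaced by a real sleep: wait_var_event(fps, !fps->fps_increasing) paired with wake_up_var(fps) when the grower finishes. The single-grower pattern in isolation, assuming <linux/wait_bit.h> (v4.17+); pool_set and the alloc_pool callback are illustrative:

struct pool_set {
	spinlock_t	lock;		/* initialised by the owner */
	int		growing;
};

static int pool_grow_one(struct pool_set *ps,
			 int (*alloc_pool)(struct pool_set *ps))
{
	int rc;

	spin_lock(&ps->lock);
	if (ps->growing) {
		spin_unlock(&ps->lock);
		wait_var_event(ps, !ps->growing);	/* sleep, don't spin */
		return -EAGAIN;		/* caller retries its fast path */
	}
	ps->growing = 1;
	spin_unlock(&ps->lock);

	rc = alloc_pool(ps);		/* may sleep; done unlocked */

	spin_lock(&ps->lock);
	ps->growing = 0;
	wake_up_var(ps);		/* release everyone who napped */
	spin_unlock(&ps->lock);
	return rc;
}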
trips = %d\n", ps->ps_name, interval, trips); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(interval); + schedule_timeout_interruptible(interval); if (interval < cfs_time_seconds(1)) interval *= 2; @@ -2233,7 +2397,7 @@ again: CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name); time_before = ktime_get(); rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool); - CDEBUG(D_NET, "ps_pool_create took %lld ms to complete", + CDEBUG(D_NET, "ps_pool_create took %lld ms to complete\n", ktime_ms_delta(ktime_get(), time_before)); spin_lock(&ps->ps_lock); @@ -2267,38 +2431,35 @@ kiblnd_destroy_tx_pool(struct kib_pool *pool) if (tpo->tpo_tx_descs == NULL) goto out; - for (i = 0; i < pool->po_size; i++) { + for (i = 0; i < pool->po_size; i++) { struct kib_tx *tx = &tpo->tpo_tx_descs[i]; int wrq_sge = *kiblnd_tunables.kib_wrq_sge; list_del(&tx->tx_list); - if (tx->tx_pages != NULL) - LIBCFS_FREE(tx->tx_pages, - LNET_MAX_IOV * - sizeof(*tx->tx_pages)); - if (tx->tx_frags != NULL) - LIBCFS_FREE(tx->tx_frags, - (1 + IBLND_MAX_RDMA_FRAGS) * - sizeof(*tx->tx_frags)); - if (tx->tx_wrq != NULL) - LIBCFS_FREE(tx->tx_wrq, - (1 + IBLND_MAX_RDMA_FRAGS) * - sizeof(*tx->tx_wrq)); - if (tx->tx_sge != NULL) - LIBCFS_FREE(tx->tx_sge, - (1 + IBLND_MAX_RDMA_FRAGS) * wrq_sge * - sizeof(*tx->tx_sge)); - if (tx->tx_rd != NULL) - LIBCFS_FREE(tx->tx_rd, + if (tx->tx_pages != NULL) + CFS_FREE_PTR_ARRAY(tx->tx_pages, LNET_MAX_IOV); + if (tx->tx_frags != NULL) + CFS_FREE_PTR_ARRAY(tx->tx_frags, + IBLND_MAX_RDMA_FRAGS); + if (tx->tx_wrq != NULL) + CFS_FREE_PTR_ARRAY(tx->tx_wrq, + IBLND_MAX_RDMA_FRAGS); + if (tx->tx_sge != NULL) { + /* +1 is for the lnet header/message itself */ + CFS_FREE_PTR_ARRAY(tx->tx_sge, + (IBLND_MAX_RDMA_FRAGS * + wrq_sge + 1)); + } + if (tx->tx_rd != NULL) + LIBCFS_FREE(tx->tx_rd, offsetof(struct kib_rdma_desc, - rd_frags[IBLND_MAX_RDMA_FRAGS])); - } + rd_frags[IBLND_MAX_RDMA_FRAGS])); + } - LIBCFS_FREE(tpo->tpo_tx_descs, - pool->po_size * sizeof(struct kib_tx)); + CFS_FREE_PTR_ARRAY(tpo->tpo_tx_descs, pool->po_size); out: - kiblnd_fini_pool(pool); - LIBCFS_FREE(tpo, sizeof(struct kib_tx_pool)); + kiblnd_fini_pool(pool); + CFS_FREE_PTR(tpo); } static int kiblnd_tx_pool_size(struct lnet_ni *ni, int ncpts) @@ -2334,7 +2495,7 @@ kiblnd_create_tx_pool(struct kib_poolset *ps, int size, struct kib_pool **pp_po) npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE; if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) { CERROR("Can't allocate tx pages: %d\n", npg); - LIBCFS_FREE(tpo, sizeof(struct kib_tx_pool)); + CFS_FREE_PTR(tpo); return -ENOMEM; } @@ -2362,21 +2523,22 @@ kiblnd_create_tx_pool(struct kib_poolset *ps, int size, struct kib_pool **pp_po) } LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt, - (1 + IBLND_MAX_RDMA_FRAGS) * + IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags)); if (tx->tx_frags == NULL) break; - sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1); + sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS); LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt, - (1 + IBLND_MAX_RDMA_FRAGS) * + IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_wrq)); if (tx->tx_wrq == NULL) break; + /* +1 is for the lnet header/message itself */ LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt, - (1 + IBLND_MAX_RDMA_FRAGS) * wrq_sge * + (IBLND_MAX_RDMA_FRAGS * wrq_sge + 1) * sizeof(*tx->tx_sge)); if (tx->tx_sge == NULL) break; @@ -2445,7 +2607,7 @@ kiblnd_net_init_pools(struct kib_net *net, struct lnet_ni *ni, __u32 *cpts, int ncpts) { struct 
lnet_ioctl_config_o2iblnd_tunables *tunables; -#ifdef HAVE_IB_GET_DMA_MR +#ifdef HAVE_OFED_IB_GET_DMA_MR unsigned long flags; #endif int cpt; @@ -2454,7 +2616,7 @@ kiblnd_net_init_pools(struct kib_net *net, struct lnet_ni *ni, __u32 *cpts, tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib; -#ifdef HAVE_IB_GET_DMA_MR +#ifdef HAVE_OFED_IB_GET_DMA_MR read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); /* * if lnd_map_on_demand is zero then we have effectively disabled @@ -2507,7 +2669,7 @@ kiblnd_net_init_pools(struct kib_net *net, struct lnet_ni *ni, __u32 *cpts, if (i > 0) LASSERT(i == ncpts); -#ifdef HAVE_IB_GET_DMA_MR +#ifdef HAVE_OFED_IB_GET_DMA_MR create_tx_pool: #endif net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(), @@ -2518,26 +2680,123 @@ kiblnd_net_init_pools(struct kib_net *net, struct lnet_ni *ni, __u32 *cpts, goto failed; } - for (i = 0; i < ncpts; i++) { - cpt = (cpts == NULL) ? i : cpts[i]; - rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset, - cpt, net, "TX", - kiblnd_tx_pool_size(ni, ncpts), - kiblnd_create_tx_pool, - kiblnd_destroy_tx_pool, - kiblnd_tx_init, NULL); - if (rc != 0) { - CERROR("Can't initialize TX pool for CPT %d: %d\n", - cpt, rc); - goto failed; + for (i = 0; i < ncpts; i++) { + cpt = (cpts == NULL) ? i : cpts[i]; + rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset, + cpt, net, "TX", + kiblnd_tx_pool_size(ni, ncpts), + kiblnd_create_tx_pool, + kiblnd_destroy_tx_pool, + kiblnd_tx_init, NULL); + if (rc != 0) { + CERROR("Can't initialize TX pool for CPT %d: %d\n", + cpt, rc); + goto failed; + } + } + + return 0; + failed: + kiblnd_net_fini_pools(net); + LASSERT(rc != 0); + return rc; +} + +static int +kiblnd_port_get_attr(struct kib_hca_dev *hdev) +{ + struct ib_port_attr *port_attr; + int rc; + unsigned long flags; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + + LIBCFS_ALLOC(port_attr, sizeof(*port_attr)); + if (port_attr == NULL) { + CDEBUG(D_NETERROR, "Out of memory\n"); + return -ENOMEM; + } + + rc = ib_query_port(hdev->ibh_ibdev, hdev->ibh_port, port_attr); + + write_lock_irqsave(g_lock, flags); + + if (rc == 0) + hdev->ibh_state = port_attr->state == IB_PORT_ACTIVE + ? 
IBLND_DEV_PORT_ACTIVE + : IBLND_DEV_PORT_DOWN; + + write_unlock_irqrestore(g_lock, flags); + LIBCFS_FREE(port_attr, sizeof(*port_attr)); + + if (rc != 0) { + CDEBUG(D_NETERROR, "Failed to query IB port: %d\n", rc); + return rc; + } + return 0; +} + +static inline void +kiblnd_set_ni_fatal_on(struct kib_hca_dev *hdev, int val) +{ + struct kib_net *net; + __u32 ni_state_before; + bool update_ping_buf = false; + struct lnet_ni *ni = NULL; + + /* for health check */ + list_for_each_entry(net, &hdev->ibh_dev->ibd_nets, ibn_list) { + ni = net->ibn_ni; + if (val) + CDEBUG(D_NETERROR, "Fatal device error for NI %s\n", + libcfs_nidstr(&ni->ni_nid)); + ni_state_before = lnet_set_link_fatal_state(ni, val); + + if (!update_ping_buf && + (ni->ni_state == LNET_NI_STATE_ACTIVE) && + (val != ni_state_before) && + (net->ibn_init == IBLND_INIT_ALL)) + update_ping_buf = true; + } + + if (update_ping_buf) + lnet_mark_ping_buffer_for_update(); +} + +static void +kiblnd_event_handler(struct ib_event_handler *handler, struct ib_event *event) +{ + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + struct kib_hca_dev *hdev; + unsigned long flags; + + hdev = container_of(handler, struct kib_hca_dev, ibh_event_handler); + + write_lock_irqsave(g_lock, flags); + + switch (event->event) { + case IB_EVENT_DEVICE_FATAL: + CDEBUG(D_NET, "IB device fatal\n"); + hdev->ibh_state = IBLND_DEV_FATAL; + kiblnd_set_ni_fatal_on(hdev, 1); + break; + case IB_EVENT_PORT_ACTIVE: + CDEBUG(D_NET, "IB port active\n"); + if (event->element.port_num == hdev->ibh_port) { + hdev->ibh_state = IBLND_DEV_PORT_ACTIVE; + kiblnd_set_ni_fatal_on(hdev, 0); + } + break; + case IB_EVENT_PORT_ERR: + CDEBUG(D_NET, "IB port err\n"); + if (event->element.port_num == hdev->ibh_port) { + hdev->ibh_state = IBLND_DEV_PORT_DOWN; + kiblnd_set_ni_fatal_on(hdev, 1); } + break; + default: + break; } - - return 0; - failed: - kiblnd_net_fini_pools(net); - LASSERT(rc != 0); - return rc; + write_unlock_irqrestore(g_lock, flags); } static int @@ -2545,6 +2804,7 @@ kiblnd_hdev_get_attr(struct kib_hca_dev *hdev) { struct ib_device_attr *dev_attr; int rc = 0; + int rc2 = 0; /* It's safe to assume a HCA can handle a page size * matching that of the native system */ @@ -2552,7 +2812,7 @@ kiblnd_hdev_get_attr(struct kib_hca_dev *hdev) hdev->ibh_page_size = 1 << PAGE_SHIFT; hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1); -#ifndef HAVE_IB_DEVICE_ATTRS +#ifndef HAVE_OFED_IB_DEVICE_ATTRS LIBCFS_ALLOC(dev_attr, sizeof(*dev_attr)); if (dev_attr == NULL) { CERROR("Out of memory\n"); @@ -2572,7 +2832,8 @@ kiblnd_hdev_get_attr(struct kib_hca_dev *hdev) hdev->ibh_max_qp_wr = dev_attr->max_qp_wr; /* Setup device Memory Registration capabilities */ -#ifdef HAVE_IB_DEVICE_OPS +#ifdef HAVE_OFED_FMR_POOL_API +#ifdef HAVE_OFED_IB_DEVICE_OPS if (hdev->ibh_ibdev->ops.alloc_fmr && hdev->ibh_ibdev->ops.dealloc_fmr && hdev->ibh_ibdev->ops.map_phys_fmr && @@ -2585,10 +2846,12 @@ kiblnd_hdev_get_attr(struct kib_hca_dev *hdev) #endif LCONSOLE_INFO("Using FMR for registration\n"); hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FMR_ENABLED; - } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { + } else +#endif /* HAVE_OFED_FMR_POOL_API */ + if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { LCONSOLE_INFO("Using FastReg for registration\n"); hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_ENABLED; -#ifndef HAVE_IB_ALLOC_FAST_REG_MR +#ifndef HAVE_OFED_IB_ALLOC_FAST_REG_MR #ifdef IB_DEVICE_SG_GAPS_REG if (dev_attr->device_cap_flags & 
IB_DEVICE_SG_GAPS_REG) hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT; @@ -2598,10 +2861,14 @@ kiblnd_hdev_get_attr(struct kib_hca_dev *hdev) rc = -ENOSYS; } + rc2 = kiblnd_port_get_attr(hdev); + if (rc2 != 0) + return rc2; + if (rc != 0) rc = -EINVAL; -#ifndef HAVE_IB_DEVICE_ATTRS +#ifndef HAVE_OFED_IB_DEVICE_ATTRS out_clean_attr: LIBCFS_FREE(dev_attr, sizeof(*dev_attr)); #endif @@ -2614,7 +2881,7 @@ out_clean_attr: return rc; } -#ifdef HAVE_IB_GET_DMA_MR +#ifdef HAVE_OFED_IB_GET_DMA_MR static void kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev) { @@ -2630,7 +2897,10 @@ kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev) void kiblnd_hdev_destroy(struct kib_hca_dev *hdev) { -#ifdef HAVE_IB_GET_DMA_MR + if (hdev->ibh_event_handler.device != NULL) + ib_unregister_event_handler(&hdev->ibh_event_handler); + +#ifdef HAVE_OFED_IB_GET_DMA_MR kiblnd_hdev_cleanup_mrs(hdev); #endif @@ -2643,7 +2913,7 @@ kiblnd_hdev_destroy(struct kib_hca_dev *hdev) LIBCFS_FREE(hdev, sizeof(*hdev)); } -#ifdef HAVE_IB_GET_DMA_MR +#ifdef HAVE_OFED_IB_GET_DMA_MR static int kiblnd_hdev_setup_mrs(struct kib_hca_dev *hdev) { @@ -2728,30 +2998,32 @@ kiblnd_dev_failover(struct kib_dev *dev, struct net *ns) LIST_HEAD(zombie_tpo); LIST_HEAD(zombie_ppo); LIST_HEAD(zombie_fpo); - struct rdma_cm_id *cmid = NULL; + struct rdma_cm_id *cmid = NULL; struct kib_hca_dev *hdev = NULL; struct kib_hca_dev *old; - struct ib_pd *pd; + struct ib_pd *pd; struct kib_net *net; - struct sockaddr_in addr; - unsigned long flags; - int rc = 0; + struct sockaddr_in addr; + struct net_device *netdev; + unsigned long flags; + int rc = 0; int i; + bool set_fatal = true; - LASSERT (*kiblnd_tunables.kib_dev_failover > 1 || - dev->ibd_can_failover || - dev->ibd_hdev == NULL); + LASSERT(*kiblnd_tunables.kib_dev_failover > 1 || + dev->ibd_can_failover || + dev->ibd_hdev == NULL); rc = kiblnd_dev_need_failover(dev, ns); - if (rc <= 0) - goto out; + if (rc <= 0) + goto out; - if (dev->ibd_hdev != NULL && - dev->ibd_hdev->ibh_cmid != NULL) { - /* XXX it's not good to close old listener at here, - * because we can fail to create new listener. - * But we have to close it now, otherwise rdma_bind_addr - * will return EADDRINUSE... How crap! */ + if (dev->ibd_hdev != NULL && + dev->ibd_hdev->ibh_cmid != NULL) { + /* XXX it's not good to close old listener at here, + * because we can fail to create new listener. + * But we have to close it now, otherwise rdma_bind_addr + * will return EADDRINUSE... How crap! 
*/ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); cmid = dev->ibd_hdev->ibh_cmid; @@ -2760,46 +3032,49 @@ kiblnd_dev_failover(struct kib_dev *dev, struct net *ns) dev->ibd_hdev->ibh_cmid = NULL; write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - rdma_destroy_id(cmid); - } + rdma_destroy_id(cmid); + } cmid = kiblnd_rdma_create_id(ns, kiblnd_cm_callback, dev, RDMA_PS_TCP, IB_QPT_RC); - if (IS_ERR(cmid)) { - rc = PTR_ERR(cmid); - CERROR("Failed to create cmid for failover: %d\n", rc); - goto out; - } + if (IS_ERR(cmid)) { + rc = PTR_ERR(cmid); + CERROR("Failed to create cmid for failover: %d\n", rc); + goto out; + } - memset(&addr, 0, sizeof(addr)); - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip); - addr.sin_port = htons(*kiblnd_tunables.kib_service); + memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip); + addr.sin_port = htons(*kiblnd_tunables.kib_service); - /* Bind to failover device or port */ - rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr); + /* Bind to failover device or port */ + rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr); if (rc != 0 || cmid->device == NULL) { CERROR("Failed to bind %s:%pI4h to device(%p): %d\n", dev->ibd_ifname, &dev->ibd_ifip, cmid->device, rc); - rdma_destroy_id(cmid); - goto out; - } + if (!rc && !cmid->device) + set_fatal = false; + rdma_destroy_id(cmid); + goto out; + } LIBCFS_ALLOC(hdev, sizeof(*hdev)); - if (hdev == NULL) { - CERROR("Failed to allocate kib_hca_dev\n"); - rdma_destroy_id(cmid); - rc = -ENOMEM; - goto out; - } + if (hdev == NULL) { + CERROR("Failed to allocate kib_hca_dev\n"); + rdma_destroy_id(cmid); + rc = -ENOMEM; + goto out; + } - atomic_set(&hdev->ibh_ref, 1); - hdev->ibh_dev = dev; - hdev->ibh_cmid = cmid; - hdev->ibh_ibdev = cmid->device; + atomic_set(&hdev->ibh_ref, 1); + hdev->ibh_dev = dev; + hdev->ibh_cmid = cmid; + hdev->ibh_ibdev = cmid->device; + hdev->ibh_port = cmid->port_num; -#ifdef HAVE_IB_ALLOC_PD_2ARGS +#ifdef HAVE_OFED_IB_ALLOC_PD_2ARGS pd = ib_alloc_pd(cmid->device, 0); #else pd = ib_alloc_pd(cmid->device); @@ -2810,13 +3085,13 @@ kiblnd_dev_failover(struct kib_dev *dev, struct net *ns) goto out; } - hdev->ibh_pd = pd; + hdev->ibh_pd = pd; - rc = rdma_listen(cmid, 0); - if (rc != 0) { - CERROR("Can't start new listener: %d\n", rc); - goto out; - } + rc = rdma_listen(cmid, 0); + if (rc != 0) { + CERROR("Can't start new listener: %d\n", rc); + goto out; + } rc = kiblnd_hdev_get_attr(hdev); if (rc != 0) { @@ -2824,7 +3099,7 @@ kiblnd_dev_failover(struct kib_dev *dev, struct net *ns) goto out; } -#ifdef HAVE_IB_GET_DMA_MR +#ifdef HAVE_OFED_IB_GET_DMA_MR rc = kiblnd_hdev_setup_mrs(hdev); if (rc != 0) { CERROR("Can't setup device: %d\n", rc); @@ -2832,6 +3107,10 @@ kiblnd_dev_failover(struct kib_dev *dev, struct net *ns) } #endif + INIT_IB_EVENT_HANDLER(&hdev->ibh_event_handler, + hdev->ibh_ibdev, kiblnd_event_handler); + ib_register_event_handler(&hdev->ibh_event_handler); + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); old = dev->ibd_hdev; @@ -2860,11 +3139,20 @@ kiblnd_dev_failover(struct kib_dev *dev, struct net *ns) if (hdev != NULL) kiblnd_hdev_decref(hdev); - if (rc != 0) + if (rc != 0) { dev->ibd_failed_failover++; - else + } else { dev->ibd_failed_failover = 0; + if (set_fatal) { + rcu_read_lock(); + netdev = dev_get_by_name_rcu(ns, dev->ibd_ifname); + if (netdev && (lnet_get_link_status(netdev) == 1)) + kiblnd_set_ni_fatal_on(dev->ibd_hdev, 0); + 
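+			/* netdev from dev_get_by_name_rcu() is only valid under the RCU read lock */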
rcu_read_unlock(); + } + } + return rc; } @@ -2883,27 +3171,224 @@ kiblnd_destroy_dev(struct kib_dev *dev) LIBCFS_FREE(dev, sizeof(*dev)); } +static struct kib_dev * +kiblnd_dev_search(char *ifname) +{ + struct kib_dev *alias = NULL; + struct kib_dev *dev; + char *colon; + char *colon2; + + colon = strchr(ifname, ':'); + list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { + if (strcmp(&dev->ibd_ifname[0], ifname) == 0) + return dev; + + if (alias != NULL) + continue; + + colon2 = strchr(dev->ibd_ifname, ':'); + if (colon != NULL) + *colon = 0; + if (colon2 != NULL) + *colon2 = 0; + + if (strcmp(&dev->ibd_ifname[0], ifname) == 0) + alias = dev; + + if (colon != NULL) + *colon = ':'; + if (colon2 != NULL) + *colon2 = ':'; + } + return alias; +} + +static int +kiblnd_handle_link_state_change(struct net_device *dev, + unsigned char operstate) +{ + struct lnet_ni *ni = NULL; + struct kib_dev *event_kibdev; + struct kib_net *net; + struct kib_net *cnxt; + bool link_down = !(operstate == IF_OPER_UP); + struct in_device *in_dev; + bool found_ip = false; + __u32 ni_state_before; + bool update_ping_buf = false; + int state; + DECLARE_CONST_IN_IFADDR(ifa); + + event_kibdev = kiblnd_dev_search(dev->name); + + if (!event_kibdev) + goto out; + + list_for_each_entry_safe(net, cnxt, &event_kibdev->ibd_nets, ibn_list) { + found_ip = false; + ni = net->ibn_ni; + + in_dev = __in_dev_get_rtnl(dev); + if (!in_dev) { + CDEBUG(D_NET, "Interface %s has no IPv4 status.\n", + dev->name); + ni_state_before = lnet_set_link_fatal_state(ni, 1); + goto ni_done; + } + in_dev_for_each_ifa_rtnl(ifa, in_dev) { + if (htonl(event_kibdev->ibd_ifip) == ifa->ifa_local) + found_ip = true; + } + endfor_ifa(in_dev); + + if (!found_ip) { + CDEBUG(D_NET, "Interface %s has no matching ip\n", + dev->name); + ni_state_before = lnet_set_link_fatal_state(ni, 1); + goto ni_done; + } + + if (link_down) { + ni_state_before = lnet_set_link_fatal_state(ni, 1); + } else { + state = (lnet_get_link_status(dev) == 0); + ni_state_before = lnet_set_link_fatal_state(ni, + state); + } +ni_done: + if (!update_ping_buf && + (ni->ni_state == LNET_NI_STATE_ACTIVE) && + (atomic_read(&ni->ni_fatal_error_on) != ni_state_before) && + (net->ibn_init == IBLND_INIT_ALL)) + update_ping_buf = true; + } + + if (update_ping_buf) + lnet_mark_ping_buffer_for_update(); +out: + return 0; +} + +static int +kiblnd_handle_inetaddr_change(struct in_ifaddr *ifa, unsigned long event) +{ + struct kib_dev *event_kibdev; + struct kib_net *net; + struct kib_net *cnxt; + struct net_device *event_netdev = ifa->ifa_dev->dev; + __u32 ni_state_before; + bool update_ping_buf = false; + struct lnet_ni *ni = NULL; + bool link_down; + + event_kibdev = kiblnd_dev_search(event_netdev->name); + + if (!event_kibdev) + goto out; + + if (htonl(event_kibdev->ibd_ifip) != ifa->ifa_local) + goto out; + + list_for_each_entry_safe(net, cnxt, &event_kibdev->ibd_nets, + ibn_list) { + ni = net->ibn_ni; + link_down = (event == NETDEV_DOWN); + ni_state_before = lnet_set_link_fatal_state(ni, link_down); + if (!update_ping_buf && + (ni->ni_state == LNET_NI_STATE_ACTIVE) && + ((event == NETDEV_DOWN) != ni_state_before) && + (net->ibn_init == IBLND_INIT_ALL)) + update_ping_buf = true; + } + + if (update_ping_buf) + lnet_mark_ping_buffer_for_update(); +out: + return 0; +} + + +/************************************ + * Net device notifier event handler + ************************************/ +static int kiblnd_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct 
net_device *dev = netdev_notifier_info_to_dev(ptr); + unsigned char operstate; + + operstate = dev->operstate; + + CDEBUG(D_NET, "devevent: status=%ld, iface=%s ifindex %d state %u\n", + event, dev->name, dev->ifindex, operstate); + + switch (event) { + case NETDEV_UP: + case NETDEV_DOWN: + case NETDEV_CHANGE: + kiblnd_handle_link_state_change(dev, operstate); + break; + } + + return NOTIFY_OK; +} + +/************************************ + * Inetaddr notifier event handler + ************************************/ +static int kiblnd_inetaddr_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = ptr; + + CDEBUG(D_NET, "addrevent: status %ld ip addr %pI4, netmask %pI4.\n", + event, &ifa->ifa_address, &ifa->ifa_mask); + + switch (event) { + case NETDEV_UP: + case NETDEV_DOWN: + case NETDEV_CHANGE: + kiblnd_handle_inetaddr_change(ifa, event); + break; + + } + return NOTIFY_OK; +} + +static struct notifier_block kiblnd_dev_notifier_block = { + .notifier_call = kiblnd_device_event, +}; + +static struct notifier_block kiblnd_inetaddr_notifier_block = { + .notifier_call = kiblnd_inetaddr_event, +}; + static void kiblnd_base_shutdown(void) { - struct kib_sched_info *sched; - int i; + struct kib_sched_info *sched; + struct kib_peer_ni *peer_ni; + int i; LASSERT(list_empty(&kiblnd_data.kib_devs)); - CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n", - atomic_read(&libcfs_kmemory)); + CDEBUG(D_MALLOC, "before LND base cleanup: kmem %lld\n", + libcfs_kmem_read()); - switch (kiblnd_data.kib_init) { - default: - LBUG(); + if (kiblnd_data.kib_init == IBLND_INIT_ALL) { + unregister_netdevice_notifier(&kiblnd_dev_notifier_block); + unregister_inetaddr_notifier(&kiblnd_inetaddr_notifier_block); + } - case IBLND_INIT_ALL: - case IBLND_INIT_DATA: - LASSERT (kiblnd_data.kib_peers != NULL); - for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { - LASSERT(list_empty(&kiblnd_data.kib_peers[i])); - } + switch (kiblnd_data.kib_init) { + default: + LBUG(); + + case IBLND_INIT_ALL: + case IBLND_INIT_DATA: + hash_for_each(kiblnd_data.kib_peers, i, peer_ni, ibp_list) + LASSERT(0); LASSERT(list_empty(&kiblnd_data.kib_connd_zombies)); LASSERT(list_empty(&kiblnd_data.kib_connd_conns)); LASSERT(list_empty(&kiblnd_data.kib_reconn_list)); @@ -2914,41 +3399,29 @@ kiblnd_base_shutdown(void) /* NB: we really want to stop scheduler threads net by net * instead of the whole module, this should be improved - * with dynamic configuration LNet */ + * with dynamic configuration LNet. + */ cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) wake_up_all(&sched->ibs_waitq); - wake_up_all(&kiblnd_data.kib_connd_waitq); - wake_up_all(&kiblnd_data.kib_failover_waitq); - - i = 2; - while (atomic_read(&kiblnd_data.kib_nthreads) != 0) { - i++; - /* power of 2? */ - CDEBUG(((i & (-i)) == i) ? 
D_WARNING : D_NET, - "Waiting for %d threads to terminate\n", - atomic_read(&kiblnd_data.kib_nthreads)); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1)); - } - - /* fall through */ + wake_up(&kiblnd_data.kib_connd_waitq); + wake_up(&kiblnd_data.kib_failover_waitq); - case IBLND_INIT_NOTHING: - break; - } + wait_var_event_warning(&kiblnd_data.kib_nthreads, + !atomic_read(&kiblnd_data.kib_nthreads), + "Waiting for %d threads to terminate\n", + atomic_read(&kiblnd_data.kib_nthreads)); + fallthrough; - if (kiblnd_data.kib_peers != NULL) { - LIBCFS_FREE(kiblnd_data.kib_peers, - sizeof(struct list_head) * - kiblnd_data.kib_peer_hash_size); + case IBLND_INIT_NOTHING: + break; } if (kiblnd_data.kib_scheds != NULL) cfs_percpt_free(kiblnd_data.kib_scheds); - CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n", - atomic_read(&libcfs_kmemory)); + CDEBUG(D_MALLOC, "after LND base cleanup: kmem %lld\n", + libcfs_kmem_read()); kiblnd_data.kib_init = IBLND_INIT_NOTHING; module_put(THIS_MODULE); @@ -2959,16 +3432,15 @@ kiblnd_shutdown(struct lnet_ni *ni) { struct kib_net *net = ni->ni_data; rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - int i; - unsigned long flags; + unsigned long flags; LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL); if (net == NULL) goto out; - CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n", - atomic_read(&libcfs_kmemory)); + CDEBUG(D_MALLOC, "before LND net cleanup: kmem %lld\n", + libcfs_kmem_read()); write_lock_irqsave(g_lock, flags); net->ibn_shutdown = 1; @@ -2978,22 +3450,16 @@ kiblnd_shutdown(struct lnet_ni *ni) default: LBUG(); - case IBLND_INIT_ALL: - /* nuke all existing peers within this net */ - kiblnd_del_peer(ni, LNET_NID_ANY); + case IBLND_INIT_ALL: + /* nuke all existing peers within this net */ + kiblnd_del_peer(ni, LNET_NID_ANY); /* Wait for all peer_ni state to clean up */ - i = 2; - while (atomic_read(&net->ibn_npeers) != 0) { - i++; - /* power of 2? */ - CDEBUG(((i & (-i)) == i) ? 
D_WARNING : D_NET, - "%s: waiting for %d peers to disconnect\n", - libcfs_nid2str(ni->ni_nid), - atomic_read(&net->ibn_npeers)); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1)); - } + wait_var_event_warning(&net->ibn_npeers, + atomic_read(&net->ibn_npeers) == 0, + "%s: waiting for %d peers to disconnect\n", + libcfs_nidstr(&ni->ni_nid), + atomic_read(&net->ibn_npeers)); kiblnd_net_fini_pools(net); @@ -3003,7 +3469,13 @@ kiblnd_shutdown(struct lnet_ni *ni) list_del(&net->ibn_list); write_unlock_irqrestore(g_lock, flags); - /* fall through */ + wake_up_all(&kiblnd_data.kib_connd_waitq); + wait_var_event_warning(&net->ibn_nconns, + atomic_read(&net->ibn_nconns) == 0, + "%s: waiting for %d conns to clean\n", + libcfs_nidstr(&ni->ni_nid), + atomic_read(&net->ibn_nconns)); + fallthrough; case IBLND_INIT_NOTHING: LASSERT (atomic_read(&net->ibn_nconns) == 0); @@ -3015,8 +3487,8 @@ kiblnd_shutdown(struct lnet_ni *ni) break; } - CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n", - atomic_read(&libcfs_kmemory)); + CDEBUG(D_MALLOC, "after LND net cleanup: kmem %lld\n", + libcfs_kmem_read()); net->ibn_init = IBLND_INIT_NOTHING; ni->ni_data = NULL; @@ -3031,13 +3503,15 @@ out: static int kiblnd_base_startup(struct net *ns) { - struct kib_sched_info *sched; - int rc; - int i; + struct kib_sched_info *sched; + int rc; + int i; LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING); - try_module_get(THIS_MODULE); + if (!try_module_get(THIS_MODULE)) + goto failed; + memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */ rwlock_init(&kiblnd_data.kib_global_lock); @@ -3045,18 +3519,11 @@ kiblnd_base_startup(struct net *ns) INIT_LIST_HEAD(&kiblnd_data.kib_devs); INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs); - kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE; - LIBCFS_ALLOC(kiblnd_data.kib_peers, - sizeof(struct list_head) * - kiblnd_data.kib_peer_hash_size); - if (kiblnd_data.kib_peers == NULL) - goto failed; - - for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) - INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]); + hash_init(kiblnd_data.kib_peers); spin_lock_init(&kiblnd_data.kib_connd_lock); INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns); + INIT_LIST_HEAD(&kiblnd_data.kib_connd_waits); INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies); INIT_LIST_HEAD(&kiblnd_data.kib_reconn_list); INIT_LIST_HEAD(&kiblnd_data.kib_reconn_wait); @@ -3089,36 +3556,39 @@ kiblnd_base_startup(struct net *ns) sched->ibs_cpt = i; } - kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR; + kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR; - /* lists/ptrs/locks initialised */ - kiblnd_data.kib_init = IBLND_INIT_DATA; - /*****************************************************/ + /* lists/ptrs/locks initialised */ + kiblnd_data.kib_init = IBLND_INIT_DATA; + /*****************************************************/ rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd"); - if (rc != 0) { - CERROR("Can't spawn o2iblnd connd: %d\n", rc); - goto failed; - } + if (rc != 0) { + CERROR("Can't spawn o2iblnd connd: %d\n", rc); + goto failed; + } if (*kiblnd_tunables.kib_dev_failover != 0) rc = kiblnd_thread_start(kiblnd_failover_thread, ns, "kiblnd_failover"); - if (rc != 0) { - CERROR("Can't spawn o2iblnd failover thread: %d\n", rc); - goto failed; - } + if (rc != 0) { + CERROR("Can't spawn o2iblnd failover thread: %d\n", rc); + goto failed; + } - /* flag everything initialised */ - kiblnd_data.kib_init = IBLND_INIT_ALL; - /*****************************************************/ + 
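+	/* watch for link-state and IPv4 address changes for NI health (handlers above) */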
register_netdevice_notifier(&kiblnd_dev_notifier_block); + register_inetaddr_notifier(&kiblnd_inetaddr_notifier_block); - return 0; + /* flag everything initialised */ + kiblnd_data.kib_init = IBLND_INIT_ALL; + /*****************************************************/ + + return 0; failed: - kiblnd_base_shutdown(); - return -ENETDOWN; + kiblnd_base_shutdown(); + return -ENETDOWN; } static int @@ -3144,12 +3614,11 @@ kiblnd_start_schedulers(struct kib_sched_info *sched) } for (i = 0; i < nthrs; i++) { - long id; - char name[20]; - id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i); - snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld", - KIB_THREAD_CPT(id), KIB_THREAD_TID(id)); - rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name); + long id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i); + + rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, + "kiblnd_sd_%02ld_%02ld", + KIB_THREAD_CPT(id), KIB_THREAD_TID(id)); if (rc == 0) continue; @@ -3188,39 +3657,6 @@ static int kiblnd_dev_start_threads(struct kib_dev *dev, bool newdev, u32 *cpts, return 0; } -static struct kib_dev * -kiblnd_dev_search(char *ifname) -{ - struct kib_dev *alias = NULL; - struct kib_dev *dev; - char *colon; - char *colon2; - - colon = strchr(ifname, ':'); - list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { - if (strcmp(&dev->ibd_ifname[0], ifname) == 0) - return dev; - - if (alias != NULL) - continue; - - colon2 = strchr(dev->ibd_ifname, ':'); - if (colon != NULL) - *colon = 0; - if (colon2 != NULL) - *colon2 = 0; - - if (strcmp(&dev->ibd_ifname[0], ifname) == 0) - alias = dev; - - if (colon != NULL) - *colon = ':'; - if (colon2 != NULL) - *colon2 = ':'; - } - return alias; -} - static int kiblnd_startup(struct lnet_ni *ni) { @@ -3232,6 +3668,7 @@ kiblnd_startup(struct lnet_ni *ni) int rc; int i; bool newdev; + struct net_device *netdev; LASSERT(ni->ni_net->net_lnd == &the_o2iblnd); @@ -3248,26 +3685,22 @@ kiblnd_startup(struct lnet_ni *ni) goto failed; } + net->ibn_ni = ni; net->ibn_incarnation = ktime_get_real_ns() / NSEC_PER_USEC; kiblnd_tunables_setup(ni); - /* - * ni_interfaces is only to support legacy pre Multi-Rail - * tcp bonding for ksocklnd. Multi-Rail wants each secondary - * IP to be treated as an unique 'struct ni' interfaces instead. + /* Multi-Rail wants each secondary + * IP to be treated as an unique 'struct ni' interface. 
*/ - if (ni->ni_interfaces[0] != NULL) { + if (ni->ni_interface != NULL) { /* Use the IPoIB interface specified in 'networks=' */ - if (ni->ni_interfaces[1] != NULL) { - CERROR("ko2iblnd: Multiple interfaces not supported\n"); - rc = -EINVAL; - goto failed; - } - - ifname = ni->ni_interfaces[0]; + ifname = ni->ni_interface; } else { ifname = *kiblnd_tunables.kib_default_ipif; + rc = libcfs_strnid(&ni->ni_nid, ifname); + if (rc < 0 || ni->ni_nid.nid_type != O2IBLND) + memset(&ni->ni_nid, 0, sizeof(ni->ni_nid)); } if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) { @@ -3276,16 +3709,17 @@ kiblnd_startup(struct lnet_ni *ni) goto failed; } - rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns); + rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns, false); if (rc < 0) goto failed; - for (i = 0; i < rc; i++) { - if (strcmp(ifname, ifaces[i].li_name) == 0) - break; - } + i = lnet_inet_select(ni, ifaces, rc); + if (i < 0) + goto failed; - if (i == rc) { + if (nid_addr_is_set(&ni->ni_nid)) { + strscpy(ifname, ifaces[i].li_name, sizeof(ifname)); + } else if (strcmp(ifname, ifaces[i].li_name) != 0) { CERROR("ko2iblnd: No matching interfaces\n"); rc = -ENOENT; goto failed; @@ -3301,10 +3735,10 @@ kiblnd_startup(struct lnet_ni *ni) goto failed; } - ibdev->ibd_ifip = ifaces[i].li_ipaddr; - strlcpy(ibdev->ibd_ifname, ifaces[i].li_name, + ibdev->ibd_ifip = ntohl(ifaces[i].li_ipaddr); + strscpy(ibdev->ibd_ifname, ifaces[i].li_name, sizeof(ibdev->ibd_ifname)); - ibdev->ibd_can_failover = !!(ifaces[i].li_flags & IFF_MASTER); + ibdev->ibd_can_failover = ifaces[i].li_iff_master; INIT_LIST_HEAD(&ibdev->ibd_nets); INIT_LIST_HEAD(&ibdev->ibd_list); /* not yet in kib_devs */ @@ -3322,8 +3756,12 @@ kiblnd_startup(struct lnet_ni *ni) } net->ibn_dev = ibdev; - ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip); - + ni->ni_nid.nid_addr[0] = cpu_to_be32(ibdev->ibd_ifip); + if (!ni->ni_interface) { + rc = lnet_ni_add_interface(ni, ifaces[i].li_name); + if (rc < 0) + CWARN("ko2iblnd failed to allocate ni_interface\n"); + } ni->ni_dev_cpt = ifaces[i].li_cpt; rc = kiblnd_dev_start_threads(ibdev, newdev, ni->ni_cpts, ni->ni_ncpts); @@ -3339,9 +3777,24 @@ kiblnd_startup(struct lnet_ni *ni) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); ibdev->ibd_nnets++; list_add_tail(&net->ibn_list, &ibdev->ibd_nets); + /* for health check */ + if (ibdev->ibd_hdev->ibh_state == IBLND_DEV_PORT_DOWN) + kiblnd_set_ni_fatal_on(ibdev->ibd_hdev, 1); + + rcu_read_lock(); + netdev = dev_get_by_name_rcu(ni->ni_net_ns, net->ibn_dev->ibd_ifname); + if (netdev && + ((netdev->reg_state == NETREG_UNREGISTERING) || + (netdev->operstate != IF_OPER_UP) || + (lnet_get_link_status(netdev) == 0))) { + kiblnd_set_ni_fatal_on(ibdev->ibd_hdev, 1); + } + rcu_read_unlock(); + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); net->ibn_init = IBLND_INIT_ALL; + kfree(ifaces); return 0; @@ -3358,16 +3811,177 @@ failed: return -ENETDOWN; } -static struct lnet_lnd the_o2iblnd = { +static const struct lnet_lnd the_o2iblnd = { .lnd_type = O2IBLND, .lnd_startup = kiblnd_startup, .lnd_shutdown = kiblnd_shutdown, .lnd_ctl = kiblnd_ctl, - .lnd_query = kiblnd_query, .lnd_send = kiblnd_send, .lnd_recv = kiblnd_recv, + .lnd_get_dev_prio = kiblnd_get_dev_prio, + .lnd_nl_get = kiblnd_nl_get, + .lnd_nl_set = kiblnd_nl_set, + .lnd_keys = &kiblnd_tunables_keys, }; +static void ko2inlnd_assert_wire_constants(void) +{ + BUILD_BUG_ON(IBLND_MSG_MAGIC != 0x0be91b91); + BUILD_BUG_ON(IBLND_MSG_VERSION_1 != 0x11); + BUILD_BUG_ON(IBLND_MSG_VERSION_2 != 0x12); + 
BUILD_BUG_ON(IBLND_MSG_VERSION != IBLND_MSG_VERSION_2); + + BUILD_BUG_ON(IBLND_MSG_CONNREQ != 0xc0); + BUILD_BUG_ON(IBLND_MSG_CONNACK != 0xc1); + BUILD_BUG_ON(IBLND_MSG_NOOP != 0xd0); + BUILD_BUG_ON(IBLND_MSG_IMMEDIATE != 0xd1); + BUILD_BUG_ON(IBLND_MSG_PUT_REQ != 0xd2); + BUILD_BUG_ON(IBLND_MSG_PUT_NAK != 0xd3); + BUILD_BUG_ON(IBLND_MSG_PUT_ACK != 0xd4); + BUILD_BUG_ON(IBLND_MSG_PUT_DONE != 0xd5); + BUILD_BUG_ON(IBLND_MSG_GET_REQ != 0xd6); + BUILD_BUG_ON(IBLND_MSG_GET_DONE != 0xd7); + + BUILD_BUG_ON(IBLND_REJECT_CONN_RACE != 1); + BUILD_BUG_ON(IBLND_REJECT_NO_RESOURCES != 2); + BUILD_BUG_ON(IBLND_REJECT_FATAL != 3); + BUILD_BUG_ON(IBLND_REJECT_CONN_UNCOMPAT != 4); + BUILD_BUG_ON(IBLND_REJECT_CONN_STALE != 5); + BUILD_BUG_ON(IBLND_REJECT_RDMA_FRAGS != 6); + BUILD_BUG_ON(IBLND_REJECT_MSG_QUEUE_SIZE != 7); + BUILD_BUG_ON(IBLND_REJECT_INVALID_SRV_ID != 8); + + BUILD_BUG_ON((int)sizeof(struct kib_connparams) != 8); + BUILD_BUG_ON((int)offsetof(struct kib_connparams, ibcp_queue_depth) != 0); + BUILD_BUG_ON((int)sizeof(((struct kib_connparams *)0)->ibcp_queue_depth) != 2); + BUILD_BUG_ON((int)offsetof(struct kib_connparams, ibcp_max_frags) != 2); + BUILD_BUG_ON((int)sizeof(((struct kib_connparams *)0)->ibcp_max_frags) != 2); + BUILD_BUG_ON((int)offsetof(struct kib_connparams, ibcp_max_msg_size) != 4); + BUILD_BUG_ON((int)sizeof(((struct kib_connparams *)0)->ibcp_max_msg_size) != 4); + + BUILD_BUG_ON((int)sizeof(struct kib_immediate_msg) != 72); + BUILD_BUG_ON((int)offsetof(struct kib_immediate_msg, ibim_hdr) != 0); + BUILD_BUG_ON((int)sizeof(((struct kib_immediate_msg *)0)->ibim_hdr) != 72); + BUILD_BUG_ON((int)offsetof(struct kib_immediate_msg, ibim_payload) != 72); + BUILD_BUG_ON((int)sizeof(((struct kib_immediate_msg *)0)->ibim_payload) != 0); + + BUILD_BUG_ON((int)sizeof(struct kib_rdma_frag) != 12); + BUILD_BUG_ON((int)offsetof(struct kib_rdma_frag, rf_nob) != 0); + BUILD_BUG_ON((int)sizeof(((struct kib_rdma_frag *)0)->rf_nob) != 4); + BUILD_BUG_ON((int)offsetof(struct kib_rdma_frag, rf_addr) != 4); + BUILD_BUG_ON((int)sizeof(((struct kib_rdma_frag *)0)->rf_addr) != 8); + + BUILD_BUG_ON((int)sizeof(struct kib_rdma_desc) != 8); + BUILD_BUG_ON((int)offsetof(struct kib_rdma_desc, rd_key) != 0); + BUILD_BUG_ON((int)sizeof(((struct kib_rdma_desc *)0)->rd_key) != 4); + BUILD_BUG_ON((int)offsetof(struct kib_rdma_desc, rd_nfrags) != 4); + BUILD_BUG_ON((int)sizeof(((struct kib_rdma_desc *)0)->rd_nfrags) != 4); + BUILD_BUG_ON((int)offsetof(struct kib_rdma_desc, rd_frags) != 8); + BUILD_BUG_ON((int)sizeof(((struct kib_rdma_desc *)0)->rd_frags) != 0); + + BUILD_BUG_ON((int)sizeof(struct kib_putreq_msg) != 80); + BUILD_BUG_ON((int)offsetof(struct kib_putreq_msg, ibprm_hdr) != 0); + BUILD_BUG_ON((int)sizeof(((struct kib_putreq_msg *)0)->ibprm_hdr) != 72); + BUILD_BUG_ON((int)offsetof(struct kib_putreq_msg, ibprm_cookie) != 72); + BUILD_BUG_ON((int)sizeof(((struct kib_putreq_msg *)0)->ibprm_cookie) != 8); + + BUILD_BUG_ON((int)sizeof(struct kib_putack_msg) != 24); + BUILD_BUG_ON((int)offsetof(struct kib_putack_msg, ibpam_src_cookie) != 0); + BUILD_BUG_ON((int)sizeof(((struct kib_putack_msg *)0)->ibpam_src_cookie) != 8); + BUILD_BUG_ON((int)offsetof(struct kib_putack_msg, ibpam_dst_cookie) != 8); + BUILD_BUG_ON((int)sizeof(((struct kib_putack_msg *)0)->ibpam_dst_cookie) != 8); + BUILD_BUG_ON((int)offsetof(struct kib_putack_msg, ibpam_rd) != 16); + BUILD_BUG_ON((int)sizeof(((struct kib_putack_msg *)0)->ibpam_rd) != 8); + + BUILD_BUG_ON((int)sizeof(struct kib_get_msg) != 88); + 
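+	/* 88 = 72-byte hdr + 8-byte cookie + 8-byte rdma desc; offsets verified below */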
BUILD_BUG_ON((int)offsetof(struct kib_get_msg, ibgm_hdr) != 0); + BUILD_BUG_ON((int)sizeof(((struct kib_get_msg *)0)->ibgm_hdr) != 72); + BUILD_BUG_ON((int)offsetof(struct kib_get_msg, ibgm_cookie) != 72); + BUILD_BUG_ON((int)sizeof(((struct kib_get_msg *)0)->ibgm_cookie) != 8); + BUILD_BUG_ON((int)offsetof(struct kib_get_msg, ibgm_rd) != 80); + BUILD_BUG_ON((int)sizeof(((struct kib_get_msg *)0)->ibgm_rd) != 8); + + BUILD_BUG_ON((int)sizeof(struct kib_completion_msg) != 12); + BUILD_BUG_ON((int)offsetof(struct kib_completion_msg, ibcm_cookie) != 0); + BUILD_BUG_ON((int)sizeof(((struct kib_completion_msg *)0)->ibcm_cookie) != 8); + BUILD_BUG_ON((int)offsetof(struct kib_completion_msg, ibcm_status) != 8); + BUILD_BUG_ON((int)sizeof(((struct kib_completion_msg *)0)->ibcm_status) != 4); + + /* Checks for struct kib_msg */ + //BUILD_BUG_ON((int)sizeof(struct kib_msg) != 12); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_magic) != 0); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_magic) != 4); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_version) != 4); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_version) != 2); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_type) != 6); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_type) != 1); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_credits) != 7); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_credits) != 1); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_nob) != 8); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_nob) != 4); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_cksum) != 12); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_cksum) != 4); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_srcnid) != 16); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_srcnid) != 8); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_srcstamp) != 24); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_srcstamp) != 8); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_dstnid) != 32); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_dstnid) != 8); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_dststamp) != 40); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_dststamp) != 8); + + /* Connparams */ + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.connparams.ibcp_queue_depth) != 48); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.connparams.ibcp_queue_depth) != 2); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.connparams.ibcp_max_frags) != 50); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.connparams.ibcp_max_frags) != 2); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.connparams.ibcp_max_msg_size) != 52); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.connparams.ibcp_max_msg_size) != 4); + + /* Immediate message */ + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.immediate.ibim_hdr) != 48); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.immediate.ibim_hdr) != 72); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.immediate.ibim_payload) != 120); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.immediate.ibim_payload) != 0); + + /* PUT req message */ + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.putreq.ibprm_hdr) != 48); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.putreq.ibprm_hdr) != 72); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.putreq.ibprm_cookie) != 120); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.putreq.ibprm_cookie) != 8); + + /* Put ACK */ + BUILD_BUG_ON((int)offsetof(struct kib_msg, 
ibm_u.putack.ibpam_src_cookie) != 48); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.putack.ibpam_src_cookie) != 8); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.putack.ibpam_dst_cookie) != 56); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.putack.ibpam_dst_cookie) != 8); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.putack.ibpam_rd) != 64); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.putack.ibpam_rd) != 8); + + /* GET message */ + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.get.ibgm_hdr) != 48); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.get.ibgm_hdr) != 72); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.get.ibgm_cookie) != 120); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.get.ibgm_cookie) != 8); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.get.ibgm_rd) != 128); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.get.ibgm_rd) != 8); + + /* Completion message */ + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.completion.ibcm_cookie) != 48); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.completion.ibcm_cookie) != 8); + BUILD_BUG_ON((int)offsetof(struct kib_msg, ibm_u.completion.ibcm_status) != 56); + BUILD_BUG_ON((int)sizeof(((struct kib_msg *)0)->ibm_u.completion.ibcm_status) != 4); + + /* Sanity checks */ + BUILD_BUG_ON(sizeof(struct kib_msg) > IBLND_MSG_SIZE); + BUILD_BUG_ON(offsetof(struct kib_msg, + ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) > + IBLND_MSG_SIZE); + BUILD_BUG_ON(offsetof(struct kib_msg, + ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) > + IBLND_MSG_SIZE); +} + static void __exit ko2iblnd_exit(void) { lnet_unregister_lnd(&the_o2iblnd); @@ -3377,18 +3991,16 @@ static int __init ko2iblnd_init(void) { int rc; - CLASSERT(sizeof(struct kib_msg) <= IBLND_MSG_SIZE); - CLASSERT(offsetof(struct kib_msg, - ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <= - IBLND_MSG_SIZE); - CLASSERT(offsetof(struct kib_msg, - ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) - <= IBLND_MSG_SIZE); + ko2inlnd_assert_wire_constants(); rc = kiblnd_tunables_init(); if (rc != 0) return rc; + rc = libcfs_setup(); + if (rc) + return rc; + lnet_register_lnd(&the_o2iblnd); return 0;
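
For reference, two minimal sketches of the techniques the hunks above rely on. Both use hypothetical names (demo_*) and made-up field sizes; only the patterns match the patch.

First, the compile-time wire-layout pinning that ko2inlnd_assert_wire_constants() performs for struct kib_msg. BUILD_BUG_ON() fails the build if any size or offset drifts, so the on-the-wire ABI cannot change silently:

/* hedged sketch: demo_wire_hdr is hypothetical, not a Lustre structure */
#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_wire_hdr {
	__u32	dw_magic;	/* offset 0 */
	__u16	dw_version;	/* offset 4 */
	__u8	dw_type;	/* offset 6 */
	__u8	dw_credits;	/* offset 7 */
	__u64	dw_cookie;	/* offset 8 */
} __attribute__((__packed__));

static void demo_assert_wire_constants(void)
{
	/* all checks evaluate at compile time; there is no runtime cost */
	BUILD_BUG_ON((int)sizeof(struct demo_wire_hdr) != 16);
	BUILD_BUG_ON((int)offsetof(struct demo_wire_hdr, dw_version) != 4);
	BUILD_BUG_ON((int)offsetof(struct demo_wire_hdr, dw_cookie) != 8);
	BUILD_BUG_ON((int)sizeof(((struct demo_wire_hdr *)0)->dw_cookie) != 8);
}

Calling the helper once from module init, as ko2iblnd_init() now does, instantiates the checks and avoids an unused-function warning.

Second, the netdevice-notifier registration that feeds the new link-state health checks. The handler below only logs; the patch's kiblnd_device_event() additionally maps the event onto NI fatal state and marks the ping buffer for update:

/* hedged sketch: demo handler using the standard kernel notifier API */
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int demo_device_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		pr_debug("%s: event %lu operstate %u\n",
			 dev->name, event, dev->operstate);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_notifier_block = {
	.notifier_call = demo_device_event,
};

/* paired at module init/shutdown, exactly like the patch:
 *	register_netdevice_notifier(&demo_notifier_block);
 *	unregister_netdevice_notifier(&demo_notifier_block);
 */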