diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c
index 1c3e2d2..e846e9d 100644
--- a/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/lnet/klnds/o2iblnd/o2iblnd.c
@@ -23,7 +23,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2016, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -35,11 +35,13 @@
  */
 
 #include
+#include
+
 #include "o2iblnd.h"
 
-static lnd_t the_o2iblnd;
+static const struct lnet_lnd the_o2iblnd;
 
-kib_data_t kiblnd_data;
+struct kib_data kiblnd_data;
 
 static __u32
 kiblnd_cksum (void *ptr, int nob)
@@ -96,41 +98,40 @@ kiblnd_msgtype2str(int type)
 static int
 kiblnd_msgtype2size(int type)
 {
-	const int hdr_size = offsetof(kib_msg_t, ibm_u);
+	const int hdr_size = offsetof(struct kib_msg, ibm_u);
 
 	switch (type) {
 	case IBLND_MSG_CONNREQ:
 	case IBLND_MSG_CONNACK:
-		return hdr_size + sizeof(kib_connparams_t);
+		return hdr_size + sizeof(struct kib_connparams);
 
 	case IBLND_MSG_NOOP:
 		return hdr_size;
 
 	case IBLND_MSG_IMMEDIATE:
-		return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);
+		return offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[0]);
 
 	case IBLND_MSG_PUT_REQ:
-		return hdr_size + sizeof(kib_putreq_msg_t);
+		return hdr_size + sizeof(struct kib_putreq_msg);
 
 	case IBLND_MSG_PUT_ACK:
-		return hdr_size + sizeof(kib_putack_msg_t);
+		return hdr_size + sizeof(struct kib_putack_msg);
 
 	case IBLND_MSG_GET_REQ:
-		return hdr_size + sizeof(kib_get_msg_t);
+		return hdr_size + sizeof(struct kib_get_msg);
 
 	case IBLND_MSG_PUT_NAK:
 	case IBLND_MSG_PUT_DONE:
 	case IBLND_MSG_GET_DONE:
-		return hdr_size + sizeof(kib_completion_msg_t);
+		return hdr_size + sizeof(struct kib_completion_msg);
 	default:
 		return -1;
 	}
 }
 
-static int
-kiblnd_unpack_rd(kib_msg_t *msg, int flip)
+static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
 {
-	kib_rdma_desc_t *rd;
+	struct kib_rdma_desc *rd;
 	int nob;
 	int n;
 	int i;
@@ -155,7 +156,7 @@ kiblnd_unpack_rd(kib_msg_t *msg, int flip)
 		return 1;
 	}
 
-	nob = offsetof (kib_msg_t, ibm_u) +
+	nob = offsetof(struct kib_msg, ibm_u) +
 	      kiblnd_rd_msg_size(rd, msg->ibm_type, n);
 
 	if (msg->ibm_nob < nob) {
@@ -175,11 +176,10 @@ kiblnd_unpack_rd(kib_msg_t *msg, int flip)
 	return 0;
 }
 
-void
-kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
-		 int credits, lnet_nid_t dstnid, __u64 dststamp)
+void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version,
+		     int credits, lnet_nid_t dstnid, __u64 dststamp)
 {
-	kib_net_t *net = ni->ni_data;
+	struct kib_net *net = ni->ni_data;
 
 	/* CAVEAT EMPTOR! all message fields not set here should have been
 	 * initialised previously.
*/ @@ -200,10 +200,9 @@ kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version, } } -int -kiblnd_unpack_msg(kib_msg_t *msg, int nob) +int kiblnd_unpack_msg(struct kib_msg *msg, int nob) { - const int hdr_size = offsetof(kib_msg_t, ibm_u); + const int hdr_size = offsetof(struct kib_msg, ibm_u); __u32 msg_cksum; __u16 version; int msg_nob; @@ -255,10 +254,10 @@ kiblnd_unpack_msg(kib_msg_t *msg, int nob) msg->ibm_cksum = msg_cksum; if (flip) { - /* leave magic unflipped as a clue to peer endianness */ + /* leave magic unflipped as a clue to peer_ni endianness */ msg->ibm_version = version; - CLASSERT (sizeof(msg->ibm_type) == 1); - CLASSERT (sizeof(msg->ibm_credits) == 1); + BUILD_BUG_ON(sizeof(msg->ibm_type) != 1); + BUILD_BUG_ON(sizeof(msg->ibm_credits) != 1); msg->ibm_nob = msg_nob; __swab64s(&msg->ibm_srcnid); __swab64s(&msg->ibm_srcstamp); @@ -313,33 +312,34 @@ kiblnd_unpack_msg(kib_msg_t *msg, int nob) } int -kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) +kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp, + lnet_nid_t nid) { - kib_peer_t *peer; - kib_net_t *net = ni->ni_data; - int cpt = lnet_cpt_of_nid(nid, ni); - unsigned long flags; + struct kib_peer_ni *peer_ni; + struct kib_net *net = ni->ni_data; + int cpt = lnet_cpt_of_nid(nid, ni); + unsigned long flags; LASSERT(net != NULL); LASSERT(nid != LNET_NID_ANY); - LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer)); - if (peer == NULL) { - CERROR("Cannot allocate peer\n"); + LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni)); + if (peer_ni == NULL) { + CERROR("Cannot allocate peer_ni\n"); return -ENOMEM; } - peer->ibp_ni = ni; - peer->ibp_nid = nid; - peer->ibp_error = 0; - peer->ibp_last_alive = 0; - peer->ibp_max_frags = kiblnd_cfg_rdma_frags(peer->ibp_ni); - peer->ibp_queue_depth = ni->ni_net->net_tunables.lct_peer_tx_credits; - atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */ + peer_ni->ibp_ni = ni; + peer_ni->ibp_nid = nid; + peer_ni->ibp_error = 0; + peer_ni->ibp_last_alive = 0; + peer_ni->ibp_max_frags = IBLND_MAX_RDMA_FRAGS; + peer_ni->ibp_queue_depth = ni->ni_net->net_tunables.lct_peer_tx_credits; + atomic_set(&peer_ni->ibp_refcount, 1); /* 1 ref for caller */ - INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */ - INIT_LIST_HEAD(&peer->ibp_conns); - INIT_LIST_HEAD(&peer->ibp_tx_queue); + INIT_LIST_HEAD(&peer_ni->ibp_list); /* not in the peer_ni table yet */ + INIT_LIST_HEAD(&peer_ni->ibp_conns); + INIT_LIST_HEAD(&peer_ni->ibp_tx_queue); write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -351,72 +351,80 @@ kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - *peerp = peer; + *peerp = peer_ni; return 0; } void -kiblnd_destroy_peer (kib_peer_t *peer) +kiblnd_destroy_peer(struct kib_peer_ni *peer_ni) { - kib_net_t *net = peer->ibp_ni->ni_data; + struct kib_net *net = peer_ni->ibp_ni->ni_data; LASSERT(net != NULL); - LASSERT (atomic_read(&peer->ibp_refcount) == 0); - LASSERT(!kiblnd_peer_active(peer)); - LASSERT(kiblnd_peer_idle(peer)); - LASSERT(list_empty(&peer->ibp_tx_queue)); + LASSERT (atomic_read(&peer_ni->ibp_refcount) == 0); + LASSERT(!kiblnd_peer_active(peer_ni)); + LASSERT(kiblnd_peer_idle(peer_ni)); + LASSERT(list_empty(&peer_ni->ibp_tx_queue)); - LIBCFS_FREE(peer, sizeof(*peer)); + LIBCFS_FREE(peer_ni, sizeof(*peer_ni)); - /* NB a peer's connections keep a reference on their peer until + /* NB a peer_ni's connections keep a reference on 
their peer_ni until * they are destroyed, so we can be assured that _all_ state to do - * with this peer has been cleaned up when its refcount drops to + * with this peer_ni has been cleaned up when its refcount drops to * zero. */ - atomic_dec(&net->ibn_npeers); + if (atomic_dec_and_test(&net->ibn_npeers)) + wake_up_var(&net->ibn_npeers); } -kib_peer_t * -kiblnd_find_peer_locked (lnet_nid_t nid) +struct kib_peer_ni * +kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid) { /* the caller is responsible for accounting the additional reference * that this creates */ struct list_head *peer_list = kiblnd_nid2peerlist(nid); struct list_head *tmp; - kib_peer_t *peer; + struct kib_peer_ni *peer_ni; list_for_each(tmp, peer_list) { - peer = list_entry(tmp, kib_peer_t, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); - - if (peer->ibp_nid != nid) + peer_ni = list_entry(tmp, struct kib_peer_ni, ibp_list); + LASSERT(!kiblnd_peer_idle(peer_ni)); + + /* + * Match a peer if its NID and the NID of the local NI it + * communicates over are the same. Otherwise don't match + * the peer, which will result in a new lnd peer being + * created. + */ + if (peer_ni->ibp_nid != nid || + peer_ni->ibp_ni->ni_nid != ni->ni_nid) continue; - CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n", - peer, libcfs_nid2str(nid), - atomic_read(&peer->ibp_refcount), - peer->ibp_version); - return peer; + CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d) version: %x\n", + peer_ni, libcfs_nid2str(nid), + atomic_read(&peer_ni->ibp_refcount), + peer_ni->ibp_version); + return peer_ni; } return NULL; } void -kiblnd_unlink_peer_locked (kib_peer_t *peer) +kiblnd_unlink_peer_locked(struct kib_peer_ni *peer_ni) { - LASSERT(list_empty(&peer->ibp_conns)); + LASSERT(list_empty(&peer_ni->ibp_conns)); - LASSERT (kiblnd_peer_active(peer)); - list_del_init(&peer->ibp_list); + LASSERT (kiblnd_peer_active(peer_ni)); + list_del_init(&peer_ni->ibp_list); /* lose peerlist's ref */ - kiblnd_peer_decref(peer); + kiblnd_peer_decref(peer_ni); } static int -kiblnd_get_peer_info(lnet_ni_t *ni, int index, +kiblnd_get_peer_info(struct lnet_ni *ni, int index, lnet_nid_t *nidp, int *count) { - kib_peer_t *peer; + struct kib_peer_ni *peer_ni; struct list_head *ptmp; int i; unsigned long flags; @@ -427,17 +435,17 @@ kiblnd_get_peer_info(lnet_ni_t *ni, int index, list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); + peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list); + LASSERT(!kiblnd_peer_idle(peer_ni)); - if (peer->ibp_ni != ni) + if (peer_ni->ibp_ni != ni) continue; if (index-- > 0) continue; - *nidp = peer->ibp_nid; - *count = atomic_read(&peer->ibp_refcount); + *nidp = peer_ni->ibp_nid; + *count = atomic_read(&peer_ni->ibp_refcount); read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); @@ -450,33 +458,30 @@ kiblnd_get_peer_info(lnet_ni_t *ni, int index, } static void -kiblnd_del_peer_locked (kib_peer_t *peer) +kiblnd_del_peer_locked(struct kib_peer_ni *peer_ni) { - struct list_head *ctmp; - struct list_head *cnxt; - kib_conn_t *conn; + struct kib_conn *cnxt; + struct kib_conn *conn; - if (list_empty(&peer->ibp_conns)) { - kiblnd_unlink_peer_locked(peer); + if (list_empty(&peer_ni->ibp_conns)) { + kiblnd_unlink_peer_locked(peer_ni); } else { - list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { - conn = list_entry(ctmp, kib_conn_t, ibc_list); - + list_for_each_entry_safe(conn, cnxt, &peer_ni->ibp_conns, + ibc_list) kiblnd_close_conn_locked(conn, 0); - } - /* NB 
closing peer's last conn unlinked it. */ + /* NB closing peer_ni's last conn unlinked it. */ } - /* NB peer now unlinked; might even be freed if the peer table had the + /* NB peer_ni now unlinked; might even be freed if the peer_ni table had the * last ref on it. */ } static int -kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid) +kiblnd_del_peer(struct lnet_ni *ni, lnet_nid_t nid) { - struct list_head zombies = LIST_HEAD_INIT(zombies); + LIST_HEAD(zombies); struct list_head *ptmp; struct list_head *pnxt; - kib_peer_t *peer; + struct kib_peer_ni *peer_ni; int lo; int hi; int i; @@ -494,40 +499,40 @@ kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid) for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); + peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list); + LASSERT(!kiblnd_peer_idle(peer_ni)); - if (peer->ibp_ni != ni) + if (peer_ni->ibp_ni != ni) continue; - if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid)) + if (!(nid == LNET_NID_ANY || peer_ni->ibp_nid == nid)) continue; - if (!list_empty(&peer->ibp_tx_queue)) { - LASSERT(list_empty(&peer->ibp_conns)); + if (!list_empty(&peer_ni->ibp_tx_queue)) { + LASSERT(list_empty(&peer_ni->ibp_conns)); - list_splice_init(&peer->ibp_tx_queue, + list_splice_init(&peer_ni->ibp_tx_queue, &zombies); } - kiblnd_del_peer_locked(peer); + kiblnd_del_peer_locked(peer_ni); rc = 0; /* matched something */ } } write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - kiblnd_txlist_done(ni, &zombies, -EIO); + kiblnd_txlist_done(&zombies, -EIO, LNET_MSG_STATUS_LOCAL_ERROR); return rc; } -static kib_conn_t * -kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) +static struct kib_conn * +kiblnd_get_conn_by_idx(struct lnet_ni *ni, int index) { - kib_peer_t *peer; + struct kib_peer_ni *peer_ni; struct list_head *ptmp; - kib_conn_t *conn; + struct kib_conn *conn; struct list_head *ctmp; int i; unsigned long flags; @@ -537,17 +542,17 @@ kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); + peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list); + LASSERT(!kiblnd_peer_idle(peer_ni)); - if (peer->ibp_ni != ni) + if (peer_ni->ibp_ni != ni) continue; - list_for_each(ctmp, &peer->ibp_conns) { + list_for_each(ctmp, &peer_ni->ibp_conns) { if (index-- > 0) continue; - conn = list_entry(ctmp, kib_conn_t, ibc_list); + conn = list_entry(ctmp, struct kib_conn, ibc_list); kiblnd_conn_addref(conn); read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); @@ -561,27 +566,27 @@ kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) } static void -kiblnd_debug_rx (kib_rx_t *rx) +kiblnd_debug_rx(struct kib_rx *rx) { - CDEBUG(D_CONSOLE, " %p status %d msg_type %x cred %d\n", - rx, rx->rx_status, rx->rx_msg->ibm_type, - rx->rx_msg->ibm_credits); + CDEBUG(D_CONSOLE, " %p msg_type %x cred %d\n", + rx, rx->rx_msg->ibm_type, + rx->rx_msg->ibm_credits); } static void -kiblnd_debug_tx (kib_tx_t *tx) +kiblnd_debug_tx(struct kib_tx *tx) { - CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lx " + CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lld " "cookie %#llx msg %s%s type %x cred %d\n", tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting, - tx->tx_status, tx->tx_deadline, tx->tx_cookie, + tx->tx_status, ktime_to_ns(tx->tx_deadline), tx->tx_cookie, tx->tx_lntmsg[0] == NULL ? 
"-" : "!", tx->tx_lntmsg[1] == NULL ? "-" : "!", tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits); } void -kiblnd_debug_conn (kib_conn_t *conn) +kiblnd_debug_conn(struct kib_conn *conn) { struct list_head *tmp; int i; @@ -599,27 +604,27 @@ kiblnd_debug_conn (kib_conn_t *conn) CDEBUG(D_CONSOLE, " early_rxs:\n"); list_for_each(tmp, &conn->ibc_early_rxs) - kiblnd_debug_rx(list_entry(tmp, kib_rx_t, rx_list)); + kiblnd_debug_rx(list_entry(tmp, struct kib_rx, rx_list)); CDEBUG(D_CONSOLE, " tx_noops:\n"); list_for_each(tmp, &conn->ibc_tx_noops) - kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list)); + kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); CDEBUG(D_CONSOLE, " tx_queue_nocred:\n"); list_for_each(tmp, &conn->ibc_tx_queue_nocred) - kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list)); + kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); CDEBUG(D_CONSOLE, " tx_queue_rsrvd:\n"); list_for_each(tmp, &conn->ibc_tx_queue_rsrvd) - kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list)); + kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); CDEBUG(D_CONSOLE, " tx_queue:\n"); list_for_each(tmp, &conn->ibc_tx_queue) - kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list)); + kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); CDEBUG(D_CONSOLE, " active_txs:\n"); list_for_each(tmp, &conn->ibc_active_txs) - kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list)); + kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list)); CDEBUG(D_CONSOLE, " rxs:\n"); for (i = 0; i < IBLND_RX_MSGS(conn); i++) @@ -628,46 +633,22 @@ kiblnd_debug_conn (kib_conn_t *conn) spin_unlock(&conn->ibc_lock); } -int -kiblnd_translate_mtu(int value) -{ - switch (value) { - default: - return -1; - case 0: - return 0; - case 256: - return IB_MTU_256; - case 512: - return IB_MTU_512; - case 1024: - return IB_MTU_1024; - case 2048: - return IB_MTU_2048; - case 4096: - return IB_MTU_4096; - } -} - static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) { - int mtu; - /* XXX There is no path record for iWARP, set by netdev->change_mtu? */ if (cmid->route.path_rec == NULL) return; - mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu); - LASSERT (mtu >= 0); - if (mtu != 0) - cmid->route.path_rec->mtu = mtu; + if (*kiblnd_tunables.kib_ib_mtu) + cmid->route.path_rec->mtu = + ib_mtu_int_to_enum(*kiblnd_tunables.kib_ib_mtu); } static int -kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) +kiblnd_get_completion_vector(struct kib_conn *conn, int cpt) { - cpumask_t *mask; + cpumask_var_t *mask; int vectors; int off; int i; @@ -681,8 +662,8 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) /* hash NID to CPU id in this partition... */ ibp_nid = conn->ibc_peer->ibp_nid; - off = do_div(ibp_nid, cpumask_weight(mask)); - for_each_cpu(i, mask) { + off = do_div(ibp_nid, cpumask_weight(*mask)); + for_each_cpu(i, *mask) { if (off-- == 0) return i % vectors; } @@ -691,26 +672,86 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) return 1; } -kib_conn_t * -kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, +/* + * Get the scheduler bound to this CPT. If the scheduler has no + * threads, which means that the CPT has no CPUs, then grab the + * next scheduler that we can use. + * + * This case would be triggered if a NUMA node is configured with + * no associated CPUs. 
+ */ +static struct kib_sched_info * +kiblnd_get_scheduler(int cpt) +{ + struct kib_sched_info *sched; + int i; + + sched = kiblnd_data.kib_scheds[cpt]; + + if (sched->ibs_nthreads > 0) + return sched; + + cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) { + if (sched->ibs_nthreads > 0) { + CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n", + cpt, sched->ibs_cpt); + return sched; + } + } + + return NULL; +} + +static unsigned int kiblnd_send_wrs(struct kib_conn *conn) +{ + /* + * One WR for the LNet message + * And ibc_max_frags for the transfer WRs + */ + int ret; + int multiplier = 1 + conn->ibc_max_frags; + enum kib_dev_caps dev_caps = conn->ibc_hdev->ibh_dev->ibd_dev_caps; + + /* FastReg needs two extra WRs for map and invalidate */ + if (dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED) + multiplier += 2; + + /* account for a maximum of ibc_queue_depth in-flight transfers */ + ret = multiplier * conn->ibc_queue_depth; + + if (ret > conn->ibc_hdev->ibh_max_qp_wr) { + CDEBUG(D_NET, "peer_credits %u will result in send work " + "request size %d larger than maximum %d device " + "can handle\n", conn->ibc_queue_depth, ret, + conn->ibc_hdev->ibh_max_qp_wr); + conn->ibc_queue_depth = + conn->ibc_hdev->ibh_max_qp_wr / multiplier; + } + + /* don't go beyond the maximum the device can handle */ + return min(ret, conn->ibc_hdev->ibh_max_qp_wr); +} + +struct kib_conn * +kiblnd_create_conn(struct kib_peer_ni *peer_ni, struct rdma_cm_id *cmid, int state, int version) { /* CAVEAT EMPTOR: * If the new conn is created successfully it takes over the caller's - * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself - * is destroyed. On failure, the caller's ref on 'peer' remains and + * ref on 'peer_ni'. It also "owns" 'cmid' and destroys it when it itself + * is destroyed. On failure, the caller's ref on 'peer_ni' remains and * she must dispose of 'cmid'. (Actually I'd block forever if I tried * to destroy 'cmid' here since I'm called from the CM which still has * its ref on 'cmid'). */ rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_net_t *net = peer->ibp_ni->ni_data; - kib_dev_t *dev; - struct ib_qp_init_attr *init_qp_attr; + struct kib_net *net = peer_ni->ibp_ni->ni_data; + struct kib_dev *dev; + struct ib_qp_init_attr init_qp_attr = {}; struct kib_sched_info *sched; #ifdef HAVE_IB_CQ_INIT_ATTR struct ib_cq_init_attr cq_attr = {}; #endif - kib_conn_t *conn; + struct kib_conn *conn; struct ib_cq *cq; unsigned long flags; int cpt; @@ -722,33 +763,36 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, dev = net->ibn_dev; - cpt = lnet_cpt_of_nid(peer->ibp_nid, peer->ibp_ni); - sched = kiblnd_data.kib_scheds[cpt]; + cpt = lnet_cpt_of_nid(peer_ni->ibp_nid, peer_ni->ibp_ni); + sched = kiblnd_get_scheduler(cpt); - LASSERT(sched->ibs_nthreads > 0); - - LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt, - sizeof(*init_qp_attr)); - if (init_qp_attr == NULL) { - CERROR("Can't allocate qp_attr for %s\n", - libcfs_nid2str(peer->ibp_nid)); + if (sched == NULL) { + CERROR("no schedulers available. node is unhealthy\n"); goto failed_0; } + /* + * The cpt might have changed if we ended up selecting a non cpt + * native scheduler. So use the scheduler's cpt instead. 
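+	 *
+	 * Every per-connection allocation that follows (the conn
+	 * itself, ibc_connvars and the RX descriptors) is then made
+	 * on the scheduler's CPT, keeping the memory local to the
+	 * threads that will service this connection's CQ.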
+ */ + cpt = sched->ibs_cpt; + LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn)); if (conn == NULL) { CERROR("Can't allocate connection for %s\n", - libcfs_nid2str(peer->ibp_nid)); - goto failed_1; + libcfs_nid2str(peer_ni->ibp_nid)); + goto failed_0; } conn->ibc_state = IBLND_CONN_INIT; conn->ibc_version = version; - conn->ibc_peer = peer; /* I take the caller's ref */ + conn->ibc_peer = peer_ni; /* I take the caller's ref */ cmid->context = conn; /* for future CM callbacks */ conn->ibc_cmid = cmid; - conn->ibc_max_frags = peer->ibp_max_frags; - conn->ibc_queue_depth = peer->ibp_queue_depth; + conn->ibc_max_frags = peer_ni->ibp_max_frags; + conn->ibc_queue_depth = peer_ni->ibp_queue_depth; + conn->ibc_rxs = NULL; + conn->ibc_rx_pages = NULL; INIT_LIST_HEAD(&conn->ibc_early_rxs); INIT_LIST_HEAD(&conn->ibc_tx_noops); @@ -756,6 +800,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd); INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred); INIT_LIST_HEAD(&conn->ibc_active_txs); + INIT_LIST_HEAD(&conn->ibc_zombie_txs); spin_lock_init(&conn->ibc_lock); LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt, @@ -793,20 +838,6 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, write_unlock_irqrestore(glock, flags); - LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt, - IBLND_RX_MSGS(conn) * sizeof(kib_rx_t)); - if (conn->ibc_rxs == NULL) { - CERROR("Cannot allocate RX buffers\n"); - goto failed_2; - } - - rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt, - IBLND_RX_MSG_PAGES(conn)); - if (rc != 0) - goto failed_2; - - kiblnd_map_rx_descs(conn); - #ifdef HAVE_IB_CQ_INIT_ATTR cq_attr.cqe = IBLND_CQ_ENTRIES(conn); cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt); @@ -820,6 +851,12 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, kiblnd_get_completion_vector(conn, cpt)); #endif if (IS_ERR(cq)) { + /* + * on MLX-5 (possibly MLX-4 as well) this error could be + * hit if the concurrent_sends and/or peer_tx_credits is set + * too high. 
Or due to an MLX-5 bug which tries to + * allocate 256kb via kmalloc for WR cookie array + */ CERROR("Failed to create CQ with %d CQEs: %ld\n", IBLND_CQ_ENTRIES(conn), PTR_ERR(cq)); goto failed_2; @@ -833,39 +870,54 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, goto failed_2; } - init_qp_attr->event_handler = kiblnd_qp_event; - init_qp_attr->qp_context = conn; - init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(conn); - init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn); - init_qp_attr->cap.max_send_sge = 1; - init_qp_attr->cap.max_recv_sge = 1; - init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR; - init_qp_attr->qp_type = IB_QPT_RC; - init_qp_attr->send_cq = cq; - init_qp_attr->recv_cq = cq; + init_qp_attr.event_handler = kiblnd_qp_event; + init_qp_attr.qp_context = conn; + init_qp_attr.cap.max_send_sge = *kiblnd_tunables.kib_wrq_sge; + init_qp_attr.cap.max_recv_sge = 1; + init_qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; + init_qp_attr.qp_type = IB_QPT_RC; + init_qp_attr.send_cq = cq; + init_qp_attr.recv_cq = cq; + /* + * kiblnd_send_wrs() can change the connection's queue depth if + * the maximum work requests for the device is maxed out + */ + init_qp_attr.cap.max_send_wr = kiblnd_send_wrs(conn); + init_qp_attr.cap.max_recv_wr = IBLND_RECV_WRS(conn); + + rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, &init_qp_attr); + if (rc) { + CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d, " + "send_sge: %d, recv_sge: %d\n", + rc, init_qp_attr.cap.max_send_wr, + init_qp_attr.cap.max_recv_wr, + init_qp_attr.cap.max_send_sge, + init_qp_attr.cap.max_recv_sge); + goto failed_2; + } conn->ibc_sched = sched; - do { - rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr); - if (!rc || init_qp_attr->cap.max_send_wr < 16) - break; - - init_qp_attr->cap.max_send_wr -= init_qp_attr->cap.max_send_wr / 4; - } while (rc); + if (conn->ibc_queue_depth != peer_ni->ibp_queue_depth) + CWARN("peer %s - queue depth reduced from %u to %u" + " to allow for qp creation\n", + libcfs_nid2str(peer_ni->ibp_nid), + peer_ni->ibp_queue_depth, + conn->ibc_queue_depth); - if (rc) { - CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n", - rc, init_qp_attr->cap.max_send_wr, - init_qp_attr->cap.max_recv_wr); + LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt, + IBLND_RX_MSGS(conn) * sizeof(struct kib_rx)); + if (conn->ibc_rxs == NULL) { + CERROR("Cannot allocate RX buffers\n"); goto failed_2; } - if (init_qp_attr->cap.max_send_wr != IBLND_SEND_WRS(conn)) - CDEBUG(D_NET, "original send wr %d, created with %d\n", - IBLND_SEND_WRS(conn), init_qp_attr->cap.max_send_wr); + rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt, + IBLND_RX_MSG_PAGES(conn)); + if (rc != 0) + goto failed_2; - LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); + kiblnd_map_rx_descs(conn); /* 1 ref for caller and each rxmsg */ atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn)); @@ -910,19 +962,17 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, return conn; failed_2: - kiblnd_destroy_conn(conn, true); - failed_1: - LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); + kiblnd_destroy_conn(conn); + LIBCFS_FREE(conn, sizeof(*conn)); failed_0: return NULL; } void -kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn) +kiblnd_destroy_conn(struct kib_conn *conn) { struct rdma_cm_id *cmid = conn->ibc_cmid; - kib_peer_t *peer = conn->ibc_peer; - int rc; + struct kib_peer_ni *peer_ni = conn->ibc_peer; LASSERT (!in_interrupt()); LASSERT (atomic_read(&conn->ibc_refcount) == 0); @@ -953,19 +1003,17 @@ 
kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn) if (cmid != NULL && cmid->qp != NULL) rdma_destroy_qp(cmid); - if (conn->ibc_cq != NULL) { - rc = ib_destroy_cq(conn->ibc_cq); - if (rc != 0) - CWARN("Error destroying CQ: %d\n", rc); - } + if (conn->ibc_cq) + ib_destroy_cq(conn->ibc_cq); + + kiblnd_txlist_done(&conn->ibc_zombie_txs, -ECONNABORTED, + LNET_MSG_STATUS_OK); if (conn->ibc_rx_pages != NULL) kiblnd_unmap_rx_descs(conn); - if (conn->ibc_rxs != NULL) { - LIBCFS_FREE(conn->ibc_rxs, - IBLND_RX_MSGS(conn) * sizeof(kib_rx_t)); - } + if (conn->ibc_rxs != NULL) + CFS_FREE_PTR_ARRAY(conn->ibc_rxs, IBLND_RX_MSGS(conn)); if (conn->ibc_connvars != NULL) LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars)); @@ -975,31 +1023,26 @@ kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn) /* See CAVEAT EMPTOR above in kiblnd_create_conn */ if (conn->ibc_state != IBLND_CONN_INIT) { - kib_net_t *net = peer->ibp_ni->ni_data; + struct kib_net *net = peer_ni->ibp_ni->ni_data; - kiblnd_peer_decref(peer); + kiblnd_peer_decref(peer_ni); rdma_destroy_id(cmid); atomic_dec(&net->ibn_nconns); } - - if (free_conn) - LIBCFS_FREE(conn, sizeof(*conn)); } int -kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) +kiblnd_close_peer_conns_locked(struct kib_peer_ni *peer_ni, int why) { - kib_conn_t *conn; - struct list_head *ctmp; - struct list_head *cnxt; - int count = 0; - - list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { - conn = list_entry(ctmp, kib_conn_t, ibc_list); + struct kib_conn *conn; + struct kib_conn *cnxt; + int count = 0; + list_for_each_entry_safe(conn, cnxt, &peer_ni->ibp_conns, + ibc_list) { CDEBUG(D_NET, "Closing conn -> %s, " "version: %x, reason: %d\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), conn->ibc_version, why); kiblnd_close_conn_locked(conn, why); @@ -1010,24 +1053,22 @@ kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) } int -kiblnd_close_stale_conns_locked(kib_peer_t *peer, +kiblnd_close_stale_conns_locked(struct kib_peer_ni *peer_ni, int version, __u64 incarnation) { - kib_conn_t *conn; - struct list_head *ctmp; - struct list_head *cnxt; - int count = 0; - - list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { - conn = list_entry(ctmp, kib_conn_t, ibc_list); + struct kib_conn *conn; + struct kib_conn *cnxt; + int count = 0; + list_for_each_entry_safe(conn, cnxt, &peer_ni->ibp_conns, + ibc_list) { if (conn->ibc_version == version && conn->ibc_incarnation == incarnation) continue; CDEBUG(D_NET, "Closing stale conn -> %s version: %x, " "incarnation:%#llx(%x, %#llx)\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), conn->ibc_version, conn->ibc_incarnation, version, incarnation); @@ -1039,9 +1080,9 @@ kiblnd_close_stale_conns_locked(kib_peer_t *peer, } static int -kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) +kiblnd_close_matching_conns(struct lnet_ni *ni, lnet_nid_t nid) { - kib_peer_t *peer; + struct kib_peer_ni *peer_ni; struct list_head *ptmp; struct list_head *pnxt; int lo; @@ -1062,16 +1103,16 @@ kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(!kiblnd_peer_idle(peer)); + peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list); + LASSERT(!kiblnd_peer_idle(peer_ni)); - if (peer->ibp_ni != ni) + if (peer_ni->ibp_ni != ni) continue; - if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid)) + if (!(nid == LNET_NID_ANY || nid == peer_ni->ibp_nid)) 
continue; - count += kiblnd_close_peer_conns_locked(peer, 0); + count += kiblnd_close_peer_conns_locked(peer_ni, 0); } } @@ -1085,7 +1126,7 @@ kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) } static int -kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) +kiblnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg) { struct libcfs_ioctl_data *data = arg; int rc = -EINVAL; @@ -1107,7 +1148,7 @@ kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) break; } case IOC_LIBCFS_GET_CONN: { - kib_conn_t *conn; + struct kib_conn *conn; rc = 0; conn = kiblnd_get_conn_by_idx(ni, data->ioc_count); @@ -1116,15 +1157,15 @@ kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) break; } - LASSERT (conn->ibc_cmid != NULL); - data->ioc_nid = conn->ibc_peer->ibp_nid; - if (conn->ibc_cmid->route.path_rec == NULL) - data->ioc_u32[0] = 0; /* iWarp has no path MTU */ - else - data->ioc_u32[0] = - ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu); - kiblnd_conn_decref(conn); - break; + LASSERT(conn->ibc_cmid != NULL); + data->ioc_nid = conn->ibc_peer->ibp_nid; + if (conn->ibc_cmid->route.path_rec == NULL) + data->ioc_u32[0] = 0; /* iWarp has no path MTU */ + else + data->ioc_u32[0] = + ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu); + kiblnd_conn_decref(conn); + break; } case IOC_LIBCFS_CLOSE_CONNECTION: { rc = kiblnd_close_matching_conns(ni, data->ioc_nid); @@ -1139,38 +1180,7 @@ kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) } static void -kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when) -{ - cfs_time_t last_alive = 0; - cfs_time_t now = cfs_time_current(); - rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_peer_t *peer; - unsigned long flags; - - read_lock_irqsave(glock, flags); - - peer = kiblnd_find_peer_locked(nid); - if (peer != NULL) - last_alive = peer->ibp_last_alive; - - read_unlock_irqrestore(glock, flags); - - if (last_alive != 0) - *when = last_alive; - - /* peer is not persistent in hash, trigger peer creation - * and connection establishment with a NULL tx */ - if (peer == NULL) - kiblnd_launch_tx(ni, NULL, nid); - - CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n", - libcfs_nid2str(nid), peer, - last_alive ? 
cfs_duration_sec(now - last_alive) : -1); - return; -} - -static void -kiblnd_free_pages(kib_pages_t *p) +kiblnd_free_pages(struct kib_pages *p) { int npages = p->ibp_npages; int i; @@ -1180,23 +1190,23 @@ kiblnd_free_pages(kib_pages_t *p) __free_page(p->ibp_pages[i]); } - LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages])); + LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages])); } int -kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) +kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages) { - kib_pages_t *p; - int i; + struct kib_pages *p; + int i; LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt, - offsetof(kib_pages_t, ibp_pages[npages])); + offsetof(struct kib_pages, ibp_pages[npages])); if (p == NULL) { CERROR("Can't allocate descriptor for %d pages\n", npages); return -ENOMEM; } - memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages])); + memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages])); p->ibp_npages = npages; for (i = 0; i < npages; i++) { @@ -1214,9 +1224,9 @@ kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) } void -kiblnd_unmap_rx_descs(kib_conn_t *conn) +kiblnd_unmap_rx_descs(struct kib_conn *conn) { - kib_rx_t *rx; + struct kib_rx *rx; int i; LASSERT (conn->ibc_rxs != NULL); @@ -1239,9 +1249,9 @@ kiblnd_unmap_rx_descs(kib_conn_t *conn) } void -kiblnd_map_rx_descs(kib_conn_t *conn) +kiblnd_map_rx_descs(struct kib_conn *conn) { - kib_rx_t *rx; + struct kib_rx *rx; struct page *pg; int pg_off; int ipg; @@ -1252,7 +1262,7 @@ kiblnd_map_rx_descs(kib_conn_t *conn) rx = &conn->ibc_rxs[i]; rx->rx_conn = conn; - rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off); + rx->rx_msg = (struct kib_msg *)(((char *)page_address(pg)) + pg_off); rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev, @@ -1278,11 +1288,11 @@ kiblnd_map_rx_descs(kib_conn_t *conn) } static void -kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) +kiblnd_unmap_tx_pool(struct kib_tx_pool *tpo) { - kib_hca_dev_t *hdev = tpo->tpo_hdev; - kib_tx_t *tx; - int i; + struct kib_hca_dev *hdev = tpo->tpo_hdev; + struct kib_tx *tx; + int i; LASSERT (tpo->tpo_pool.po_allocated == 0); @@ -1301,10 +1311,10 @@ kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) tpo->tpo_hdev = NULL; } -static kib_hca_dev_t * -kiblnd_current_hdev(kib_dev_t *dev) +static struct kib_hca_dev * +kiblnd_current_hdev(struct kib_dev *dev) { - kib_hca_dev_t *hdev; + struct kib_hca_dev *hdev; unsigned long flags; int i = 0; @@ -1314,8 +1324,7 @@ kiblnd_current_hdev(kib_dev_t *dev) if (i++ % 50 == 0) CDEBUG(D_NET, "%s: Wait for failover\n", dev->ibd_ifname); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1) / 100); + schedule_timeout_interruptible(cfs_time_seconds(1) / 100); read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); } @@ -1329,14 +1338,14 @@ kiblnd_current_hdev(kib_dev_t *dev) } static void -kiblnd_map_tx_pool(kib_tx_pool_t *tpo) -{ - kib_pages_t *txpgs = tpo->tpo_tx_pages; - kib_pool_t *pool = &tpo->tpo_pool; - kib_net_t *net = pool->po_owner->ps_net; - kib_dev_t *dev; - struct page *page; - kib_tx_t *tx; +kiblnd_map_tx_pool(struct kib_tx_pool *tpo) +{ + struct kib_pages *txpgs = tpo->tpo_tx_pages; + struct kib_pool *pool = &tpo->tpo_pool; + struct kib_net *net = pool->po_owner->ps_net; + struct kib_dev *dev; + struct page *page; + struct kib_tx *tx; int page_offset; int ipage; int i; @@ -1345,11 +1354,11 @@ kiblnd_map_tx_pool(kib_tx_pool_t *tpo) dev = net->ibn_dev; - /* pre-mapped messages are not bigger than 1 page */ - CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE); + /* 
pre-mapped messages are not bigger than 1 page */ + BUILD_BUG_ON(IBLND_MSG_SIZE > PAGE_SIZE); - /* No fancy arithmetic when we do the buffer calculations */ - CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0); + /* No fancy arithmetic when we do the buffer calculations */ + BUILD_BUG_ON(PAGE_SIZE % IBLND_MSG_SIZE != 0); tpo->tpo_hdev = kiblnd_current_hdev(dev); @@ -1357,8 +1366,8 @@ kiblnd_map_tx_pool(kib_tx_pool_t *tpo) page = txpgs->ibp_pages[ipage]; tx = &tpo->tpo_tx_descs[i]; - tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) + - page_offset); + tx->tx_msg = (struct kib_msg *)(((char *)page_address(page)) + + page_offset); tx->tx_msgaddr = kiblnd_dma_map_single(tpo->tpo_hdev->ibh_ibdev, tx->tx_msg, @@ -1381,36 +1390,13 @@ kiblnd_map_tx_pool(kib_tx_pool_t *tpo) } } -struct ib_mr * -kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd, - int negotiated_nfrags) -{ - kib_net_t *net = ni->ni_data; - kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev; - struct lnet_ioctl_config_o2iblnd_tunables *tunables; - int mod; - __u16 nfrags; - - tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib; - mod = tunables->lnd_map_on_demand; - nfrags = (negotiated_nfrags != -1) ? negotiated_nfrags : mod; - - LASSERT(hdev->ibh_mrs != NULL); - - if (mod > 0 && nfrags <= rd->rd_nfrags) - return NULL; - - return hdev->ibh_mrs; -} - static void -kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo) +kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo) { LASSERT(fpo->fpo_map_count == 0); - if (fpo->fpo_is_fmr) { - if (fpo->fmr.fpo_fmr_pool) - ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool); + if (fpo->fpo_is_fmr && fpo->fmr.fpo_fmr_pool) { + ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool); } else { struct kib_fast_reg_descriptor *frd, *tmp; int i = 0; @@ -1439,7 +1425,7 @@ kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo) static void kiblnd_destroy_fmr_pool_list(struct list_head *head) { - kib_fmr_pool_t *fpo, *tmp; + struct kib_fmr_pool *fpo, *tmp; list_for_each_entry_safe(fpo, tmp, head, fpo_list) { list_del(&fpo->fpo_list); @@ -1465,10 +1451,11 @@ kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables, return max(IBLND_FMR_POOL_FLUSH, size); } -static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo) +static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps, + struct kib_fmr_pool *fpo) { struct ib_fmr_pool_param param = { - .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE, + .max_pages_per_fmr = LNET_MAX_IOV, .page_shift = PAGE_SHIFT, .access = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE), @@ -1488,15 +1475,20 @@ static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo) else CERROR("FMRs are not supported\n"); } + fpo->fpo_is_fmr = true; return rc; } -static int kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo) +static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, + struct kib_fmr_pool *fpo, + enum kib_dev_caps dev_caps) { struct kib_fast_reg_descriptor *frd, *tmp; int i, rc; + fpo->fpo_is_fmr = false; + INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list); fpo->fast_reg.fpo_pool_size = 0; for (i = 0; i < fps->fps_pool_size; i++) { @@ -1511,7 +1503,7 @@ static int kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo) #ifndef HAVE_IB_MAP_MR_SG frd->frd_frpl = ib_alloc_fast_reg_page_list(fpo->fpo_hdev->ibh_ibdev, - LNET_MAX_PAYLOAD/PAGE_SIZE); + LNET_MAX_IOV); if (IS_ERR(frd->frd_frpl)) { rc = PTR_ERR(frd->frd_frpl); CERROR("Failed to allocate ib_fast_reg_page_list: %d\n", @@ -1523,11 +1515,28 @@ static int 
kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo) #ifdef HAVE_IB_ALLOC_FAST_REG_MR frd->frd_mr = ib_alloc_fast_reg_mr(fpo->fpo_hdev->ibh_pd, - LNET_MAX_PAYLOAD/PAGE_SIZE); + LNET_MAX_IOV); #else + /* + * it is expected to get here if this is an MLX-5 card. + * MLX-4 cards will always use FMR and MLX-5 cards will + * always use fast_reg. It turns out that some MLX-5 cards + * (possibly due to older FW versions) do not natively support + * gaps. So we will need to track them here. + */ frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd, - IB_MR_TYPE_MEM_REG, - LNET_MAX_PAYLOAD/PAGE_SIZE); +#ifdef IB_MR_TYPE_SG_GAPS + ((*kiblnd_tunables.kib_use_fastreg_gaps == 1) && + (dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT)) ? + IB_MR_TYPE_SG_GAPS : + IB_MR_TYPE_MEM_REG, +#else + IB_MR_TYPE_MEM_REG, +#endif + LNET_MAX_IOV); + if ((*kiblnd_tunables.kib_use_fastreg_gaps == 1) && + (dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT)) + CWARN("using IB_MR_TYPE_SG_GAPS, expect a performance drop\n"); #endif if (IS_ERR(frd->frd_mr)) { rc = PTR_ERR(frd->frd_mr); @@ -1570,67 +1579,30 @@ out: return rc; } -static int -kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo) +static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps, + struct kib_fmr_pool **pp_fpo) { - struct ib_device_attr *dev_attr; - kib_dev_t *dev = fps->fps_net->ibn_dev; - kib_fmr_pool_t *fpo; + struct kib_dev *dev = fps->fps_net->ibn_dev; + struct kib_fmr_pool *fpo; int rc; -#ifndef HAVE_IB_DEVICE_ATTRS - dev_attr = kmalloc(sizeof(*dev_attr), GFP_KERNEL); - if (!dev_attr) - return -ENOMEM; -#endif - LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo)); if (!fpo) { - rc = -ENOMEM; - goto out_dev_attr; + return -ENOMEM; } + memset(fpo, 0, sizeof(*fpo)); fpo->fpo_hdev = kiblnd_current_hdev(dev); -#ifdef HAVE_IB_DEVICE_ATTRS - dev_attr = &fpo->fpo_hdev->ibh_ibdev->attrs; -#else - rc = ib_query_device(fpo->fpo_hdev->ibh_ibdev, dev_attr); - if (rc) { - CERROR("Query device failed for %s: %d\n", - fpo->fpo_hdev->ibh_ibdev->name, rc); - goto out_dev_attr; - } -#endif - - /* Check for FMR or FastReg support */ - fpo->fpo_is_fmr = 0; - if (fpo->fpo_hdev->ibh_ibdev->alloc_fmr && - fpo->fpo_hdev->ibh_ibdev->dealloc_fmr && - fpo->fpo_hdev->ibh_ibdev->map_phys_fmr && - fpo->fpo_hdev->ibh_ibdev->unmap_fmr) { - LCONSOLE_INFO("Using FMR for registration\n"); - fpo->fpo_is_fmr = 1; - } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { - LCONSOLE_INFO("Using FastReg for registration\n"); - } else { - rc = -ENOSYS; - LCONSOLE_ERROR_MSG(rc, "IB device does not support FMRs nor FastRegs, can't register memory\n"); - goto out_dev_attr; - } - - if (fpo->fpo_is_fmr) + if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED) rc = kiblnd_alloc_fmr_pool(fps, fpo); else - rc = kiblnd_alloc_freg_pool(fps, fpo); + rc = kiblnd_alloc_freg_pool(fps, fpo, dev->ibd_dev_caps); if (rc) goto out_fpo; -#ifndef HAVE_IB_DEVICE_ATTRS - kfree(dev_attr); -#endif - fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); - fpo->fpo_owner = fps; + fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE; + fpo->fpo_owner = fps; *pp_fpo = fpo; return 0; @@ -1638,17 +1610,11 @@ kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo) out_fpo: kiblnd_hdev_decref(fpo->fpo_hdev); LIBCFS_FREE(fpo, sizeof(*fpo)); - -out_dev_attr: -#ifndef HAVE_IB_DEVICE_ATTRS - kfree(dev_attr); -#endif - return rc; } static void -kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, struct list_head *zombies) 
+kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps, struct list_head *zombies) { if (fps->fps_net == NULL) /* intialized? */ return; @@ -1656,21 +1622,22 @@ kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, struct list_head *zombies) spin_lock(&fps->fps_lock); while (!list_empty(&fps->fps_pool_list)) { - kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next, - kib_fmr_pool_t, fpo_list); + struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next, + struct kib_fmr_pool, + fpo_list); + fpo->fpo_failed = 1; - list_del(&fpo->fpo_list); if (fpo->fpo_map_count == 0) - list_add(&fpo->fpo_list, zombies); + list_move(&fpo->fpo_list, zombies); else - list_add(&fpo->fpo_list, &fps->fps_failed_pool_list); + list_move(&fpo->fpo_list, &fps->fps_failed_pool_list); } spin_unlock(&fps->fps_lock); } static void -kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps) +kiblnd_fini_fmr_poolset(struct kib_fmr_poolset *fps) { if (fps->fps_net != NULL) { /* initialized? */ kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list); @@ -1679,14 +1646,14 @@ kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps) } static int -kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts, - kib_net_t *net, +kiblnd_init_fmr_poolset(struct kib_fmr_poolset *fps, int cpt, int ncpts, + struct kib_net *net, struct lnet_ioctl_config_o2iblnd_tunables *tunables) { - kib_fmr_pool_t *fpo; - int rc; + struct kib_fmr_pool *fpo; + int rc; - memset(fps, 0, sizeof(kib_fmr_poolset_t)); + memset(fps, 0, sizeof(struct kib_fmr_poolset)); fps->fps_net = net; fps->fps_cpt = cpt; @@ -1707,19 +1674,19 @@ kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts, } static int -kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, cfs_time_t now) +kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, time64_t now) { if (fpo->fpo_map_count != 0) /* still in use */ return 0; if (fpo->fpo_failed) return 1; - return cfs_time_aftereq(now, fpo->fpo_deadline); + return now >= fpo->fpo_deadline; } static int -kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd) +kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd) { - kib_hca_dev_t *hdev; + struct kib_hca_dev *hdev; __u64 *pages = tx->tx_pages; int npages; int size; @@ -1739,14 +1706,14 @@ kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd) } void -kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) +kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status) { - struct list_head zombies = LIST_HEAD_INIT(zombies); - kib_fmr_pool_t *fpo = fmr->fmr_pool; - kib_fmr_poolset_t *fps; - cfs_time_t now = cfs_time_current(); - kib_fmr_pool_t *tmp; - int rc; + LIST_HEAD(zombies); + struct kib_fmr_pool *fpo = fmr->fmr_pool; + struct kib_fmr_poolset *fps; + time64_t now = ktime_get_seconds(); + struct kib_fmr_pool *tmp; + int rc; if (!fpo) return; @@ -1754,8 +1721,7 @@ kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) fps = fpo->fpo_owner; if (fpo->fpo_is_fmr) { if (fmr->fmr_pfmr) { - rc = ib_fmr_pool_unmap(fmr->fmr_pfmr); - LASSERT(!rc); + ib_fmr_pool_unmap(fmr->fmr_pfmr); fmr->fmr_pfmr = NULL; } @@ -1795,11 +1761,11 @@ kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) kiblnd_destroy_fmr_pool_list(&zombies); } -int -kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx, kib_rdma_desc_t *rd, - __u32 nob, __u64 iov, kib_fmr_t *fmr) +int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx, + struct kib_rdma_desc *rd, u32 nob, u64 iov, + struct kib_fmr *fmr) { - kib_fmr_pool_t *fpo; + struct kib_fmr_pool *fpo; __u64 *pages = tx->tx_pages; __u64 version; bool is_rx = (rd != tx->tx_rd); @@ -1811,7 
+1777,7 @@ again: spin_lock(&fps->fps_lock); version = fps->fps_version; list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) { - fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); + fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE; fpo->fpo_map_count++; if (fpo->fpo_is_fmr) { @@ -1824,8 +1790,8 @@ again: tx_pages_mapped = 1; } - pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool, - pages, npages, iov); + pfmr = kib_fmr_pool_map(fpo->fmr.fpo_fmr_pool, + pages, npages, iov); if (likely(!IS_ERR(pfmr))) { fmr->fmr_key = is_rx ? pfmr->fmr->rkey : pfmr->fmr->lkey; @@ -1877,19 +1843,17 @@ again: #ifdef HAVE_IB_MAP_MR_SG #ifdef HAVE_IB_MAP_MR_SG_5ARGS n = ib_map_mr_sg(mr, tx->tx_frags, - tx->tx_nfrags, NULL, PAGE_SIZE); + rd->rd_nfrags, NULL, PAGE_SIZE); #else n = ib_map_mr_sg(mr, tx->tx_frags, - tx->tx_nfrags, PAGE_SIZE); + rd->rd_nfrags, PAGE_SIZE); #endif - if (unlikely(n != tx->tx_nfrags)) { + if (unlikely(n != rd->rd_nfrags)) { CERROR("Failed to map mr %d/%d " - "elements\n", n, tx->tx_nfrags); + "elements\n", n, rd->rd_nfrags); return n < 0 ? n : -EINVAL; } - mr->iova = iov; - wr = &frd->frd_fastreg_wr; memset(wr, 0, sizeof(*wr)); @@ -1937,7 +1901,7 @@ again: return 0; } spin_unlock(&fps->fps_lock); - rc = -EBUSY; + rc = -EAGAIN; } spin_lock(&fps->fps_lock); @@ -1958,12 +1922,12 @@ again: spin_unlock(&fps->fps_lock); CDEBUG(D_NET, "Another thread is allocating new " "FMR pool, waiting for her to complete\n"); - schedule(); + wait_var_event(fps, !fps->fps_increasing); goto again; } - if (cfs_time_before(cfs_time_current(), fps->fps_next_retry)) { + if (ktime_get_seconds() < fps->fps_next_retry) { /* someone failed recently */ spin_unlock(&fps->fps_lock); return -EAGAIN; @@ -1976,11 +1940,12 @@ again: rc = kiblnd_create_fmr_pool(fps, &fpo); spin_lock(&fps->fps_lock); fps->fps_increasing = 0; + wake_up_var(fps); if (rc == 0) { fps->fps_version++; list_add_tail(&fpo->fpo_list, &fps->fps_pool_list); } else { - fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY); + fps->fps_next_retry = ktime_get_seconds() + IBLND_POOL_RETRY; } spin_unlock(&fps->fps_lock); @@ -1988,7 +1953,7 @@ again: } static void -kiblnd_fini_pool(kib_pool_t *pool) +kiblnd_fini_pool(struct kib_pool *pool) { LASSERT(list_empty(&pool->po_free_list)); LASSERT(pool->po_allocated == 0); @@ -1997,24 +1962,24 @@ kiblnd_fini_pool(kib_pool_t *pool) } static void -kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size) +kiblnd_init_pool(struct kib_poolset *ps, struct kib_pool *pool, int size) { CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name); - memset(pool, 0, sizeof(kib_pool_t)); + memset(pool, 0, sizeof(struct kib_pool)); INIT_LIST_HEAD(&pool->po_free_list); - pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); - pool->po_owner = ps; - pool->po_size = size; + pool->po_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE; + pool->po_owner = ps; + pool->po_size = size; } static void kiblnd_destroy_pool_list(struct list_head *head) { - kib_pool_t *pool; + struct kib_pool *pool; while (!list_empty(head)) { - pool = list_entry(head->next, kib_pool_t, po_list); + pool = list_entry(head->next, struct kib_pool, po_list); list_del(&pool->po_list); LASSERT(pool->po_owner != NULL); @@ -2023,27 +1988,27 @@ kiblnd_destroy_pool_list(struct list_head *head) } static void -kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) +kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies) { if (ps->ps_net == NULL) /* intialized? 
*/ return; spin_lock(&ps->ps_lock); while (!list_empty(&ps->ps_pool_list)) { - kib_pool_t *po = list_entry(ps->ps_pool_list.next, - kib_pool_t, po_list); + struct kib_pool *po = list_entry(ps->ps_pool_list.next, + struct kib_pool, po_list); + po->po_failed = 1; - list_del(&po->po_list); if (po->po_allocated == 0) - list_add(&po->po_list, zombies); + list_move(&po->po_list, zombies); else - list_add(&po->po_list, &ps->ps_failed_pool_list); + list_move(&po->po_list, &ps->ps_failed_pool_list); } spin_unlock(&ps->ps_lock); } static void -kiblnd_fini_poolset(kib_poolset_t *ps) +kiblnd_fini_poolset(struct kib_poolset *ps) { if (ps->ps_net != NULL) { /* initialized? */ kiblnd_destroy_pool_list(&ps->ps_failed_pool_list); @@ -2052,17 +2017,17 @@ kiblnd_fini_poolset(kib_poolset_t *ps) } static int -kiblnd_init_poolset(kib_poolset_t *ps, int cpt, - kib_net_t *net, char *name, int size, +kiblnd_init_poolset(struct kib_poolset *ps, int cpt, + struct kib_net *net, char *name, int size, kib_ps_pool_create_t po_create, kib_ps_pool_destroy_t po_destroy, kib_ps_node_init_t nd_init, kib_ps_node_fini_t nd_fini) { - kib_pool_t *pool; - int rc; + struct kib_pool *pool; + int rc; - memset(ps, 0, sizeof(kib_poolset_t)); + memset(ps, 0, sizeof(struct kib_poolset)); ps->ps_cpt = cpt; ps->ps_net = net; @@ -2088,22 +2053,22 @@ kiblnd_init_poolset(kib_poolset_t *ps, int cpt, } static int -kiblnd_pool_is_idle(kib_pool_t *pool, cfs_time_t now) +kiblnd_pool_is_idle(struct kib_pool *pool, time64_t now) { if (pool->po_allocated != 0) /* still in use */ return 0; if (pool->po_failed) return 1; - return cfs_time_aftereq(now, pool->po_deadline); + return now >= pool->po_deadline; } void -kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) +kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node) { - struct list_head zombies = LIST_HEAD_INIT(zombies); - kib_poolset_t *ps = pool->po_owner; - kib_pool_t *tmp; - cfs_time_t now = cfs_time_current(); + LIST_HEAD(zombies); + struct kib_poolset *ps = pool->po_owner; + struct kib_pool *tmp; + time64_t now = ktime_get_seconds(); spin_lock(&ps->ps_lock); @@ -2129,14 +2094,14 @@ kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) } struct list_head * -kiblnd_pool_alloc_node(kib_poolset_t *ps) +kiblnd_pool_alloc_node(struct kib_poolset *ps) { struct list_head *node; - kib_pool_t *pool; + struct kib_pool *pool; int rc; unsigned int interval = 1; - cfs_time_t time_before; - unsigned int trips = 0; + ktime_t time_before; + unsigned int trips = 0; again: spin_lock(&ps->ps_lock); @@ -2145,7 +2110,8 @@ again: continue; pool->po_allocated++; - pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); + pool->po_deadline = ktime_get_seconds() + + IBLND_POOL_DEADLINE; node = pool->po_free_list.next; list_del(node); @@ -2162,20 +2128,18 @@ again: /* another thread is allocating a new pool */ spin_unlock(&ps->ps_lock); trips++; - CDEBUG(D_NET, "Another thread is allocating new " - "%s pool, waiting %d HZs for her to complete." - "trips = %d\n", + CDEBUG(D_NET, + "Another thread is allocating new %s pool, waiting %d jiffies for her to complete. 
trips = %d\n", ps->ps_name, interval, trips); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(interval); + schedule_timeout_interruptible(interval); if (interval < cfs_time_seconds(1)) interval *= 2; goto again; } - if (cfs_time_before(cfs_time_current(), ps->ps_next_retry)) { + if (ktime_get_seconds() < ps->ps_next_retry) { /* someone failed recently */ spin_unlock(&ps->ps_lock); return NULL; @@ -2185,17 +2149,17 @@ again: spin_unlock(&ps->ps_lock); CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name); - time_before = cfs_time_current(); + time_before = ktime_get(); rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool); - CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete", - cfs_time_current() - time_before); + CDEBUG(D_NET, "ps_pool_create took %lld ms to complete", + ktime_ms_delta(ktime_get(), time_before)); spin_lock(&ps->ps_lock); ps->ps_increasing = 0; if (rc == 0) { list_add_tail(&pool->po_list, &ps->ps_pool_list); } else { - ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY); + ps->ps_next_retry = ktime_get_seconds() + IBLND_POOL_RETRY; CERROR("Can't allocate new %s pool because out of memory\n", ps->ps_name); } @@ -2205,10 +2169,11 @@ again: } static void -kiblnd_destroy_tx_pool(kib_pool_t *pool) +kiblnd_destroy_tx_pool(struct kib_pool *pool) { - kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool); - int i; + struct kib_tx_pool *tpo = container_of(pool, struct kib_tx_pool, + tpo_pool); + int i; LASSERT (pool->po_allocated == 0); @@ -2220,53 +2185,53 @@ kiblnd_destroy_tx_pool(kib_pool_t *pool) if (tpo->tpo_tx_descs == NULL) goto out; - for (i = 0; i < pool->po_size; i++) { - kib_tx_t *tx = &tpo->tpo_tx_descs[i]; + for (i = 0; i < pool->po_size; i++) { + struct kib_tx *tx = &tpo->tpo_tx_descs[i]; + int wrq_sge = *kiblnd_tunables.kib_wrq_sge; list_del(&tx->tx_list); - if (tx->tx_pages != NULL) - LIBCFS_FREE(tx->tx_pages, - LNET_MAX_IOV * - sizeof(*tx->tx_pages)); - if (tx->tx_frags != NULL) - LIBCFS_FREE(tx->tx_frags, - (1 + IBLND_MAX_RDMA_FRAGS) * - sizeof(*tx->tx_frags)); - if (tx->tx_wrq != NULL) - LIBCFS_FREE(tx->tx_wrq, - (1 + IBLND_MAX_RDMA_FRAGS) * - sizeof(*tx->tx_wrq)); - if (tx->tx_sge != NULL) - LIBCFS_FREE(tx->tx_sge, - (1 + IBLND_MAX_RDMA_FRAGS) * - sizeof(*tx->tx_sge)); - if (tx->tx_rd != NULL) - LIBCFS_FREE(tx->tx_rd, - offsetof(kib_rdma_desc_t, - rd_frags[IBLND_MAX_RDMA_FRAGS])); - } - - LIBCFS_FREE(tpo->tpo_tx_descs, - pool->po_size * sizeof(kib_tx_t)); + if (tx->tx_pages != NULL) + CFS_FREE_PTR_ARRAY(tx->tx_pages, LNET_MAX_IOV); + if (tx->tx_frags != NULL) + CFS_FREE_PTR_ARRAY(tx->tx_frags, + (1 + IBLND_MAX_RDMA_FRAGS)); + if (tx->tx_wrq != NULL) + CFS_FREE_PTR_ARRAY(tx->tx_wrq, + (1 + IBLND_MAX_RDMA_FRAGS)); + if (tx->tx_sge != NULL) + CFS_FREE_PTR_ARRAY(tx->tx_sge, + (1 + IBLND_MAX_RDMA_FRAGS) * + wrq_sge); + if (tx->tx_rd != NULL) + LIBCFS_FREE(tx->tx_rd, + offsetof(struct kib_rdma_desc, + rd_frags[IBLND_MAX_RDMA_FRAGS])); + } + + CFS_FREE_PTR_ARRAY(tpo->tpo_tx_descs, pool->po_size); out: - kiblnd_fini_pool(pool); - LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t)); + kiblnd_fini_pool(pool); + CFS_FREE_PTR(tpo); } -static int kiblnd_tx_pool_size(int ncpts) +static int kiblnd_tx_pool_size(struct lnet_ni *ni, int ncpts) { - int ntx = *kiblnd_tunables.kib_ntx / ncpts; + struct lnet_ioctl_config_o2iblnd_tunables *tunables; + int ntx; + + tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib; + ntx = tunables->lnd_ntx / ncpts; return max(IBLND_TX_POOL, ntx); } static int -kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_pool_t 
**pp_po) +kiblnd_create_tx_pool(struct kib_poolset *ps, int size, struct kib_pool **pp_po) { int i; int npg; - kib_pool_t *pool; - kib_tx_pool_t *tpo; + struct kib_pool *pool; + struct kib_tx_pool *tpo; LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo)); if (tpo == NULL) { @@ -2282,22 +2247,23 @@ kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po) npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE; if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) { CERROR("Can't allocate tx pages: %d\n", npg); - LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t)); + CFS_FREE_PTR(tpo); return -ENOMEM; } LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt, - size * sizeof(kib_tx_t)); + size * sizeof(struct kib_tx)); if (tpo->tpo_tx_descs == NULL) { CERROR("Can't allocate %d tx descriptors\n", size); ps->ps_pool_destroy(pool); return -ENOMEM; } - memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t)); + memset(tpo->tpo_tx_descs, 0, size * sizeof(struct kib_tx)); for (i = 0; i < size; i++) { - kib_tx_t *tx = &tpo->tpo_tx_descs[i]; + struct kib_tx *tx = &tpo->tpo_tx_descs[i]; + int wrq_sge = *kiblnd_tunables.kib_wrq_sge; tx->tx_pool = tpo; if (ps->ps_net->ibn_fmr_ps != NULL) { @@ -2323,13 +2289,13 @@ kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po) break; LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt, - (1 + IBLND_MAX_RDMA_FRAGS) * + (1 + IBLND_MAX_RDMA_FRAGS) * wrq_sge * sizeof(*tx->tx_sge)); if (tx->tx_sge == NULL) break; LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt, - offsetof(kib_rdma_desc_t, + offsetof(struct kib_rdma_desc, rd_frags[IBLND_MAX_RDMA_FRAGS])); if (tx->tx_rd == NULL) break; @@ -2346,23 +2312,24 @@ kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po) } static void -kiblnd_tx_init(kib_pool_t *pool, struct list_head *node) +kiblnd_tx_init(struct kib_pool *pool, struct list_head *node) { - kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t, - tps_poolset); - kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list); + struct kib_tx_poolset *tps = container_of(pool->po_owner, + struct kib_tx_poolset, + tps_poolset); + struct kib_tx *tx = list_entry(node, struct kib_tx, tx_list); tx->tx_cookie = tps->tps_next_tx_cookie++; } static void -kiblnd_net_fini_pools(kib_net_t *net) +kiblnd_net_fini_pools(struct kib_net *net) { int i; cfs_cpt_for_each(i, lnet_cpt_table()) { - kib_tx_poolset_t *tps; - kib_fmr_poolset_t *fps; + struct kib_tx_poolset *tps; + struct kib_fmr_poolset *fps; if (net->ibn_tx_ps != NULL) { tps = net->ibn_tx_ps[i]; @@ -2387,29 +2354,39 @@ kiblnd_net_fini_pools(kib_net_t *net) } static int -kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts, int ncpts) +kiblnd_net_init_pools(struct kib_net *net, struct lnet_ni *ni, __u32 *cpts, + int ncpts) { struct lnet_ioctl_config_o2iblnd_tunables *tunables; +#ifdef HAVE_IB_GET_DMA_MR unsigned long flags; +#endif int cpt; int rc; int i; tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib; +#ifdef HAVE_IB_GET_DMA_MR read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (tunables->lnd_map_on_demand == 0) { + /* + * if lnd_map_on_demand is zero then we have effectively disabled + * FMR or FastReg and we're using global memory regions + * exclusively. 
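+	 *
+	 * Note this fallback only exists when the kernel still
+	 * provides ib_get_dma_mr() (the HAVE_IB_GET_DMA_MR case
+	 * above); on newer kernels the FMR/FastReg pools are always
+	 * initialized.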
+ */ + if (!tunables->lnd_map_on_demand) { read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); goto create_tx_pool; } read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); +#endif - if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) { + if (tunables->lnd_fmr_pool_size < tunables->lnd_ntx / 4) { CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n", tunables->lnd_fmr_pool_size, - *kiblnd_tunables.kib_ntx / 4); + tunables->lnd_ntx / 4); rc = -EINVAL; goto failed; } @@ -2422,7 +2399,7 @@ kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts, int ncpts) * FMR pool and map-on-demand if premapping failed */ net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(kib_fmr_poolset_t)); + sizeof(struct kib_fmr_poolset)); if (net->ibn_fmr_ps == NULL) { CERROR("Failed to allocate FMR pool array\n"); rc = -ENOMEM; @@ -2443,9 +2420,11 @@ kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts, int ncpts) if (i > 0) LASSERT(i == ncpts); +#ifdef HAVE_IB_GET_DMA_MR create_tx_pool: +#endif net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(), - sizeof(kib_tx_poolset_t)); + sizeof(struct kib_tx_poolset)); if (net->ibn_tx_ps == NULL) { CERROR("Failed to allocate tx pool array\n"); rc = -ENOMEM; @@ -2456,7 +2435,7 @@ kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts, int ncpts) cpt = (cpts == NULL) ? i : cpts[i]; rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset, cpt, net, "TX", - kiblnd_tx_pool_size(ncpts), + kiblnd_tx_pool_size(ni, ncpts), kiblnd_create_tx_pool, kiblnd_destroy_tx_pool, kiblnd_tx_init, NULL); @@ -2475,51 +2454,171 @@ kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts, int ncpts) } static int -kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) +kiblnd_port_get_attr(struct kib_hca_dev *hdev) +{ + struct ib_port_attr *port_attr; + int rc; + unsigned long flags; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + + LIBCFS_ALLOC(port_attr, sizeof(*port_attr)); + if (port_attr == NULL) { + CDEBUG(D_NETERROR, "Out of memory\n"); + return -ENOMEM; + } + + rc = ib_query_port(hdev->ibh_ibdev, hdev->ibh_port, port_attr); + + write_lock_irqsave(g_lock, flags); + + if (rc == 0) + hdev->ibh_state = port_attr->state == IB_PORT_ACTIVE + ? 
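/*
 * Editor's sketch (not from the patch): the core of the port-state
 * probe that kiblnd_port_get_attr() performs above, minus the heap
 * allocation and the kib_global_lock handling.  ib_query_port() and
 * IB_PORT_ACTIVE are standard RDMA core API.
 */
static bool demo_port_is_active(struct ib_device *ibdev, u8 port)
{
	struct ib_port_attr attr;

	if (ib_query_port(ibdev, port, &attr) != 0)
		return false;	/* treat a failed query as "down" */

	return attr.state == IB_PORT_ACTIVE;
}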
IBLND_DEV_PORT_ACTIVE + : IBLND_DEV_PORT_DOWN; + + write_unlock_irqrestore(g_lock, flags); + LIBCFS_FREE(port_attr, sizeof(*port_attr)); + + if (rc != 0) { + CDEBUG(D_NETERROR, "Failed to query IB port: %d\n", rc); + return rc; + } + return 0; +} + +static inline void +kiblnd_set_ni_fatal_on(struct kib_hca_dev *hdev, int val) +{ + struct kib_net *net; + + /* for health check */ + list_for_each_entry(net, &hdev->ibh_dev->ibd_nets, ibn_list) { + if (val) + CDEBUG(D_NETERROR, "Fatal device error for NI %s\n", + libcfs_nid2str(net->ibn_ni->ni_nid)); + atomic_set(&net->ibn_ni->ni_fatal_error_on, val); + } +} + +void +kiblnd_event_handler(struct ib_event_handler *handler, struct ib_event *event) +{ + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + struct kib_hca_dev *hdev; + unsigned long flags; + + hdev = container_of(handler, struct kib_hca_dev, ibh_event_handler); + + write_lock_irqsave(g_lock, flags); + + switch (event->event) { + case IB_EVENT_DEVICE_FATAL: + CDEBUG(D_NET, "IB device fatal\n"); + hdev->ibh_state = IBLND_DEV_FATAL; + kiblnd_set_ni_fatal_on(hdev, 1); + break; + case IB_EVENT_PORT_ACTIVE: + CDEBUG(D_NET, "IB port active\n"); + if (event->element.port_num == hdev->ibh_port) { + hdev->ibh_state = IBLND_DEV_PORT_ACTIVE; + kiblnd_set_ni_fatal_on(hdev, 0); + } + break; + case IB_EVENT_PORT_ERR: + CDEBUG(D_NET, "IB port err\n"); + if (event->element.port_num == hdev->ibh_port) { + hdev->ibh_state = IBLND_DEV_PORT_DOWN; + kiblnd_set_ni_fatal_on(hdev, 1); + } + break; + default: + break; + } + write_unlock_irqrestore(g_lock, flags); +} + +static int +kiblnd_hdev_get_attr(struct kib_hca_dev *hdev) { + struct ib_device_attr *dev_attr; + int rc = 0; + int rc2 = 0; + + /* It's safe to assume a HCA can handle a page size + * matching that of the native system */ + hdev->ibh_page_shift = PAGE_SHIFT; + hdev->ibh_page_size = 1 << PAGE_SHIFT; + hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1); + #ifndef HAVE_IB_DEVICE_ATTRS - struct ib_device_attr *attr; - int rc; + LIBCFS_ALLOC(dev_attr, sizeof(*dev_attr)); + if (dev_attr == NULL) { + CERROR("Out of memory\n"); + return -ENOMEM; + } + + rc = ib_query_device(hdev->ibh_ibdev, dev_attr); + if (rc != 0) { + CERROR("Failed to query IB device: %d\n", rc); + goto out_clean_attr; + } +#else + dev_attr = &hdev->ibh_ibdev->attrs; #endif - /* It's safe to assume a HCA can handle a page size - * matching that of the native system */ - hdev->ibh_page_shift = PAGE_SHIFT; - hdev->ibh_page_size = 1 << PAGE_SHIFT; - hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1); + hdev->ibh_mr_size = dev_attr->max_mr_size; + hdev->ibh_max_qp_wr = dev_attr->max_qp_wr; -#ifdef HAVE_IB_DEVICE_ATTRS - hdev->ibh_mr_size = hdev->ibh_ibdev->attrs.max_mr_size; + /* Setup device Memory Registration capabilities */ +#ifdef HAVE_IB_DEVICE_OPS + if (hdev->ibh_ibdev->ops.alloc_fmr && + hdev->ibh_ibdev->ops.dealloc_fmr && + hdev->ibh_ibdev->ops.map_phys_fmr && + hdev->ibh_ibdev->ops.unmap_fmr) { #else - LIBCFS_ALLOC(attr, sizeof(*attr)); - if (attr == NULL) { - CERROR("Out of memory\n"); - return -ENOMEM; - } + if (hdev->ibh_ibdev->alloc_fmr && + hdev->ibh_ibdev->dealloc_fmr && + hdev->ibh_ibdev->map_phys_fmr && + hdev->ibh_ibdev->unmap_fmr) { +#endif + LCONSOLE_INFO("Using FMR for registration\n"); + hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FMR_ENABLED; + } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { + LCONSOLE_INFO("Using FastReg for registration\n"); + hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_ENABLED; +#ifndef 
HAVE_IB_ALLOC_FAST_REG_MR +#ifdef IB_DEVICE_SG_GAPS_REG + if (dev_attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG) + hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT; +#endif +#endif + } else { + rc = -ENOSYS; + } - rc = ib_query_device(hdev->ibh_ibdev, attr); - if (rc == 0) - hdev->ibh_mr_size = attr->max_mr_size; + rc2 = kiblnd_port_get_attr(hdev); + if (rc2 != 0) + return rc2; - LIBCFS_FREE(attr, sizeof(*attr)); + if (rc != 0) + rc = -EINVAL; - if (rc != 0) { - CERROR("Failed to query IB device: %d\n", rc); - return rc; - } +#ifndef HAVE_IB_DEVICE_ATTRS +out_clean_attr: + LIBCFS_FREE(dev_attr, sizeof(*dev_attr)); #endif - if (hdev->ibh_mr_size == ~0ULL) { - hdev->ibh_mr_shift = 64; - return 0; - } - - CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size); - return -EINVAL; + if (rc == -ENOSYS) + CERROR("IB device does not support FMRs nor FastRegs, can't " + "register memory: %d\n", rc); + else if (rc == -EINVAL) + CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size); + return rc; } +#ifdef HAVE_IB_GET_DMA_MR static void -kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev) +kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev) { if (hdev->ibh_mrs == NULL) return; @@ -2528,11 +2627,17 @@ kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev) hdev->ibh_mrs = NULL; } +#endif void -kiblnd_hdev_destroy(kib_hca_dev_t *hdev) +kiblnd_hdev_destroy(struct kib_hca_dev *hdev) { + if (hdev->ibh_event_handler.device != NULL) + ib_unregister_event_handler(&hdev->ibh_event_handler); + +#ifdef HAVE_IB_GET_DMA_MR kiblnd_hdev_cleanup_mrs(hdev); +#endif if (hdev->ibh_pd != NULL) ib_dealloc_pd(hdev->ibh_pd); @@ -2543,18 +2648,14 @@ kiblnd_hdev_destroy(kib_hca_dev_t *hdev) LIBCFS_FREE(hdev, sizeof(*hdev)); } +#ifdef HAVE_IB_GET_DMA_MR static int -kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) +kiblnd_hdev_setup_mrs(struct kib_hca_dev *hdev) { struct ib_mr *mr; - int rc; int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE; - rc = kiblnd_hdev_get_attr(hdev); - if (rc != 0) - return rc; - mr = ib_get_dma_mr(hdev->ibh_pd, acflags); if (IS_ERR(mr)) { CERROR("Failed ib_get_dma_mr: %ld\n", PTR_ERR(mr)); @@ -2566,6 +2667,7 @@ kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) return 0; } +#endif static int kiblnd_dummy_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) @@ -2574,7 +2676,7 @@ kiblnd_dummy_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) } static int -kiblnd_dev_need_failover(kib_dev_t *dev) +kiblnd_dev_need_failover(struct kib_dev *dev, struct net *ns) { struct rdma_cm_id *cmid; struct sockaddr_in srcaddr; @@ -2596,8 +2698,8 @@ kiblnd_dev_need_failover(kib_dev_t *dev) * * a. rdma_bind_addr(), it will conflict with listener cmid * b. 
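/*
 * Editor's sketch: the memory-registration strategy selection that
 * kiblnd_hdev_get_attr() implements above, reduced to its decision
 * tree.  dev_supports_fmr() is a hypothetical stand-in for the
 * alloc_fmr/dealloc_fmr/map_phys_fmr/unmap_fmr pointer checks, which
 * sit under ->ops on HAVE_IB_DEVICE_OPS kernels and directly on
 * struct ib_device otherwise.
 */
if (dev_supports_fmr(ibdev))
	dev_caps |= IBLND_DEV_CAPS_FMR_ENABLED;	    /* FMR preferred */
else if (attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
	dev_caps |= IBLND_DEV_CAPS_FASTREG_ENABLED; /* fall back to FastReg */
else
	rc = -ENOSYS;	/* no usable registration scheme at all */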
rdma_resolve_addr() to zero addr */ - cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP, - IB_QPT_RC); + cmid = kiblnd_rdma_create_id(ns, kiblnd_dummy_callback, dev, + RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cmid)) { rc = PTR_ERR(cmid); CERROR("Failed to create cmid for failover: %d\n", rc); @@ -2626,16 +2728,16 @@ kiblnd_dev_need_failover(kib_dev_t *dev) } int -kiblnd_dev_failover(kib_dev_t *dev) +kiblnd_dev_failover(struct kib_dev *dev, struct net *ns) { - struct list_head zombie_tpo = LIST_HEAD_INIT(zombie_tpo); - struct list_head zombie_ppo = LIST_HEAD_INIT(zombie_ppo); - struct list_head zombie_fpo = LIST_HEAD_INIT(zombie_fpo); + LIST_HEAD(zombie_tpo); + LIST_HEAD(zombie_ppo); + LIST_HEAD(zombie_fpo); struct rdma_cm_id *cmid = NULL; - kib_hca_dev_t *hdev = NULL; - kib_hca_dev_t *old; + struct kib_hca_dev *hdev = NULL; + struct kib_hca_dev *old; struct ib_pd *pd; - kib_net_t *net; + struct kib_net *net; struct sockaddr_in addr; unsigned long flags; int rc = 0; @@ -2645,7 +2747,7 @@ kiblnd_dev_failover(kib_dev_t *dev) dev->ibd_can_failover || dev->ibd_hdev == NULL); - rc = kiblnd_dev_need_failover(dev); + rc = kiblnd_dev_need_failover(dev, ns); if (rc <= 0) goto out; @@ -2666,8 +2768,8 @@ kiblnd_dev_failover(kib_dev_t *dev) rdma_destroy_id(cmid); } - cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP, - IB_QPT_RC); + cmid = kiblnd_rdma_create_id(ns, kiblnd_cm_callback, dev, RDMA_PS_TCP, + IB_QPT_RC); if (IS_ERR(cmid)) { rc = PTR_ERR(cmid); CERROR("Failed to create cmid for failover: %d\n", rc); @@ -2701,13 +2803,18 @@ kiblnd_dev_failover(kib_dev_t *dev) hdev->ibh_dev = dev; hdev->ibh_cmid = cmid; hdev->ibh_ibdev = cmid->device; + hdev->ibh_port = cmid->port_num; - pd = ib_alloc_pd(cmid->device); - if (IS_ERR(pd)) { - rc = PTR_ERR(pd); - CERROR("Can't allocate PD: %d\n", rc); - goto out; - } +#ifdef HAVE_IB_ALLOC_PD_2ARGS + pd = ib_alloc_pd(cmid->device, 0); +#else + pd = ib_alloc_pd(cmid->device); +#endif + if (IS_ERR(pd)) { + rc = PTR_ERR(pd); + CERROR("Can't allocate PD: %d\n", rc); + goto out; + } hdev->ibh_pd = pd; @@ -2717,11 +2824,23 @@ kiblnd_dev_failover(kib_dev_t *dev) goto out; } - rc = kiblnd_hdev_setup_mrs(hdev); - if (rc != 0) { - CERROR("Can't setup device: %d\n", rc); - goto out; - } + rc = kiblnd_hdev_get_attr(hdev); + if (rc != 0) { + CERROR("Can't get device attributes: %d\n", rc); + goto out; + } + +#ifdef HAVE_IB_GET_DMA_MR + rc = kiblnd_hdev_setup_mrs(hdev); + if (rc != 0) { + CERROR("Can't setup device: %d\n", rc); + goto out; + } +#endif + + INIT_IB_EVENT_HANDLER(&hdev->ibh_event_handler, + hdev->ibh_ibdev, kiblnd_event_handler); + ib_register_event_handler(&hdev->ibh_event_handler); write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -2760,9 +2879,9 @@ kiblnd_dev_failover(kib_dev_t *dev) } void -kiblnd_destroy_dev (kib_dev_t *dev) +kiblnd_destroy_dev(struct kib_dev *dev) { - LASSERT (dev->ibd_nnets == 0); + LASSERT(dev->ibd_nnets == 0); LASSERT(list_empty(&dev->ibd_nets)); list_del(&dev->ibd_fail_list); @@ -2774,59 +2893,6 @@ kiblnd_destroy_dev (kib_dev_t *dev) LIBCFS_FREE(dev, sizeof(*dev)); } -static kib_dev_t * -kiblnd_create_dev(char *ifname) -{ - struct net_device *netdev; - kib_dev_t *dev; - __u32 netmask; - __u32 ip; - int up; - int rc; - - rc = lnet_ipif_query(ifname, &up, &ip, &netmask); - if (rc != 0) { - CERROR("Can't query IPoIB interface %s: %d\n", - ifname, rc); - return NULL; - } - - if (!up) { - CERROR("Can't query IPoIB interface %s: it's down\n", ifname); - return NULL; - } - - LIBCFS_ALLOC(dev, 
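/*
 * Editor's sketch: a compat wrapper equivalent to the ib_alloc_pd()
 * #ifdef above.  HAVE_IB_ALLOC_PD_2ARGS is the build-time probe for
 * kernels where ib_alloc_pd() takes a flags argument; the wrapper
 * name is hypothetical.
 */
static inline struct ib_pd *compat_ib_alloc_pd(struct ib_device *dev)
{
#ifdef HAVE_IB_ALLOC_PD_2ARGS
	return ib_alloc_pd(dev, 0);	/* no special PD flags needed */
#else
	return ib_alloc_pd(dev);
#endif
}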
sizeof(*dev)); - if (dev == NULL) - return NULL; - - netdev = dev_get_by_name(&init_net, ifname); - if (netdev == NULL) { - dev->ibd_can_failover = 0; - } else { - dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER); - dev_put(netdev); - } - - INIT_LIST_HEAD(&dev->ibd_nets); - INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */ - INIT_LIST_HEAD(&dev->ibd_fail_list); - dev->ibd_ifip = ip; - strcpy(&dev->ibd_ifname[0], ifname); - - /* initialize the device */ - rc = kiblnd_dev_failover(dev); - if (rc != 0) { - CERROR("Can't initialize device: %d\n", rc); - LIBCFS_FREE(dev, sizeof(*dev)); - return NULL; - } - - list_add_tail(&dev->ibd_list, - &kiblnd_data.kib_devs); - return dev; -} - static void kiblnd_base_shutdown(void) { @@ -2865,28 +2931,19 @@ kiblnd_base_shutdown(void) wake_up_all(&kiblnd_data.kib_connd_waitq); wake_up_all(&kiblnd_data.kib_failover_waitq); - i = 2; - while (atomic_read(&kiblnd_data.kib_nthreads) != 0) { - i++; - /* power of 2? */ - CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, - "Waiting for %d threads to terminate\n", - atomic_read(&kiblnd_data.kib_nthreads)); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1)); - } - - /* fall through */ + wait_var_event_warning(&kiblnd_data.kib_nthreads, + !atomic_read(&kiblnd_data.kib_nthreads), + "Waiting for %d threads to terminate\n", + atomic_read(&kiblnd_data.kib_nthreads)); + /* fall through */ case IBLND_INIT_NOTHING: break; } - if (kiblnd_data.kib_peers != NULL) { - LIBCFS_FREE(kiblnd_data.kib_peers, - sizeof(struct list_head) * - kiblnd_data.kib_peer_hash_size); - } + if (kiblnd_data.kib_peers) + CFS_FREE_PTR_ARRAY(kiblnd_data.kib_peers, + kiblnd_data.kib_peer_hash_size); if (kiblnd_data.kib_scheds != NULL) cfs_percpt_free(kiblnd_data.kib_scheds); @@ -2899,12 +2956,11 @@ kiblnd_base_shutdown(void) } static void -kiblnd_shutdown (lnet_ni_t *ni) +kiblnd_shutdown(struct lnet_ni *ni) { - kib_net_t *net = ni->ni_data; + struct kib_net *net = ni->ni_data; rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - int i; - unsigned long flags; + unsigned long flags; LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL); @@ -2922,22 +2978,16 @@ kiblnd_shutdown (lnet_ni_t *ni) default: LBUG(); - case IBLND_INIT_ALL: - /* nuke all existing peers within this net */ - kiblnd_del_peer(ni, LNET_NID_ANY); + case IBLND_INIT_ALL: + /* nuke all existing peers within this net */ + kiblnd_del_peer(ni, LNET_NID_ANY); - /* Wait for all peer state to clean up */ - i = 2; - while (atomic_read(&net->ibn_npeers) != 0) { - i++; - /* power of 2? */ - CDEBUG(((i & (-i)) == i) ? 
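/*
 * Editor's note: wait_var_event_warning() (a libcfs helper in this
 * tree, not an upstream primitive) replaces the open-coded poll loop
 * removed above: it sleeps until the condition is true, logging the
 * supplied message periodically.  The condition is re-evaluated when
 * the waker calls wake_up_var() on the same address -- schematically,
 * thread exit would pair with it like this (sketch only):
 */
atomic_dec(&kiblnd_data.kib_nthreads);
wake_up_var(&kiblnd_data.kib_nthreads);	/* kick any waiter in shutdown */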
D_WARNING : D_NET, - "%s: waiting for %d peers to disconnect\n", - libcfs_nid2str(ni->ni_nid), - atomic_read(&net->ibn_npeers)); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1)); - } + /* Wait for all peer_ni state to clean up */ + wait_var_event_warning(&net->ibn_npeers, + atomic_read(&net->ibn_npeers) == 0, + "%s: waiting for %d peers to disconnect\n", + libcfs_nid2str(ni->ni_nid), + atomic_read(&net->ibn_npeers)); kiblnd_net_fini_pools(net); @@ -2947,7 +2997,7 @@ kiblnd_shutdown (lnet_ni_t *ni) list_del(&net->ibn_list); write_unlock_irqrestore(g_lock, flags); - /* fall through */ + /* fall through */ case IBLND_INIT_NOTHING: LASSERT (atomic_read(&net->ibn_nconns) == 0); @@ -2970,11 +3020,10 @@ kiblnd_shutdown (lnet_ni_t *ni) out: if (list_empty(&kiblnd_data.kib_devs)) kiblnd_base_shutdown(); - return; } static int -kiblnd_base_startup(void) +kiblnd_base_startup(struct net *ns) { struct kib_sched_info *sched; int rc; @@ -2982,7 +3031,9 @@ kiblnd_base_startup(void) LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING); - try_module_get(THIS_MODULE); + if (!try_module_get(THIS_MODULE)) + goto failed; + memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */ rwlock_init(&kiblnd_data.kib_global_lock); @@ -2991,9 +3042,8 @@ kiblnd_base_startup(void) INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs); kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE; - LIBCFS_ALLOC(kiblnd_data.kib_peers, - sizeof(struct list_head) * - kiblnd_data.kib_peer_hash_size); + CFS_ALLOC_PTR_ARRAY(kiblnd_data.kib_peers, + kiblnd_data.kib_peer_hash_size); if (kiblnd_data.kib_peers == NULL) goto failed; @@ -3047,7 +3097,7 @@ kiblnd_base_startup(void) } if (*kiblnd_tunables.kib_dev_failover != 0) - rc = kiblnd_thread_start(kiblnd_failover_thread, NULL, + rc = kiblnd_thread_start(kiblnd_failover_thread, ns, "kiblnd_failover"); if (rc != 0) { @@ -3107,8 +3157,8 @@ kiblnd_start_schedulers(struct kib_sched_info *sched) return rc; } -static int -kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts) +static int kiblnd_dev_start_threads(struct kib_dev *dev, bool newdev, u32 *cpts, + int ncpts) { int cpt; int rc; @@ -3133,13 +3183,13 @@ kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts) return 0; } -static kib_dev_t * +static struct kib_dev * kiblnd_dev_search(char *ifname) { - kib_dev_t *alias = NULL; - kib_dev_t *dev; - char *colon; - char *colon2; + struct kib_dev *alias = NULL; + struct kib_dev *dev; + char *colon; + char *colon2; colon = strchr(ifname, ':'); list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { @@ -3167,102 +3217,151 @@ kiblnd_dev_search(char *ifname) } static int -kiblnd_startup (lnet_ni_t *ni) -{ - char *ifname; - kib_dev_t *ibdev = NULL; - kib_net_t *net; - struct timeval tv; - unsigned long flags; - int rc; - int newdev; - - LASSERT (ni->ni_net->net_lnd == &the_o2iblnd); - - if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) { - rc = kiblnd_base_startup(); - if (rc != 0) - return rc; - } +kiblnd_startup(struct lnet_ni *ni) +{ + char *ifname = NULL; + struct lnet_inetdev *ifaces = NULL; + struct kib_dev *ibdev = NULL; + struct kib_net *net = NULL; + unsigned long flags; + int rc; + int i; + bool newdev; - LIBCFS_ALLOC(net, sizeof(*net)); - ni->ni_data = net; - if (net == NULL) - goto failed; + LASSERT(ni->ni_net->net_lnd == &the_o2iblnd); + + if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) { + rc = kiblnd_base_startup(ni->ni_net_ns); + if (rc != 0) + return rc; + } + + LIBCFS_ALLOC(net, sizeof(*net)); + 
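/*
 * Editor's note: just below, the net incarnation stamp moves from
 * do_gettimeofday() (y2038-unsafe struct timeval) to a 64-bit
 * microsecond count; the two expressions agree in units:
 */
u64 usecs = ktime_get_real_ns() / NSEC_PER_USEC; /* usec since epoch, y2038-safe */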
ni->ni_data = net; + if (net == NULL) { + rc = -ENOMEM; + goto failed; + } - do_gettimeofday(&tv); - net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec; + net->ibn_ni = ni; + net->ibn_incarnation = ktime_get_real_ns() / NSEC_PER_USEC; kiblnd_tunables_setup(ni); - if (ni->ni_interfaces[0] != NULL) { - /* Use the IPoIB interface specified in 'networks=' */ + /* + * ni_interfaces is only to support legacy pre Multi-Rail + * tcp bonding for ksocklnd. Multi-Rail wants each secondary + * IP to be treated as an unique 'struct ni' interfaces instead. + */ + if (ni->ni_interfaces[0] != NULL) { + /* Use the IPoIB interface specified in 'networks=' */ + if (ni->ni_interfaces[1] != NULL) { + CERROR("ko2iblnd: Multiple interfaces not supported\n"); + rc = -EINVAL; + goto failed; + } - CLASSERT (LNET_MAX_INTERFACES > 1); - if (ni->ni_interfaces[1] != NULL) { - CERROR("Multiple interfaces not supported\n"); - goto failed; - } + ifname = ni->ni_interfaces[0]; + } else { + ifname = *kiblnd_tunables.kib_default_ipif; + } - ifname = ni->ni_interfaces[0]; - } else { - ifname = *kiblnd_tunables.kib_default_ipif; - } + if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) { + CERROR("IPoIB interface name too long: %s\n", ifname); + rc = -E2BIG; + goto failed; + } - if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) { - CERROR("IPoIB interface name too long: %s\n", ifname); - goto failed; - } + rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns); + if (rc < 0) + goto failed; - ibdev = kiblnd_dev_search(ifname); + for (i = 0; i < rc; i++) { + if (strcmp(ifname, ifaces[i].li_name) == 0) + break; + } + if (i == rc) { + CERROR("ko2iblnd: No matching interfaces\n"); + rc = -ENOENT; + goto failed; + } + + ibdev = kiblnd_dev_search(ifname); newdev = ibdev == NULL; /* hmm...create kib_dev even for alias */ - if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0) - ibdev = kiblnd_create_dev(ifname); + if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0) { + LIBCFS_ALLOC(ibdev, sizeof(*ibdev)); + if (!ibdev) { + rc = -ENOMEM; + goto failed; + } - if (ibdev == NULL) - goto failed; + ibdev->ibd_ifip = ifaces[i].li_ipaddr; + strlcpy(ibdev->ibd_ifname, ifaces[i].li_name, + sizeof(ibdev->ibd_ifname)); + ibdev->ibd_can_failover = !!(ifaces[i].li_flags & IFF_MASTER); + + INIT_LIST_HEAD(&ibdev->ibd_nets); + INIT_LIST_HEAD(&ibdev->ibd_list); /* not yet in kib_devs */ + INIT_LIST_HEAD(&ibdev->ibd_fail_list); + + /* initialize the device */ + rc = kiblnd_dev_failover(ibdev, ni->ni_net_ns); + if (rc) { + CERROR("ko2iblnd: Can't initialize device: rc = %d\n", + rc); + goto failed; + } + + list_add_tail(&ibdev->ibd_list, &kiblnd_data.kib_devs); + } + + net->ibn_dev = ibdev; + ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip); - net->ibn_dev = ibdev; - ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip); + ni->ni_dev_cpt = ifaces[i].li_cpt; - rc = kiblnd_dev_start_threads(ibdev, newdev, - ni->ni_cpts, ni->ni_ncpts); + rc = kiblnd_dev_start_threads(ibdev, newdev, ni->ni_cpts, ni->ni_ncpts); if (rc != 0) goto failed; rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts); - if (rc != 0) { - CERROR("Failed to initialize NI pools: %d\n", rc); - goto failed; - } + if (rc != 0) { + CERROR("Failed to initialize NI pools: %d\n", rc); + goto failed; + } write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); ibdev->ibd_nnets++; list_add_tail(&net->ibn_list, &ibdev->ibd_nets); + /* for health check */ + if (ibdev->ibd_hdev->ibh_state == IBLND_DEV_PORT_DOWN) + 
kiblnd_set_ni_fatal_on(ibdev->ibd_hdev, 1); write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - net->ibn_init = IBLND_INIT_ALL; + net->ibn_init = IBLND_INIT_ALL; - return 0; + return 0; failed: if (net != NULL && net->ibn_dev == NULL && ibdev != NULL) - kiblnd_destroy_dev(ibdev); + kiblnd_destroy_dev(ibdev); - kiblnd_shutdown(ni); + kfree(ifaces); + kiblnd_shutdown(ni); - CDEBUG(D_NET, "kiblnd_startup failed\n"); - return -ENETDOWN; + CDEBUG(D_NET, "Configuration of device %s failed: rc = %d\n", + ifname ? ifname : "", rc); + + return -ENETDOWN; } -static lnd_t the_o2iblnd = { +static const struct lnet_lnd the_o2iblnd = { .lnd_type = O2IBLND, .lnd_startup = kiblnd_startup, .lnd_shutdown = kiblnd_shutdown, .lnd_ctl = kiblnd_ctl, - .lnd_query = kiblnd_query, .lnd_send = kiblnd_send, .lnd_recv = kiblnd_recv, }; @@ -3276,13 +3375,13 @@ static int __init ko2iblnd_init(void) { int rc; - CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE); - CLASSERT(offsetof(kib_msg_t, - ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <= - IBLND_MSG_SIZE); - CLASSERT(offsetof(kib_msg_t, - ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) - <= IBLND_MSG_SIZE); + BUILD_BUG_ON(sizeof(struct kib_msg) > IBLND_MSG_SIZE); + BUILD_BUG_ON(offsetof(struct kib_msg, + ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) > + IBLND_MSG_SIZE); + BUILD_BUG_ON(offsetof(struct kib_msg, + ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) > + IBLND_MSG_SIZE); rc = kiblnd_tunables_init(); if (rc != 0)
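/*
 * Editor's note on the CLASSERT conversions above: CLASSERT(cond)
 * asserted that cond holds at compile time, whereas BUILD_BUG_ON(cond)
 * breaks the build when cond is TRUE, so each converted assertion has
 * its condition inverted (<= becomes >, == becomes !=).  A minimal
 * equivalent pair:
 */
CLASSERT(sizeof(__u8) == 1);		/* old: assert the condition */
BUILD_BUG_ON(sizeof(__u8) != 1);	/* new: trap on its negation */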