X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fo2iblnd%2Fo2iblnd_cb.c;h=8196b1bbf2cf7cd0de700fc8cab76cf8d6688ce7;hp=12239064aeebf37324b5e13e1525a84121b99115;hb=88f761bc00c7fb29db4f80594ae864493bdd5071;hpb=0754bc8f2623bea184111af216f7567608db35b6

diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 1223906..8196b1b 100644
--- a/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -668,7 +668,7 @@ kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags)
 
 static int
 kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
-                    unsigned int niov, struct iovec *iov, int offset, int nob)
+                    unsigned int niov, struct kvec *iov, int offset, int nob)
 {
         kib_net_t   *net = ni->ni_data;
         struct page *page;
@@ -1433,7 +1433,7 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         int               target_is_router = lntmsg->msg_target_is_router;
         int               routing = lntmsg->msg_routing;
         unsigned int      payload_niov = lntmsg->msg_niov;
-        struct iovec     *payload_iov = lntmsg->msg_iov;
+        struct kvec      *payload_iov = lntmsg->msg_iov;
         lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
         unsigned int      payload_offset = lntmsg->msg_offset;
         unsigned int      payload_nob = lntmsg->msg_len;
@@ -1599,7 +1599,7 @@ kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
 {
         lnet_process_id_t target = lntmsg->msg_target;
         unsigned int      niov = lntmsg->msg_niov;
-        struct iovec     *iov = lntmsg->msg_iov;
+        struct kvec      *iov = lntmsg->msg_iov;
         lnet_kiov_t      *kiov = lntmsg->msg_kiov;
         unsigned int      offset = lntmsg->msg_offset;
         unsigned int      nob = lntmsg->msg_len;
@@ -1657,9 +1657,9 @@ kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
 }
 
 int
-kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
-             unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
-             unsigned int offset, unsigned int mlen, unsigned int rlen)
+kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
+            unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
+            unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
         kib_rx_t  *rx = private;
         kib_msg_t *rxmsg = rx->rx_msg;
@@ -2312,6 +2312,10 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 goto failed;
         }
 
+        /* We have validated the peer's parameters so use those */
+        peer->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
+        peer->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
+
         write_lock_irqsave(g_lock, flags);
 
         peer2 = kiblnd_find_peer_locked(nid);
@@ -2350,6 +2354,12 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 peer2->ibp_accepting++;
                 kiblnd_peer_addref(peer2);
 
+                /* Race with kiblnd_launch_tx (active connect) to create peer
+                 * so copy validated parameters since we now know what the
+                 * peer's limits are */
+                peer2->ibp_max_frags = peer->ibp_max_frags;
+                peer2->ibp_queue_depth = peer->ibp_queue_depth;
+
                 write_unlock_irqrestore(g_lock, flags);
                 kiblnd_peer_decref(peer);
                 peer = peer2;
@@ -2372,8 +2382,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 write_unlock_irqrestore(g_lock, flags);
         }
 
-        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version,
-                                  &reqmsg->ibm_u.connparams);
+        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
         if (conn == NULL) {
                 kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
                 kiblnd_peer_decref(peer);
@@ -2383,10 +2392,9 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 
         /* conn now "owns" cmid, so I return success from here on to ensure the
          * CM callback doesn't destroy cmid. */
-        conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
-        conn->ibc_credits          = reqmsg->ibm_u.connparams.ibcp_queue_depth;
-        conn->ibc_reserved_credits = reqmsg->ibm_u.connparams.ibcp_queue_depth;
+        conn->ibc_credits          = conn->ibc_queue_depth;
+        conn->ibc_reserved_credits = conn->ibc_queue_depth;
         LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
                 IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
@@ -2395,10 +2403,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 
         kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
                         sizeof(ackmsg->ibm_u.connparams));
-        ackmsg->ibm_u.connparams.ibcp_queue_depth =
-                reqmsg->ibm_u.connparams.ibcp_queue_depth;
-        ackmsg->ibm_u.connparams.ibcp_max_frags =
-                reqmsg->ibm_u.connparams.ibcp_max_frags;
+        ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
+        ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags;
         ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
 
         kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
@@ -2488,6 +2494,9 @@ kiblnd_reconnect (kib_conn_t *conn, int version,
                 break;
 
         case IBLND_REJECT_RDMA_FRAGS:
+                if (!cp)
+                        goto failed;
+
                 if (conn->ibc_max_frags <= cp->ibcp_max_frags) {
                         CNETERR("Unsupported max frags, peer supports %d\n",
                                 cp->ibcp_max_frags);
@@ -2498,18 +2507,21 @@ kiblnd_reconnect (kib_conn_t *conn, int version,
                         goto failed;
                 }
 
-                conn->ibc_max_frags = cp->ibcp_max_frags;
+                peer->ibp_max_frags = cp->ibcp_max_frags;
                 reason = "rdma fragments";
                 break;
 
         case IBLND_REJECT_MSG_QUEUE_SIZE:
+                if (!cp)
+                        goto failed;
+
                 if (conn->ibc_queue_depth <= cp->ibcp_queue_depth) {
                         CNETERR("Unsupported queue depth, peer supports %d\n",
                                 cp->ibcp_queue_depth);
                         goto failed;
                 }
 
-                conn->ibc_queue_depth = cp->ibcp_queue_depth;
+                peer->ibp_queue_depth = cp->ibcp_queue_depth;
                 reason = "queue depth";
                 break;
 
@@ -2787,7 +2799,7 @@ kiblnd_active_connect (struct rdma_cm_id *cmid)
         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT,
-                                  version, NULL);
+                                  version);
         if (conn == NULL) {
                 kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
                 kiblnd_peer_decref(peer);               /* lose cmid's ref */