From: eeb
Date: Thu, 29 Sep 2005 14:01:10 +0000 (+0000)
Subject: * PTL_MTU,PTL_MD_MAX_IOV -> LNET_MTU,LNET_MAX_IOV
X-Git-Tag: v1_7_100~1^25~6^2~129
X-Git-Url: https://git.whamcloud.com/?a=commitdiff_plain;h=8a746280a039c03fef4010051dab02ab959eebf0;p=fs%2Flustre-release.git

* PTL_MTU,PTL_MD_MAX_IOV -> LNET_MTU,LNET_MAX_IOV
* moved portals mtu and max iov out of lib-types.h into types.h so ptllnd
  can see PTL_MTU/PTL_MD_MAX_IOV

CAVEAT EMPTOR: this might break ptllnd builds with non-lustre portals since
these defines are not in the official spec.  Also, ptllnd really needs the
underlying portals to cope with LNET's MTU and MAX_IOV; it simply asserts
it currently.
---
diff --git a/lnet/include/lnet/lib-types.h b/lnet/include/lnet/lib-types.h
index f0c490f..93457e0 100644
--- a/lnet/include/lnet/lib-types.h
+++ b/lnet/include/lnet/lib-types.h
@@ -216,8 +216,8 @@ typedef struct lnet_libmd {
         void             *md_addrkey;
         unsigned int      md_niov;              /* # frags */
         union {
-                struct iovec  iov[PTL_MD_MAX_IOV];
-                lnet_kiov_t   kiov[PTL_MD_MAX_IOV];
+                struct iovec  iov[LNET_MAX_IOV];
+                lnet_kiov_t   kiov[LNET_MAX_IOV];
         } md_iov;
 } lnet_libmd_t;
diff --git a/lnet/include/lnet/types.h b/lnet/include/lnet/types.h
index c681638..34c97b3 100644
--- a/lnet/include/lnet/types.h
+++ b/lnet/include/lnet/types.h
@@ -57,9 +57,9 @@ typedef struct {
 } lnet_md_t;
 
 /* Max message size */
-#define PTL_MTU        (1<<20)
+#define LNET_MTU       (1<<20)
 
 /* limit on the number of entries in discontiguous MDs */
-#define PTL_MD_MAX_IOV 256
+#define LNET_MAX_IOV   256
 
 /* Options for the MD structure */
 #define LNET_MD_OP_PUT      (1 << 0)
diff --git a/lnet/klnds/gmlnd/gmlnd_api.c b/lnet/klnds/gmlnd/gmlnd_api.c
index 201ea1d..2ff9e84 100644
--- a/lnet/klnds/gmlnd/gmlnd_api.c
+++ b/lnet/klnds/gmlnd/gmlnd_api.c
@@ -180,7 +180,7 @@ gmnal_startup(lnet_ni_t *ni)
         CDEBUG(D_NET, "portals_nid is %s\n", libcfs_nid2str(ni->ni_nid));
 
         gmni->gmni_large_msgsize =
-                offsetof(gmnal_msg_t, gmm_u.immediate.gmim_payload[PTL_MTU]);
+                offsetof(gmnal_msg_t, gmm_u.immediate.gmim_payload[LNET_MTU]);
         gmni->gmni_large_gmsize =
                 gm_min_size_for_length(gmni->gmni_large_msgsize);
         gmni->gmni_large_pages =
diff --git a/lnet/klnds/iiblnd/iiblnd.c b/lnet/klnds/iiblnd/iiblnd.c
index 7955e23..d8253b9 100644
--- a/lnet/klnds/iiblnd/iiblnd.c
+++ b/lnet/klnds/iiblnd/iiblnd.c
@@ -1509,7 +1509,7 @@ kibnal_startup (lnet_ni_t *ni)
         {
                 const int pool_size = IBNAL_NTX;
                 struct ib_fmr_pool_param params = {
-                        .max_pages_per_fmr = PTL_MTU/PAGE_SIZE,
+                        .max_pages_per_fmr = LNET_MTU/PAGE_SIZE,
                         .access            = (IB_ACCESS_LOCAL_WRITE |
                                               IB_ACCESS_REMOTE_WRITE |
                                               IB_ACCESS_REMOTE_READ),
diff --git a/lnet/klnds/iiblnd/iiblnd.h b/lnet/klnds/iiblnd/iiblnd.h
index 9a84549..6585f51 100644
--- a/lnet/klnds/iiblnd/iiblnd.h
+++ b/lnet/klnds/iiblnd/iiblnd.h
@@ -114,7 +114,7 @@
 #define IBNAL_TX_MSG_BYTES  (IBNAL_TX_MSGS * IBNAL_MSG_SIZE)
 #define IBNAL_TX_MSG_PAGES  ((IBNAL_TX_MSG_BYTES + PAGE_SIZE - 1)/PAGE_SIZE)
 
-#define IBNAL_TX_MAX_SG (PTL_MD_MAX_IOV + 1)
+#define IBNAL_TX_MAX_SG (LNET_MAX_IOV + 1)
 
 /* RX messages (per connection) */
 #define IBNAL_RX_MSGS      IBNAL_MSG_QUEUE_SIZE
diff --git a/lnet/klnds/iiblnd/iiblnd_cb.c b/lnet/klnds/iiblnd/iiblnd_cb.c
index 58461e3..4ed6bd3 100644
--- a/lnet/klnds/iiblnd/iiblnd_cb.c
+++ b/lnet/klnds/iiblnd/iiblnd_cb.c
@@ -441,7 +441,7 @@ kibnal_rx_callback (IB_WORK_COMPLETION *wc)
                 CDEBUG(D_NET, "%d RDMA: cookie "LPX64":\n",
                        msg->ibm_type, msg->ibm_u.rdma.ibrm_cookie);
 
-                if ((msg->ibm_u.rdma.ibrm_num_descs > PTL_MD_MAX_IOV) ||
+                if ((msg->ibm_u.rdma.ibrm_num_descs > LNET_MAX_IOV) ||
                    (kib_rdma_msg_len(msg->ibm_u.rdma.ibrm_num_descs) >
                     min(nob, IBNAL_MSG_SIZE))) {
                         CERROR ("num_descs %d too large\n",
@@ -575,7 +575,7 @@ kibnal_fill_ibrm(kib_tx_t *tx, struct page *page, unsigned long page_offset,
         kib_rdma_msg_t *ibrm = &tx->tx_msg->ibm_u.rdma;
         kib_rdma_desc_t *desc;
 
-        LASSERTF(ibrm->ibrm_num_descs < PTL_MD_MAX_IOV, "%u\n",
+        LASSERTF(ibrm->ibrm_num_descs < LNET_MAX_IOV, "%u\n",
                  ibrm->ibrm_num_descs);
 
         desc = &ibrm->ibrm_desc[ibrm->ibrm_num_descs];
@@ -734,7 +734,7 @@ kibnal_map_kiov (kib_tx_t *tx, IB_ACCESS_CONTROL access,
                         goto out;
                 }
 
-                if (nphys == PTL_MD_MAX_IOV) {
+                if (nphys == LNET_MAX_IOV) {
                         CERROR ("payload too big (%d)\n", nphys);
                         rc = -EMSGSIZE;
                         goto out;
@@ -1484,7 +1484,7 @@ kibnal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                payload_nob, payload_niov, libcfs_id2str(target));
 
         LASSERT (payload_nob == 0 || payload_niov > 0);
-        LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+        LASSERT (payload_niov <= LNET_MAX_IOV);
 
         /* Thread context if we're sending payload */
         LASSERT (!in_interrupt() || payload_nob == 0);
diff --git a/lnet/klnds/openiblnd/openiblnd.c b/lnet/klnds/openiblnd/openiblnd.c
index 91e67c8..54310c2 100644
--- a/lnet/klnds/openiblnd/openiblnd.c
+++ b/lnet/klnds/openiblnd/openiblnd.c
@@ -1573,7 +1573,7 @@ kibnal_startup (lnet_ni_t *ni)
         {
                 const int pool_size = *kibnal_tunables.kib_ntx;
                 struct ib_fmr_pool_param params = {
-                        .max_pages_per_fmr = PTL_MTU/PAGE_SIZE,
+                        .max_pages_per_fmr = LNET_MTU/PAGE_SIZE,
                         .access            = (IB_ACCESS_LOCAL_WRITE |
                                               IB_ACCESS_REMOTE_WRITE |
                                               IB_ACCESS_REMOTE_READ),
diff --git a/lnet/klnds/openiblnd/openiblnd_cb.c b/lnet/klnds/openiblnd/openiblnd_cb.c
index 69af83b..797c91e 100644
--- a/lnet/klnds/openiblnd/openiblnd_cb.c
+++ b/lnet/klnds/openiblnd/openiblnd_cb.c
@@ -542,7 +542,7 @@ kibnal_map_kiov (kib_tx_t *tx, enum ib_memory_access access,
                         goto out;
                 }
 
-                if (nphys == PTL_MD_MAX_IOV) {
+                if (nphys == LNET_MAX_IOV) {
                         CERROR ("payload too big (%d)\n", nphys);
                         rc = -EMSGSIZE;
                         goto out;
@@ -1219,7 +1219,7 @@ kibnal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                payload_nob, payload_niov, libcfs_id2str(target));
 
         LASSERT (payload_nob == 0 || payload_niov > 0);
-        LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+        LASSERT (payload_niov <= LNET_MAX_IOV);
 
         /* Thread context if we're sending payload */
         LASSERT (!in_interrupt() || payload_niov == 0);
diff --git a/lnet/klnds/ptllnd/ptllnd_cb.c b/lnet/klnds/ptllnd/ptllnd_cb.c
index 2c04334..408cc49 100644
--- a/lnet/klnds/ptllnd/ptllnd_cb.c
+++ b/lnet/klnds/ptllnd/ptllnd_cb.c
@@ -426,7 +426,9 @@ kptllnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                payload_nob, payload_niov, libcfs_id2str(target));
 
         LASSERT (payload_nob == 0 || payload_niov > 0);
-        LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+        LASSERT (payload_niov <= LNET_MAX_IOV);
+
+        LASSERT (payload_niov <= PTL_MD_MAX_IOV);       /* !!! */
 
         /* Thread context */
         LASSERT (!in_interrupt());
@@ -720,6 +722,8 @@ int kptllnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
         /* Either all pages or all vaddrs */
         LASSERT (!(kiov != NULL && iov != NULL));
 
+        LASSERT (niov <= PTL_MD_MAX_IOV);       /* !!! */
+
         if(delayed)
                 STAT_UPDATE(kps_recv_delayed);
diff --git a/lnet/klnds/qswlnd/qswlnd.h b/lnet/klnds/qswlnd/qswlnd.h
index 985b279..c5f3c61 100644
--- a/lnet/klnds/qswlnd/qswlnd.h
+++ b/lnet/klnds/qswlnd/qswlnd.h
@@ -110,7 +110,7 @@
 #define KQSW_TX_BUFFER_SIZE     (KQSW_HDR_SIZE + *kqswnal_tunables.kqn_tx_maxcontig)
 /* The pre-allocated tx buffer (hdr + small payload) */
 
-#define KQSW_NTXMSGPAGES        (btopr(KQSW_TX_BUFFER_SIZE) + 1 + btopr(PTL_MTU) + 1)
+#define KQSW_NTXMSGPAGES        (btopr(KQSW_TX_BUFFER_SIZE) + 1 + btopr(LNET_MTU) + 1)
 /* Reserve elan address space for pre-allocated and pre-mapped transmit
  * buffer and a full payload too.  Extra pages allow for page alignment */
 
@@ -118,7 +118,7 @@
 /* receive hdr/payload always contiguous and page aligned */
 #define KQSW_NRXMSGBYTES_SMALL  (KQSW_NRXMSGPAGES_SMALL * PAGE_SIZE)
 
-#define KQSW_NRXMSGPAGES_LARGE  (btopr(KQSW_HDR_SIZE + PTL_MTU))
+#define KQSW_NRXMSGPAGES_LARGE  (btopr(KQSW_HDR_SIZE + LNET_MTU))
 /* receive hdr/payload always contiguous and page aligned */
 #define KQSW_NRXMSGBYTES_LARGE  (KQSW_NRXMSGPAGES_LARGE * PAGE_SIZE)
 /* biggest complete packet we can receive (or transmit) */
diff --git a/lnet/klnds/qswlnd/qswlnd_cb.c b/lnet/klnds/qswlnd/qswlnd_cb.c
index c496e1b..526c6d9 100644
--- a/lnet/klnds/qswlnd/qswlnd_cb.c
+++ b/lnet/klnds/qswlnd/qswlnd_cb.c
@@ -972,7 +972,7 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                payload_nob, payload_niov, libcfs_id2str(target));
 
         LASSERT (payload_nob == 0 || payload_niov > 0);
-        LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+        LASSERT (payload_niov <= LNET_MAX_IOV);
 
         /* It must be OK to kmap() if required */
         LASSERT (payload_kiov == NULL || !in_interrupt ());
diff --git a/lnet/klnds/ralnd/ralnd.c b/lnet/klnds/ralnd/ralnd.c
index b289fcb..a590115 100644
--- a/lnet/klnds/ralnd/ralnd.c
+++ b/lnet/klnds/ralnd/ralnd.c
@@ -1241,7 +1241,7 @@ kranal_free_txdescs(struct list_head *freelist)
                 tx = list_entry(freelist->next, kra_tx_t, tx_list);
 
                 list_del(&tx->tx_list);
-                LIBCFS_FREE(tx->tx_phys, PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
+                LIBCFS_FREE(tx->tx_phys, LNET_MAX_IOV * sizeof(*tx->tx_phys));
                 LIBCFS_FREE(tx, sizeof(*tx));
         }
 }
@@ -1265,7 +1265,7 @@ kranal_alloc_txdescs(struct list_head *freelist, int n)
                 }
 
                 LIBCFS_ALLOC(tx->tx_phys,
-                             PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
+                             LNET_MAX_IOV * sizeof(*tx->tx_phys));
                 if (tx->tx_phys == NULL) {
                         CERROR("Can't allocate tx[%d]->tx_phys\n", i);
diff --git a/lnet/klnds/ralnd/ralnd_cb.c b/lnet/klnds/ralnd/ralnd_cb.c
index 7ead02c..21f5df1 100644
--- a/lnet/klnds/ralnd/ralnd_cb.c
+++ b/lnet/klnds/ralnd/ralnd_cb.c
@@ -232,7 +232,7 @@ kranal_setup_phys_buffer (kra_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
                         return -EINVAL;
                 }
 
-                if ((phys - tx->tx_phys) == PTL_MD_MAX_IOV) {
+                if ((phys - tx->tx_phys) == LNET_MAX_IOV) {
                         CERROR ("payload too big (%d)\n", (int)(phys - tx->tx_phys));
                         return -EMSGSIZE;
                 }
@@ -609,7 +609,7 @@ kranal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                nob, niov, libcfs_id2str(target));
 
         LASSERT (nob == 0 || niov > 0);
-        LASSERT (niov <= PTL_MD_MAX_IOV);
+        LASSERT (niov <= LNET_MAX_IOV);
 
         LASSERT (!in_interrupt());
         /* payload is either all vaddrs or all pages */
diff --git a/lnet/klnds/socklnd/socklnd.h b/lnet/klnds/socklnd/socklnd.h
index 868b44b..aaf89ce 100644
--- a/lnet/klnds/socklnd/socklnd.h
+++ b/lnet/klnds/socklnd/socklnd.h
@@ -234,10 +234,10 @@ typedef struct                                  /* transmit packet */
 /* network zero copy callback descriptor embedded in ksock_tx_t */
 
 /* space for the rx frag descriptors; we either read a single contiguous
- * header, or up to PTL_MD_MAX_IOV frags of payload of either type. */
+ * header, or up to LNET_MAX_IOV frags of payload of either type. */
 typedef union {
-        struct iovec     iov[PTL_MD_MAX_IOV];
-        lnet_kiov_t      kiov[PTL_MD_MAX_IOV];
+        struct iovec     iov[LNET_MAX_IOV];
+        lnet_kiov_t      kiov[LNET_MAX_IOV];
 } ksock_rxiovspace_t;
 
 #define SOCKNAL_RX_HEADER       1               /* reading header */
@@ -291,10 +291,10 @@ typedef struct ksock_conn
         int                 ksnc_tx_scheduled;  /* being progressed */
 
 #if !SOCKNAL_SINGLE_FRAG_RX
-        struct iovec        ksnc_rx_scratch_iov[PTL_MD_MAX_IOV];
+        struct iovec        ksnc_rx_scratch_iov[LNET_MAX_IOV];
 #endif
 #if !SOCKNAL_SINGLE_FRAG_TX
-        struct iovec        ksnc_tx_scratch_iov[PTL_MD_MAX_IOV];
+        struct iovec        ksnc_tx_scratch_iov[LNET_MAX_IOV];
 #endif
 } ksock_conn_t;
diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c
index 7a8fdac..46b6c0a 100644
--- a/lnet/klnds/socklnd/socklnd_cb.c
+++ b/lnet/klnds/socklnd/socklnd_cb.c
@@ -801,7 +801,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                payload_nob, payload_niov, libcfs_id2str(target));
 
         LASSERT (payload_nob == 0 || payload_niov > 0);
-        LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+        LASSERT (payload_niov <= LNET_MAX_IOV);
         /* payload is either all vaddrs or all pages */
         LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
         LASSERT (!in_interrupt ());
@@ -1042,7 +1042,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
         unsigned long  flags;
 
         LASSERT (mlen <= rlen);
-        LASSERT (niov <= PTL_MD_MAX_IOV);
+        LASSERT (niov <= LNET_MAX_IOV);
 
         conn->ksnc_cookie = msg;
         conn->ksnc_rx_nob_wanted = mlen;
@@ -1053,14 +1053,14 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
                 conn->ksnc_rx_kiov = NULL;
                 conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
                 conn->ksnc_rx_niov =
-                        lnet_extract_iov(PTL_MD_MAX_IOV, conn->ksnc_rx_iov,
+                        lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
                                          niov, iov, offset, mlen);
         } else {
                 conn->ksnc_rx_niov = 0;
                 conn->ksnc_rx_iov = NULL;
                 conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
                 conn->ksnc_rx_nkiov =
-                        lnet_extract_kiov(PTL_MD_MAX_IOV, conn->ksnc_rx_kiov,
+                        lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
                                           niov, kiov, offset, mlen);
         }
diff --git a/lnet/klnds/viblnd/viblnd.c b/lnet/klnds/viblnd/viblnd.c
index f6cfa0e..14baf37 100644
--- a/lnet/klnds/viblnd/viblnd.c
+++ b/lnet/klnds/viblnd/viblnd.c
@@ -1255,7 +1255,7 @@ kibnal_alloc_tx_descs (void)
                 kib_tx_t *tx = &kibnal_data.kib_tx_descs[i];
 
 #if IBNAL_USE_FMR
-                LIBCFS_ALLOC(tx->tx_pages, PTL_MD_MAX_IOV *
+                LIBCFS_ALLOC(tx->tx_pages, LNET_MAX_IOV *
                              sizeof(*tx->tx_pages));
                 if (tx->tx_pages == NULL)
                         return -ENOMEM;
@@ -1296,7 +1296,7 @@ kibnal_free_tx_descs (void)
 
 #if IBNAL_USE_FMR
                 if (tx->tx_pages != NULL)
-                        LIBCFS_FREE(tx->tx_pages, PTL_MD_MAX_IOV *
+                        LIBCFS_FREE(tx->tx_pages, LNET_MAX_IOV *
                                     sizeof(*tx->tx_pages));
 #else
                 if (tx->tx_wrq != NULL)
@@ -1375,7 +1375,7 @@ kibnal_setup_tx_descs (void)
                         fmr_props.pd_hndl = kibnal_data.kib_pd;
                         fmr_props.acl = (vv_acc_r_mem_write |
                                          vv_acc_l_mem_write);
-                        fmr_props.max_pages = PTL_MD_MAX_IOV;
+                        fmr_props.max_pages = LNET_MAX_IOV;
                         fmr_props.log2_page_sz = PAGE_SHIFT;
                         fmr_props.max_outstanding_maps = *kibnal_tunables.kib_fmr_remaps;
diff --git a/lnet/klnds/viblnd/viblnd.h b/lnet/klnds/viblnd/viblnd.h
index 050233b..a894934 100644
--- a/lnet/klnds/viblnd/viblnd.h
+++ b/lnet/klnds/viblnd/viblnd.h
@@ -154,7 +154,7 @@
 #if IBNAL_USE_FMR
 # define IBNAL_MAX_RDMA_FRAGS 1
 #else
-# define IBNAL_MAX_RDMA_FRAGS PTL_MD_MAX_IOV
+# define IBNAL_MAX_RDMA_FRAGS LNET_MAX_IOV
 #endif
 
 /* RX messages (per connection) */
diff --git a/lnet/klnds/viblnd/viblnd_cb.c b/lnet/klnds/viblnd/viblnd_cb.c
index fd56975..c91c744 100644
--- a/lnet/klnds/viblnd/viblnd_cb.c
+++ b/lnet/klnds/viblnd/viblnd_cb.c
@@ -673,7 +673,7 @@ kibnal_map_tx (kib_tx_t *tx, kib_rdma_desc_t *rd, int active,
         LASSERT (tx->tx_md.md_fmrcount > 0);
         LASSERT (page_offset < PAGE_SIZE);
         LASSERT (npages >= (1 + ((page_offset + nob - 1)>>PAGE_SHIFT)));
-        LASSERT (npages <= PTL_MD_MAX_IOV);
+        LASSERT (npages <= LNET_MAX_IOV);
 
         memset(&map_props, 0, sizeof(map_props));
 
@@ -742,7 +742,7 @@ kibnal_setup_rd_iov (kib_tx_t *tx, kib_rdma_desc_t *rd,
         npages = 0;
         do {
-                LASSERT (npages < PTL_MD_MAX_IOV);
+                LASSERT (npages < LNET_MAX_IOV);
 
                 page = kibnal_kvaddr_to_page(vaddr);
                 if (page == NULL) {
@@ -776,7 +776,7 @@ kibnal_setup_rd_kiov (kib_tx_t *tx, kib_rdma_desc_t *rd,
         LASSERT (nob > 0);
         LASSERT (nkiov > 0);
-        LASSERT (nkiov <= PTL_MD_MAX_IOV);
+        LASSERT (nkiov <= LNET_MAX_IOV);
         LASSERT (!tx->tx_md.md_active);
         LASSERT ((rd != tx->tx_rd) == !active);
 
@@ -793,7 +793,7 @@ kibnal_setup_rd_kiov (kib_tx_t *tx, kib_rdma_desc_t *rd,
         npages = 0;
         do {
-                LASSERT (npages < PTL_MD_MAX_IOV);
+                LASSERT (npages < LNET_MAX_IOV);
                 LASSERT (nkiov > 0);
 
                 if ((npages > 0 && kiov->kiov_offset != 0) ||
@@ -1374,7 +1374,7 @@ kibnal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                payload_nob, payload_niov, libcfs_id2str(target));
 
         LASSERT (payload_nob == 0 || payload_niov > 0);
-        LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+        LASSERT (payload_niov <= LNET_MAX_IOV);
 
         /* Thread context */
         LASSERT (!in_interrupt());
diff --git a/lnet/lnet/lib-md.c b/lnet/lnet/lib-md.c
index 5ddd84f..f5e0ca3 100644
--- a/lnet/lnet/lib-md.c
+++ b/lnet/lnet/lib-md.c
@@ -205,7 +205,7 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
         LASSERT (the_lnet.ln_refcount > 0);
 
         if ((umd.options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
-            umd.length > PTL_MD_MAX_IOV) /* too many fragments */
+            umd.length > LNET_MAX_IOV) /* too many fragments */
                 return -EINVAL;
 
         md = lnet_md_alloc(&umd);
@@ -248,7 +248,7 @@ LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
         LASSERT (the_lnet.ln_refcount > 0);
 
         if ((umd.options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
-            umd.length > PTL_MD_MAX_IOV) /* too many fragments */
+            umd.length > LNET_MAX_IOV) /* too many fragments */
                 return -EINVAL;
 
         md = lnet_md_alloc(&umd);
diff --git a/lnet/lnet/lib-move.c b/lnet/lnet/lib-move.c
index 99e6b24..c7e54d9 100644
--- a/lnet/lnet/lib-move.c
+++ b/lnet/lnet/lib-move.c
@@ -945,7 +945,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
 {
         lnet_rtrbufpool_t *rbp = &the_lnet.ln_rtrpools[0];
 
-        LASSERT (msg->msg_len <= PTL_MTU);
+        LASSERT (msg->msg_len <= LNET_MTU);
         while (msg->msg_len > rbp->rbp_npages * PAGE_SIZE) {
                 rbp++;
                 LASSERT (rbp < &the_lnet.ln_rtrpools[LNET_NRBPOOLS]);
@@ -1701,13 +1701,13 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, void *private)
 
         case LNET_MSG_PUT:
         case LNET_MSG_REPLY:
-                if (payload_length > PTL_MTU) {
+                if (payload_length > LNET_MTU) {
                         CERROR("%s, src %s: bad %s payload %d "
                                "(%d max expected)\n",
                                libcfs_nid2str(from_nid),
                                libcfs_nid2str(src_nid),
                                lnet_msgtyp2str(type),
-                               payload_length, PTL_MTU);
+                               payload_length, LNET_MTU);
                         return -EPROTO;
                 }
                 break;
diff --git a/lnet/lnet/router.c b/lnet/lnet/router.c
index 41eed78..42b6a0d 100644
--- a/lnet/lnet/router.c
+++ b/lnet/lnet/router.c
@@ -586,7 +586,7 @@ void
 lnet_init_rtrpools(void)
 {
         int small_pages = 1;
-        int large_pages = (PTL_MTU + PAGE_SIZE - 1) / PAGE_SIZE;
+        int large_pages = (LNET_MTU + PAGE_SIZE - 1) / PAGE_SIZE;
 
         lnet_rtrpool_init(&the_lnet.ln_rtrpools[0], 0);
         lnet_rtrpool_init(&the_lnet.ln_rtrpools[1], small_pages);
diff --git a/lnet/ulnds/ptllnd/ptllnd_cb.c b/lnet/ulnds/ptllnd/ptllnd_cb.c
index d8de788..93b8e0f 100644
--- a/lnet/ulnds/ptllnd/ptllnd_cb.c
+++ b/lnet/ulnds/ptllnd/ptllnd_cb.c
@@ -680,6 +680,8 @@ ptllnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *msg)
         LASSERT (!msg->msg_routing);
         LASSERT (msg->msg_kiov == NULL);
 
+        LASSERT (msg->msg_niov <= PTL_MD_MAX_IOV);      /* !!! */
+
         plp = ptllnd_find_peer(ni, msg->msg_target.nid, 1);
         if (plp == NULL)
                 return -ENOMEM;
@@ -823,6 +825,7 @@ ptllnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg,
         int            nob;
 
         LASSERT (kiov == NULL);
+        LASSERT (niov <= PTL_MD_MAX_IOV);       /* !!! */
 
         switch (rx->rx_msg->ptlm_type) {
         default:
diff --git a/lnet/ulnds/socklnd/connection.c b/lnet/ulnds/socklnd/connection.c
index 326779a..092d564 100644
--- a/lnet/ulnds/socklnd/connection.c
+++ b/lnet/ulnds/socklnd/connection.c
@@ -50,7 +50,7 @@
 /* tunables (via environment) */
 int tcpnal_acceptor_port = 988;
-int tcpnal_buffer_size = 2 * (PTL_MTU + sizeof(lnet_hdr_t));
+int tcpnal_buffer_size = 2 * (LNET_MTU + sizeof(lnet_hdr_t));
 int tcpnal_nagle = 0;
 int
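
The "/* !!! */" LASSERTs above are the runtime form of the caveat in the commit message: the
underlying portals limits have to be at least as large as LNET's.  Where PTL_MTU and
PTL_MD_MAX_IOV are compile-time constants in the portals headers, the same invariant could be
enforced at build time; a minimal self-contained C sketch (the PTLLND_BUILD_CHECK name and the
#ifndef fallback values are stand-ins for illustration only, not part of this tree):

/* Fail the ptllnd build, rather than a runtime LASSERT(), when the portals
 * limits cannot carry LNET's limits.  PTL_MTU/PTL_MD_MAX_IOV would normally
 * come from the portals headers and LNET_MTU/LNET_MAX_IOV from <lnet/types.h>;
 * the #ifndef fallbacks below are stand-ins so the sketch compiles alone. */

#ifndef LNET_MTU
# define LNET_MTU               (1 << 20)
# define LNET_MAX_IOV           256
#endif

#ifndef PTL_MTU
# define PTL_MTU                (1 << 20)       /* stand-in value */
# define PTL_MD_MAX_IOV         512             /* stand-in value */
#endif

/* negative array size => compile error when cond is false (plain C89) */
#define PTLLND_BUILD_CHECK(name, cond) \
        typedef char ptllnd_build_check_##name[(cond) ? 1 : -1]

PTLLND_BUILD_CHECK(mtu_fits,     PTL_MTU >= LNET_MTU);
PTLLND_BUILD_CHECK(max_iov_fits, PTL_MD_MAX_IOV >= LNET_MAX_IOV);

A check along these lines turns a mismatched portals build into a compile failure instead of an
assert on the first large or highly-fragmented message.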