void *md_addrkey;
unsigned int md_niov; /* # frags */
union {
- struct iovec iov[PTL_MD_MAX_IOV];
- lnet_kiov_t kiov[PTL_MD_MAX_IOV];
+ struct iovec iov[LNET_MAX_IOV];
+ lnet_kiov_t kiov[LNET_MAX_IOV];
} md_iov;
} lnet_libmd_t;
} lnet_md_t;
/* Max message size */
-#define PTL_MTU (1<<20)
+#define LNET_MTU (1<<20)
/* limit on the number of entries in discontiguous MDs */
-#define PTL_MD_MAX_IOV 256
+#define LNET_MAX_IOV 256
/* Options for the MD structure */
#define LNET_MD_OP_PUT (1 << 0)
CDEBUG(D_NET, "portals_nid is %s\n", libcfs_nid2str(ni->ni_nid));
gmni->gmni_large_msgsize =
- offsetof(gmnal_msg_t, gmm_u.immediate.gmim_payload[PTL_MTU]);
+ offsetof(gmnal_msg_t, gmm_u.immediate.gmim_payload[LNET_MTU]);
gmni->gmni_large_gmsize =
gm_min_size_for_length(gmni->gmni_large_msgsize);
gmni->gmni_large_pages =
{
const int pool_size = IBNAL_NTX;
struct ib_fmr_pool_param params = {
- .max_pages_per_fmr = PTL_MTU/PAGE_SIZE,
+ .max_pages_per_fmr = LNET_MTU/PAGE_SIZE,
.access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ),
#define IBNAL_TX_MSG_BYTES (IBNAL_TX_MSGS * IBNAL_MSG_SIZE)
#define IBNAL_TX_MSG_PAGES ((IBNAL_TX_MSG_BYTES + PAGE_SIZE - 1)/PAGE_SIZE)
-#define IBNAL_TX_MAX_SG (PTL_MD_MAX_IOV + 1)
+#define IBNAL_TX_MAX_SG (LNET_MAX_IOV + 1)
/* RX messages (per connection) */
#define IBNAL_RX_MSGS IBNAL_MSG_QUEUE_SIZE
CDEBUG(D_NET, "%d RDMA: cookie "LPX64":\n",
msg->ibm_type, msg->ibm_u.rdma.ibrm_cookie);
- if ((msg->ibm_u.rdma.ibrm_num_descs > PTL_MD_MAX_IOV) ||
+ if ((msg->ibm_u.rdma.ibrm_num_descs > LNET_MAX_IOV) ||
(kib_rdma_msg_len(msg->ibm_u.rdma.ibrm_num_descs) >
min(nob, IBNAL_MSG_SIZE))) {
CERROR ("num_descs %d too large\n",
kib_rdma_msg_t *ibrm = &tx->tx_msg->ibm_u.rdma;
kib_rdma_desc_t *desc;
- LASSERTF(ibrm->ibrm_num_descs < PTL_MD_MAX_IOV, "%u\n",
+ LASSERTF(ibrm->ibrm_num_descs < LNET_MAX_IOV, "%u\n",
ibrm->ibrm_num_descs);
desc = &ibrm->ibrm_desc[ibrm->ibrm_num_descs];
goto out;
}
- if (nphys == PTL_MD_MAX_IOV) {
+ if (nphys == LNET_MAX_IOV) {
CERROR ("payload too big (%d)\n", nphys);
rc = -EMSGSIZE;
goto out;
payload_nob, payload_niov, libcfs_id2str(target));
LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
/* Thread context if we're sending payload */
LASSERT (!in_interrupt() || payload_nob == 0);
{
const int pool_size = *kibnal_tunables.kib_ntx;
struct ib_fmr_pool_param params = {
- .max_pages_per_fmr = PTL_MTU/PAGE_SIZE,
+ .max_pages_per_fmr = LNET_MTU/PAGE_SIZE,
.access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ),
goto out;
}
- if (nphys == PTL_MD_MAX_IOV) {
+ if (nphys == LNET_MAX_IOV) {
CERROR ("payload too big (%d)\n", nphys);
rc = -EMSGSIZE;
goto out;
payload_nob, payload_niov, libcfs_id2str(target));
LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
/* Thread context if we're sending payload */
LASSERT (!in_interrupt() || payload_niov == 0);
payload_nob, payload_niov, libcfs_id2str(target));
LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
/* Thread context */
LASSERT (!in_interrupt());
/* Either all pages or all vaddrs */
LASSERT (!(kiov != NULL && iov != NULL));
+ LASSERT (niov <= LNET_MAX_IOV);
+
if(delayed)
STAT_UPDATE(kps_recv_delayed);
#define KQSW_TX_BUFFER_SIZE (KQSW_HDR_SIZE + *kqswnal_tunables.kqn_tx_maxcontig)
/* The pre-allocated tx buffer (hdr + small payload) */
-#define KQSW_NTXMSGPAGES (btopr(KQSW_TX_BUFFER_SIZE) + 1 + btopr(PTL_MTU) + 1)
+#define KQSW_NTXMSGPAGES (btopr(KQSW_TX_BUFFER_SIZE) + 1 + btopr(LNET_MTU) + 1)
/* Reserve elan address space for pre-allocated and pre-mapped transmit
* buffer and a full payload too. Extra pages allow for page alignment */
/* receive hdr/payload always contiguous and page aligned */
#define KQSW_NRXMSGBYTES_SMALL (KQSW_NRXMSGPAGES_SMALL * PAGE_SIZE)
-#define KQSW_NRXMSGPAGES_LARGE (btopr(KQSW_HDR_SIZE + PTL_MTU))
+#define KQSW_NRXMSGPAGES_LARGE (btopr(KQSW_HDR_SIZE + LNET_MTU))
/* receive hdr/payload always contiguous and page aligned */
#define KQSW_NRXMSGBYTES_LARGE (KQSW_NRXMSGPAGES_LARGE * PAGE_SIZE)
/* biggest complete packet we can receive (or transmit) */
payload_nob, payload_niov, libcfs_id2str(target));
LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
/* It must be OK to kmap() if required */
LASSERT (payload_kiov == NULL || !in_interrupt ());
tx = list_entry(freelist->next, kra_tx_t, tx_list);
list_del(&tx->tx_list);
- LIBCFS_FREE(tx->tx_phys, PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
+ LIBCFS_FREE(tx->tx_phys, LNET_MAX_IOV * sizeof(*tx->tx_phys));
LIBCFS_FREE(tx, sizeof(*tx));
}
}
}
LIBCFS_ALLOC(tx->tx_phys,
- PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
+ LNET_MAX_IOV * sizeof(*tx->tx_phys));
if (tx->tx_phys == NULL) {
CERROR("Can't allocate tx[%d]->tx_phys\n", i);
return -EINVAL;
}
- if ((phys - tx->tx_phys) == PTL_MD_MAX_IOV) {
+ if ((phys - tx->tx_phys) == LNET_MAX_IOV) {
CERROR ("payload too big (%d)\n", (int)(phys - tx->tx_phys));
return -EMSGSIZE;
}
nob, niov, libcfs_id2str(target));
LASSERT (nob == 0 || niov > 0);
- LASSERT (niov <= PTL_MD_MAX_IOV);
+ LASSERT (niov <= LNET_MAX_IOV);
LASSERT (!in_interrupt());
/* payload is either all vaddrs or all pages */
/* network zero copy callback descriptor embedded in ksock_tx_t */
/* space for the rx frag descriptors; we either read a single contiguous
- * header, or up to PTL_MD_MAX_IOV frags of payload of either type. */
+ * header, or up to LNET_MAX_IOV frags of payload of either type. */
typedef union {
- struct iovec iov[PTL_MD_MAX_IOV];
- lnet_kiov_t kiov[PTL_MD_MAX_IOV];
+ struct iovec iov[LNET_MAX_IOV];
+ lnet_kiov_t kiov[LNET_MAX_IOV];
} ksock_rxiovspace_t;
#define SOCKNAL_RX_HEADER 1 /* reading header */
int ksnc_tx_scheduled; /* being progressed */
#if !SOCKNAL_SINGLE_FRAG_RX
- struct iovec ksnc_rx_scratch_iov[PTL_MD_MAX_IOV];
+ struct iovec ksnc_rx_scratch_iov[LNET_MAX_IOV];
#endif
#if !SOCKNAL_SINGLE_FRAG_TX
- struct iovec ksnc_tx_scratch_iov[PTL_MD_MAX_IOV];
+ struct iovec ksnc_tx_scratch_iov[LNET_MAX_IOV];
#endif
} ksock_conn_t;
payload_nob, payload_niov, libcfs_id2str(target));
LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
/* payload is either all vaddrs or all pages */
LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
LASSERT (!in_interrupt ());
unsigned long flags;
LASSERT (mlen <= rlen);
- LASSERT (niov <= PTL_MD_MAX_IOV);
+ LASSERT (niov <= LNET_MAX_IOV);
conn->ksnc_cookie = msg;
conn->ksnc_rx_nob_wanted = mlen;
conn->ksnc_rx_kiov = NULL;
conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
conn->ksnc_rx_niov =
- lnet_extract_iov(PTL_MD_MAX_IOV, conn->ksnc_rx_iov,
+ lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
niov, iov, offset, mlen);
} else {
conn->ksnc_rx_niov = 0;
conn->ksnc_rx_iov = NULL;
conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
conn->ksnc_rx_nkiov =
- lnet_extract_kiov(PTL_MD_MAX_IOV, conn->ksnc_rx_kiov,
+ lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
niov, kiov, offset, mlen);
}
kib_tx_t *tx = &kibnal_data.kib_tx_descs[i];
#if IBNAL_USE_FMR
- LIBCFS_ALLOC(tx->tx_pages, PTL_MD_MAX_IOV *
+ LIBCFS_ALLOC(tx->tx_pages, LNET_MAX_IOV *
sizeof(*tx->tx_pages));
if (tx->tx_pages == NULL)
return -ENOMEM;
#if IBNAL_USE_FMR
if (tx->tx_pages != NULL)
- LIBCFS_FREE(tx->tx_pages, PTL_MD_MAX_IOV *
+ LIBCFS_FREE(tx->tx_pages, LNET_MAX_IOV *
sizeof(*tx->tx_pages));
#else
if (tx->tx_wrq != NULL)
fmr_props.pd_hndl = kibnal_data.kib_pd;
fmr_props.acl = (vv_acc_r_mem_write |
vv_acc_l_mem_write);
- fmr_props.max_pages = PTL_MD_MAX_IOV;
+ fmr_props.max_pages = LNET_MAX_IOV;
fmr_props.log2_page_sz = PAGE_SHIFT;
fmr_props.max_outstanding_maps = *kibnal_tunables.kib_fmr_remaps;
#if IBNAL_USE_FMR
# define IBNAL_MAX_RDMA_FRAGS 1
#else
-# define IBNAL_MAX_RDMA_FRAGS PTL_MD_MAX_IOV
+# define IBNAL_MAX_RDMA_FRAGS LNET_MAX_IOV
#endif
/* RX messages (per connection) */
LASSERT (tx->tx_md.md_fmrcount > 0);
LASSERT (page_offset < PAGE_SIZE);
LASSERT (npages >= (1 + ((page_offset + nob - 1)>>PAGE_SHIFT)));
- LASSERT (npages <= PTL_MD_MAX_IOV);
+ LASSERT (npages <= LNET_MAX_IOV);
memset(&map_props, 0, sizeof(map_props));
npages = 0;
do {
- LASSERT (npages < PTL_MD_MAX_IOV);
+ LASSERT (npages < LNET_MAX_IOV);
page = kibnal_kvaddr_to_page(vaddr);
if (page == NULL) {
LASSERT (nob > 0);
LASSERT (nkiov > 0);
- LASSERT (nkiov <= PTL_MD_MAX_IOV);
+ LASSERT (nkiov <= LNET_MAX_IOV);
LASSERT (!tx->tx_md.md_active);
LASSERT ((rd != tx->tx_rd) == !active);
npages = 0;
do {
- LASSERT (npages < PTL_MD_MAX_IOV);
+ LASSERT (npages < LNET_MAX_IOV);
LASSERT (nkiov > 0);
if ((npages > 0 && kiov->kiov_offset != 0) ||
payload_nob, payload_niov, libcfs_id2str(target));
LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= PTL_MD_MAX_IOV);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
/* Thread context */
LASSERT (!in_interrupt());
LASSERT (the_lnet.ln_refcount > 0);
if ((umd.options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
- umd.length > PTL_MD_MAX_IOV) /* too many fragments */
+ umd.length > LNET_MAX_IOV) /* too many fragments */
return -EINVAL;
md = lnet_md_alloc(&umd);
LASSERT (the_lnet.ln_refcount > 0);
if ((umd.options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
- umd.length > PTL_MD_MAX_IOV) /* too many fragments */
+ umd.length > LNET_MAX_IOV) /* too many fragments */
return -EINVAL;
md = lnet_md_alloc(&umd);
{
lnet_rtrbufpool_t *rbp = &the_lnet.ln_rtrpools[0];
- LASSERT (msg->msg_len <= PTL_MTU);
+ LASSERT (msg->msg_len <= LNET_MTU);
while (msg->msg_len > rbp->rbp_npages * PAGE_SIZE) {
rbp++;
LASSERT (rbp < &the_lnet.ln_rtrpools[LNET_NRBPOOLS]);
case LNET_MSG_PUT:
case LNET_MSG_REPLY:
- if (payload_length > PTL_MTU) {
+ if (payload_length > LNET_MTU) {
CERROR("%s, src %s: bad %s payload %d "
"(%d max expected)\n",
libcfs_nid2str(from_nid),
libcfs_nid2str(src_nid),
lnet_msgtyp2str(type),
- payload_length, PTL_MTU);
+ payload_length, LNET_MTU);
return -EPROTO;
}
break;
lnet_init_rtrpools(void)
{
int small_pages = 1;
- int large_pages = (PTL_MTU + PAGE_SIZE - 1) / PAGE_SIZE;
+ int large_pages = (LNET_MTU + PAGE_SIZE - 1) / PAGE_SIZE;
lnet_rtrpool_init(&the_lnet.ln_rtrpools[0], 0);
lnet_rtrpool_init(&the_lnet.ln_rtrpools[1], small_pages);
LASSERT (!msg->msg_routing);
LASSERT (msg->msg_kiov == NULL);
+ LASSERT (msg->msg_niov <= LNET_MAX_IOV);
+
plp = ptllnd_find_peer(ni, msg->msg_target.nid, 1);
if (plp == NULL)
return -ENOMEM;
int nob;
LASSERT (kiov == NULL);
+ LASSERT (niov <= LNET_MAX_IOV);
switch (rx->rx_msg->ptlm_type) {
default:
/* tunables (via environment) */
int tcpnal_acceptor_port = 988;
-int tcpnal_buffer_size = 2 * (PTL_MTU + sizeof(lnet_hdr_t));
+int tcpnal_buffer_size = 2 * (LNET_MTU + sizeof(lnet_hdr_t));
int tcpnal_nagle = 0;
int