typedef struct
{
__u64 kptlhm_matchbits; /* matchbits */
- __u32 kptlhm_max_immd_size; /* immd message size */
+ __u32 kptlhm_max_msg_size; /* max message size */
} WIRE_ATTR kptl_hello_msg_t;
typedef struct kptl_msg
The maximum number of concurrent sends that are
outstanding to a single piere at any given instant.
- max_immd_size:
+ max_msg_size:
The maximum immedate message size. This MUST be
the same on all nodes in a cluster. A peer connecting
- with a diffrent max_immd_size will be rejected.
+ with a different max_msg_size will be rejected.
break;
case PTLLND_MSG_TYPE_HELLO:
__swab64s(&msg->ptlm_u.hello.kptlhm_matchbits);
- __swab32s(&msg->ptlm_u.hello.kptlhm_max_immd_size);
+ __swab32s(&msg->ptlm_u.hello.kptlhm_max_msg_size);
break;
default:
CERROR("Bad message type: %d\n", msg->ptlm_type);
kptllnd_data->kptl_rx_cache = cfs_mem_cache_create (
"ptllnd_rx",
- sizeof(kptl_rx_t) + *kptllnd_tunables.kptl_max_immd_size,
+ sizeof(kptl_rx_t) + *kptllnd_tunables.kptl_max_msg_size,
0, /* offset */
0, /* flags */
NULL,NULL); /* CTOR/DTOR */
CDEBUG(D_INFO,"rxb_npages = %d\n",*kptllnd_tunables.kptl_rxb_npages);
CDEBUG(D_INFO,"credits = %d\n",*kptllnd_tunables.kptl_credits);
CDEBUG(D_INFO,"peercredits = %d\n",*kptllnd_tunables.kptl_peercredits);
- CDEBUG(D_INFO,"max_immd_size = %d\n",*kptllnd_tunables.kptl_max_immd_size);
+ CDEBUG(D_INFO,"max_msg_size = %d\n",*kptllnd_tunables.kptl_max_msg_size);
ptllnd_assert_wire_constants();
int *kptl_rxb_npages; /* number of pages for rx buffer */
int *kptl_credits; /* number of credits */
int *kptl_peercredits; /* number of credits */
- int *kptl_max_immd_size; /* max immd message size*/
+ int *kptl_max_msg_size; /* max message size */
int *kptl_peer_hash_table_size; /* # slots in peer hash table */
#ifdef PJK_DEBUGGING
/* Is the payload small enough not to need RDMA? */
nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[payload_nob]);
- if (nob <= *kptllnd_tunables.kptl_max_immd_size)
+ if (nob <= *kptllnd_tunables.kptl_max_msg_size)
break;
/* Is the payload small enough not to need RDMA? */
nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[lntmsg->msg_md->md_length]);
- if (nob <= *kptllnd_tunables.kptl_max_immd_size)
+ if (nob <= *kptllnd_tunables.kptl_max_msg_size)
break;
STAT_UPDATE(kps_send_get);
/* Is the payload small enough not to need RDMA? */
nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[payload_nob]);
- if (nob <= *kptllnd_tunables.kptl_max_immd_size)
+ if (nob <= *kptllnd_tunables.kptl_max_msg_size)
break;
kptllnd_do_put(tx,lntmsg,kptllnd_data);
if (rx->rx_msg->ptlm_type == PTLLND_MSG_TYPE_IMMEDIATE) {
/* RDMA not expected */
nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[payload_nob]);
- if (nob > *kptllnd_tunables.kptl_max_immd_size) {
+ if (nob > *kptllnd_tunables.kptl_max_msg_size) {
CERROR("REPLY for "LPX64" too big but RDMA not requested:"
"%d (max for message is %d)\n",
target.nid, payload_nob,
- *kptllnd_tunables.kptl_max_immd_size);
+ *kptllnd_tunables.kptl_max_msg_size);
CERROR("Can't REPLY IMMEDIATE %d to "LPX64"\n",
nob, target.nid);
return -EINVAL;
STAT_UPDATE(kps_send_immd);
LASSERT (offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[payload_nob])
- <= *kptllnd_tunables.kptl_max_immd_size);
+ <= *kptllnd_tunables.kptl_max_msg_size);
/*
* Setup the header
if (payload_nob > 0) {
if (payload_kiov != NULL)
lnet_copy_kiov2flat(
- *kptllnd_tunables.kptl_max_immd_size,
+ *kptllnd_tunables.kptl_max_msg_size,
tx->tx_msg->ptlm_u.immediate.kptlim_payload,
0,
payload_niov, payload_kiov,
payload_offset, payload_nob);
else
lnet_copy_iov2flat(
- *kptllnd_tunables.kptl_max_immd_size,
+ *kptllnd_tunables.kptl_max_msg_size,
tx->tx_msg->ptlm_u.immediate.kptlim_payload,
0,
payload_niov, payload_iov,
PJK_UT_MSG_DATA("Eager RX=%p RXB=%p\n",rx,rx->rx_rxb);
- LASSERT(rx->rx_nob < *kptllnd_tunables.kptl_max_immd_size);
+ LASSERT(rx->rx_nob < *kptllnd_tunables.kptl_max_msg_size);
/*
* Copy the data directly into the RX
PJK_UT_MSG_DATA("PTLLND_MSG_TYPE_IMMEDIATE\n");
nob = offsetof(kptl_msg_t, ptlm_u.immediate.kptlim_payload[rlen]);
- if (nob > *kptllnd_tunables.kptl_max_immd_size) {
+ if (nob > *kptllnd_tunables.kptl_max_msg_size) {
CERROR ("Immediate message from "LPX64" too big: %d\n",
rxmsg->ptlm_u.immediate.kptlim_hdr.src_nid, rlen);
rc = -EINVAL;
if (kiov != NULL)
lnet_copy_flat2kiov(
niov, kiov, offset,
- *kptllnd_tunables.kptl_max_immd_size,
+ *kptllnd_tunables.kptl_max_msg_size,
rxmsg->ptlm_u.immediate.kptlim_payload,
0,
mlen);
else
lnet_copy_flat2iov(
niov, iov, offset,
- *kptllnd_tunables.kptl_max_immd_size,
+ *kptllnd_tunables.kptl_max_msg_size,
rxmsg->ptlm_u.immediate.kptlim_payload,
0,
mlen);
CFS_MODULE_PARM(peercredits, "i", int, 0444,
"concurrent sends to 1 peer");
-static int max_immd_size = PTLLND_MAX_MSG_SIZE;
-CFS_MODULE_PARM(max_immd_size, "i", int, 0444,
+static int max_msg_size = PTLLND_MAX_MSG_SIZE;
+CFS_MODULE_PARM(max_msg_size, "i", int, 0444,
"max size of immediate message");
static int peer_hash_table_size = PTLLND_PEER_HASH_SIZE;
.kptl_rxb_npages = &rxb_npages,
.kptl_credits = &credits,
.kptl_peercredits = &peercredits,
- .kptl_max_immd_size = &max_immd_size,
+ .kptl_max_msg_size = &max_msg_size,
.kptl_peer_hash_table_size = &peer_hash_table_size,
#ifdef PJK_DEBUGGING
.kptl_simulation_bitmap = &simulation_bitmap,
sizeof(int), 0444, NULL, &proc_dointvec},
{8, "peercredits", &peercredits,
sizeof(int), 0444, NULL, &proc_dointvec},
- {9, "max_immd_size", &max_immd_size,
+ {9, "max_msg_size", &max_msg_size,
sizeof(int), 0444, NULL, &proc_dointvec},
{10, "peer_hash_table_size,", &peer_hash_table_size,
sizeof(int), 0444, NULL, &proc_dointvec},
/*
* Immediate message sizes MUST be equal
*/
- if( msg->ptlm_u.hello.kptlhm_max_immd_size !=
- *kptllnd_tunables.kptl_max_immd_size){
+ if( msg->ptlm_u.hello.kptlhm_max_msg_size !=
+ *kptllnd_tunables.kptl_max_msg_size){
CERROR("IMMD message size MUST be equal for all peers got %d expected %d\n",
- msg->ptlm_u.hello.kptlhm_max_immd_size,
- *kptllnd_tunables.kptl_max_immd_size);
+ msg->ptlm_u.hello.kptlhm_max_msg_size,
+ *kptllnd_tunables.kptl_max_msg_size);
return 0;
}
*/
tx_hello->tx_msg->ptlm_u.hello.kptlhm_matchbits =
safe_matchbits_to_peer;
- tx_hello->tx_msg->ptlm_u.hello.kptlhm_max_immd_size =
- *kptllnd_tunables.kptl_max_immd_size;
+ tx_hello->tx_msg->ptlm_u.hello.kptlhm_max_msg_size =
+ *kptllnd_tunables.kptl_max_msg_size;
/*
* Try and attach this peer to the list
* a default message.
*/
tx_hello->tx_msg->ptlm_u.hello.kptlhm_matchbits = 0;
- tx_hello->tx_msg->ptlm_u.hello.kptlhm_max_immd_size =
- *kptllnd_tunables.kptl_max_immd_size;
+ tx_hello->tx_msg->ptlm_u.hello.kptlhm_max_msg_size =
+ *kptllnd_tunables.kptl_max_msg_size;
/*
* Allocate a new peer
* +1 to handle any rounding error
*/
nbuffers = (rxbp->rxbp_reserved) *
- (*kptllnd_tunables.kptl_max_immd_size) /
+ (*kptllnd_tunables.kptl_max_msg_size) /
(PAGE_SIZE * (*kptllnd_tunables.kptl_rxb_npages));
++nbuffers ;
md.options |= PTL_MD_EVENT_START_DISABLE;
md.options |= PTL_MD_MAX_SIZE;
md.user_ptr = rxb;
- md.max_size = *kptllnd_tunables.kptl_max_immd_size;
+ md.max_size = *kptllnd_tunables.kptl_max_msg_size;
md.eq_handle = kptllnd_data->kptl_eqh;
*/
tx->tx_state = TX_STATE_ON_IDLE_QUEUE;
- LIBCFS_ALLOC( tx->tx_msg, *kptllnd_tunables.kptl_max_immd_size );
+ LIBCFS_ALLOC( tx->tx_msg, *kptllnd_tunables.kptl_max_msg_size );
if(tx->tx_msg == NULL){
CERROR("Failed to allocate TX payload\n");
kptllnd_cleanup_tx_descs(kptllnd_data);
LASSERT( tx->tx_state == TX_STATE_ON_IDLE_QUEUE );
- LIBCFS_FREE(tx->tx_msg,*kptllnd_tunables.kptl_max_immd_size);
+ LIBCFS_FREE(tx->tx_msg,*kptllnd_tunables.kptl_max_msg_size);
}
}
CHECK_STRUCT (kptl_hello_msg_t);
CHECK_MEMBER (kptl_hello_msg_t, kptlhm_matchbits);
- CHECK_MEMBER (kptl_hello_msg_t, kptlhm_max_immd_size);
+ CHECK_MEMBER (kptl_hello_msg_t, kptlhm_max_msg_size);
printf ("}\n\n");