An LNet user can allocate a large contiguous MD. That MD can have >
LNET_MAX_IOV pages which causes some LNDs to assert on either niov
argument passed to lnd_recv() or the value stored in
lnet_msg::msg_niov. This is true even in cases where the actual
transfer size is <= LNET_MTU and will not exceed limits in the LNDs.
Adjust ksocknal_send()/ksocknal_recv() to assert on the return value
of lnet_extract_kiov().
Remove the assert on msg_niov (payload_niov) from kiblnd_send().
kiblnd_setup_rd_kiov() will already fail if we exceed ko2iblnd's
available scatter gather entries.
HPE-bug-id: LUS-10878
Test-Parameters: trivial
Fixes: 857f11169f ("LU-13004 lnet: always put a page list into struct lnet_libmd")
Signed-off-by: Chris Horn <chris.horn@hpe.com>
Change-Id: Iaa851d90f735d04e5167bb9c07235625759245b2
Reviewed-on: https://review.whamcloud.com/47319
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Reviewed-by: Serguei Smirnov <ssmirnov@whamcloud.com>
Reviewed-by: Alexey Lyashkov <alexey.lyashkov@hpe.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
payload_nob, payload_niov, libcfs_idstr(target));
LASSERT(payload_nob == 0 || payload_niov > 0);
payload_nob, payload_niov, libcfs_idstr(target));
LASSERT(payload_nob == 0 || payload_niov > 0);
- LASSERT(payload_niov <= LNET_MAX_IOV);
/* Thread context */
LASSERT(!in_interrupt());
/* Thread context */
LASSERT(!in_interrupt());
payload_nob, payload_niov, libcfs_idstr(target));
LASSERT (payload_nob == 0 || payload_niov > 0);
payload_nob, payload_niov, libcfs_idstr(target));
LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= LNET_MAX_IOV);
LASSERT (!in_interrupt ());
desc_size = offsetof(struct ksock_tx,
LASSERT (!in_interrupt ());
desc_size = offsetof(struct ksock_tx,
payload_niov, payload_kiov,
payload_offset, payload_nob);
payload_niov, payload_kiov,
payload_offset, payload_nob);
+ LASSERT(tx->tx_nkiov <= LNET_MAX_IOV);
+
if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
tx->tx_zc_capable = 1;
if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
tx->tx_zc_capable = 1;
struct ksock_sched *sched = conn->ksnc_scheduler;
LASSERT (mlen <= rlen);
struct ksock_sched *sched = conn->ksnc_scheduler;
LASSERT (mlen <= rlen);
- LASSERT (niov <= LNET_MAX_IOV);
conn->ksnc_lnet_msg = msg;
conn->ksnc_rx_nob_wanted = mlen;
conn->ksnc_lnet_msg = msg;
conn->ksnc_rx_nob_wanted = mlen;
niov, kiov, offset, mlen);
}
niov, kiov, offset, mlen);
}
+ LASSERT(conn->ksnc_rx_nkiov <= LNET_MAX_IOV);
LASSERT (mlen ==
lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
lnet_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
LASSERT (mlen ==
lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
lnet_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));