}
int
-kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *iov,
- lnet_kiov_t *kiov, unsigned int offset, unsigned int nob)
-
+kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
+ struct kvec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int nob)
{
kgn_msg_t *msg = &tx->tx_msg;
int i;
if (nob == 0) {
tx->tx_buffer = NULL;
} else if (kiov != NULL) {
+
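+ /* the caller may pass the niov of its whole source buffer;
+  * recompute it to cover just this payload so the LASSERTF
+  * below checks a meaningful page count */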
+ if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) {
+ niov = ((nob + offset + kiov->kiov_offset + PAGE_SIZE - 1) /
+ PAGE_SIZE);
+ }
+
LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
- "bad niov %d\n", niov);
+ "bad niov %d msg %p kiov %p iov %p offset %d nob%d\n",
+ niov, msg, kiov, iov, offset, nob);
while (offset >= kiov->kiov_len) {
offset -= kiov->kiov_len;
int
kgnilnd_setup_virt_buffer(kgn_tx_t *tx,
- unsigned int niov, struct iovec *iov,
+ unsigned int niov, struct kvec *iov,
unsigned int offset, unsigned int nob)
{
static inline int
kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
- struct iovec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int nob)
{
int rc;
* verified peer notification - the theory is that
* a TX error can be communicated in all other cases */
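+ /* a TX failed with -GNILND_NOPURG doesn't need the conn held
+  * in purgatory for peer notification */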
if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED &&
+ error != -GNILND_NOPURG &&
kgnilnd_check_purgatory_conn(tx->tx_conn)) {
kgnilnd_add_purgatory_tx(tx);
*/
msg->gnm_connstamp = conn->gnc_my_connstamp;
msg->gnm_payload_len = immediatenob;
- kgnilnd_conn_mutex_lock(&conn->gnc_smsg_mutex);
msg->gnm_seq = atomic_read(&conn->gnc_tx_seq);
/* always init here - kgn_checksum is a /sys module tunable
timestamp = jiffies;
kgnilnd_gl_mutex_lock(&dev->gnd_cq_mutex);
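+ /* note: take the conn's SMSG mutex only after the device CQ
+  * mutex so the two are always acquired in the same order */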
+ kgnilnd_conn_mutex_lock(&tx->tx_conn->gnc_smsg_mutex);
/* delay in jiffies - we are really concerned only with things that
* result in a schedule() or really holding this off for long times.
* NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
rc = 0;
} else {
atomic_inc(&conn->gnc_device->gnd_fast_try);
- rc = kgnilnd_gl_mutex_trylock(&conn->gnc_device->gnd_cq_mutex);
+ rc = kgnilnd_trylock(&conn->gnc_device->gnd_cq_mutex,
+ &conn->gnc_smsg_mutex);
}
if (!rc) {
rc = -EAGAIN;
int target_is_router = lntmsg->msg_target_is_router;
int routing = lntmsg->msg_routing;
unsigned int niov = lntmsg->msg_niov;
- struct iovec *iov = lntmsg->msg_iov;
+ struct kvec *iov = lntmsg->msg_iov;
lnet_kiov_t *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
kgn_conn_t *conn = rx->grx_conn;
kgn_msg_t *rxmsg = rx->grx_msg;
unsigned int niov = lntmsg->msg_niov;
- struct iovec *iov = lntmsg->msg_iov;
+ struct kvec *iov = lntmsg->msg_iov;
lnet_kiov_t *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
int
kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
int delayed, unsigned int niov,
- struct iovec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
kgn_rx_t *rx = private;
next_check_time);
mod_timer(&timer, (long) jiffies + timeout);
- /* check flag variables before comitting */
+ /* check flag variables before committing */
if (!kgnilnd_data.kgn_shutdown &&
!kgnilnd_data.kgn_quiesce_trigger) {
CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
spin_unlock(&conn->gnc_list_lock);
kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
+ if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
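+ /* fault injection: fake a failed RDMA completion */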
+ event_data = 1LL << 48;
+ rc = 1;
+ }
+
if (likely(desc->status == GNI_RC_SUCCESS) && rc == 0) {
atomic_inc(&dev->gnd_rdma_ntx);
atomic64_add(tx->tx_nob, &dev->gnd_rdma_txbytes);
-EFAULT,
rcookie,
tx->tx_msg.gnm_srcnid);
- kgnilnd_tx_done(tx, -EFAULT);
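+ /* fail with -GNILND_NOPURG so the conn isn't held in
+  * purgatory for this TX (it is closed just below) */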
+ kgnilnd_tx_done(tx, -GNILND_NOPURG);
kgnilnd_close_conn(conn, -ECOMM);
}