* == 0: reschedule if someone marked him WANTS_SCHED
* > 0 : force a reschedule */
/* Return code 0 means it did not schedule the conn, 1
- * means it succesfully scheduled the conn.
+ * means it successfully scheduled the conn.
*/
int
/* Only free the buffer if we used it */
if (tx->tx_buffer_copy != NULL) {
- vfree(tx->tx_buffer_copy);
+ kgnilnd_vfree(tx->tx_buffer_copy, tx->tx_rdma_desc.length);
tx->tx_buffer_copy = NULL;
CDEBUG(D_MALLOC, "vfreed buffer2\n");
}
LBUG();
}
/* only allow NAK on error and truncate to zero */
- LASSERTF(error <= 0, "error %d conn 0x%p, cookie "LPU64"\n",
+ LASSERTF(error <= 0, "error %d conn 0x%p, cookie %llu\n",
error, conn, cookie);
tx = kgnilnd_new_tx_msg(nak_type, source);
}
int
-kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *iov,
- lnet_kiov_t *kiov, unsigned int offset, unsigned int nob)
-
+kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
+ struct kvec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int nob)
{
kgn_msg_t *msg = &tx->tx_msg;
int i;
if (nob == 0) {
tx->tx_buffer = NULL;
} else if (kiov != NULL) {
+
+ if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) {
+ niov = ((nob + offset + kiov->kiov_offset + PAGE_SIZE - 1) /
+ PAGE_SIZE);
+ }
+
LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
- "bad niov %d\n", niov);
+			 "bad niov %d msg %p kiov %p iov %p offset %d nob %d\n",
+ niov, msg, kiov, iov, offset, nob);
while (offset >= kiov->kiov_len) {
offset -= kiov->kiov_len;
int
kgnilnd_setup_virt_buffer(kgn_tx_t *tx,
- unsigned int niov, struct iovec *iov,
+ unsigned int niov, struct kvec *iov,
unsigned int offset, unsigned int nob)
{
static inline int
kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
- struct iovec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int nob)
{
int rc;
if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
atomic64_add(bytes, &dev->gnd_rdmaq_bytes_out);
- GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to "LPD64"",
+ GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %lld",
bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
}
atomic64_sub(bytes, &dev->gnd_rdmaq_bytes_out);
LASSERTF(atomic64_read(&dev->gnd_rdmaq_bytes_out) >= 0,
"bytes_out negative! %ld\n", atomic64_read(&dev->gnd_rdmaq_bytes_out));
- GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to "LPD64"",
+ GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to %lld",
bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
}
* GART resource, etc starvation handling */
if (rrc != GNI_RC_SUCCESS) {
GNIDBG_TX(D_NET, tx, "Can't map %d pages: dev %d "
- "phys %u pp %u, virt %u nob "LPU64"",
+ "phys %u pp %u, virt %u nob %llu",
tx->tx_phys_npages, dev->gnd_id,
dev->gnd_map_nphys, dev->gnd_map_physnop,
dev->gnd_map_nvirt, dev->gnd_map_virtnob);
NULL, flags, &tx->tx_map_key);
if (rrc != GNI_RC_SUCCESS) {
GNIDBG_TX(D_NET, tx, "Can't map %u bytes: dev %d "
- "phys %u pp %u, virt %u nob "LPU64"",
+ "phys %u pp %u, virt %u nob %llu",
tx->tx_nob, dev->gnd_id,
dev->gnd_map_nphys, dev->gnd_map_physnop,
dev->gnd_map_nvirt, dev->gnd_map_virtnob);
* verified peer notification - the theory is that
* a TX error can be communicated in all other cases */
if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED &&
+ error != -GNILND_NOPURG &&
kgnilnd_check_purgatory_conn(tx->tx_conn)) {
kgnilnd_add_purgatory_tx(tx);
hold_timeout = GNILND_TIMEOUT2DEADMAN;
GNIDBG_TX(D_NET, tx,
- "dev %p delaying MDD release for %dms key "LPX64"."LPX64"",
+ "dev %p delaying MDD release for %dms key %#llx.%#llx",
tx->tx_conn->gnc_device, hold_timeout,
tx->tx_map_key.qword1, tx->tx_map_key.qword2);
}
* if we are sending to the same node faster than 256000/sec.
* To help guard against this, we OR in the tx_seq - that is 32 bits */
- tx->tx_id.txe_chips = (__u32)(jiffies | conn->gnc_tx_seq);
+ tx->tx_id.txe_chips = (__u32)(jiffies | atomic_read(&conn->gnc_tx_seq));
GNIDBG_TX(D_NET, tx, "set cookie/id/bits", NULL);
* close message.
*/
if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) != 0 && msg->gnm_type != GNILND_MSG_CLOSE) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
/* Return -ETIME, we are closing the connection already so we dont want to
* have this tx hit the wire. The tx will be killed by the calling function.
* Once the EP is marked dirty the close message will be the last
libcfs_nid2str(conn->gnc_peer->gnp_nid),
cfs_duration_sec(now - newest_last_rx),
cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
return -ETIME;
}
*/
msg->gnm_connstamp = conn->gnc_my_connstamp;
msg->gnm_payload_len = immediatenob;
- msg->gnm_seq = conn->gnc_tx_seq;
+ msg->gnm_seq = atomic_read(&conn->gnc_tx_seq);
/* always init here - kgn_checksum is a /sys module tunable
* and can be flipped at any point, even between msg init and sending */
switch (rrc) {
case GNI_RC_SUCCESS:
- conn->gnc_tx_seq++;
+ atomic_inc(&conn->gnc_tx_seq);
conn->gnc_last_tx = jiffies;
/* no locking here as LIVE isn't a list */
kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_LIVE_FMAQ, 1);
/* serialize with seeing CQ events for completion on this, as well as
* tx_seq */
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
atomic_inc(&conn->gnc_device->gnd_short_ntx);
atomic64_add(immediatenob, &conn->gnc_device->gnd_short_txbytes);
/* XXX Nic: We need to figure out how to track this
* - there are bound to be good reasons for it,
* but we want to know when it happens */
-
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
/* We'll handle this error inline - makes the calling logic much more
* clean */
}
default:
/* handle bad retcode gracefully */
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
return -EIO;
}
}
int rc;
timestamp = jiffies;
- mutex_lock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_lock(&dev->gnd_cq_mutex);
+ kgnilnd_conn_mutex_lock(&tx->tx_conn->gnc_smsg_mutex);
/* delay in jiffies - we are really concerned only with things that
* result in a schedule() or really holding this off for long times .
* NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
rc = 0;
} else {
atomic_inc(&conn->gnc_device->gnd_fast_try);
- rc = mutex_trylock(&conn->gnc_device->gnd_cq_mutex);
+ rc = kgnilnd_trylock(&conn->gnc_device->gnd_cq_mutex,
+ &conn->gnc_smsg_mutex);
}
if (!rc) {
rc = -EAGAIN;
if (tx->tx_buffer_copy == NULL) {
/* Allocate the largest copy buffer we will need, this will prevent us from overwriting data
* and require at most we allocate a few extra bytes. */
- tx->tx_buffer_copy = vmalloc(desc_nob);
+ tx->tx_buffer_copy = kgnilnd_vzalloc(desc_nob);
if (!tx->tx_buffer_copy) {
/* allocation of buffer failed nak the rdma */
rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
if (rc != GNI_RC_SUCCESS) {
/* Registration Failed nak rdma and kill the tx. */
- vfree(tx->tx_buffer_copy);
+ kgnilnd_vfree(tx->tx_buffer_copy,
+ desc_nob);
tx->tx_buffer_copy = NULL;
kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
kgnilnd_tx_done(tx, -EFAULT);
tx->tx_rdma_desc.remote_mem_hndl = sink->gnrd_key;
tx->tx_rdma_desc.length = desc_nob;
tx->tx_nob_rdma = nob;
- if (*kgnilnd_tunables.kgn_bte_dlvr_mode)
- tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_dlvr_mode;
+ if (post_type == GNI_POST_RDMA_PUT && *kgnilnd_tunables.kgn_bte_put_dlvr_mode)
+ tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_put_dlvr_mode;
+ if (post_type == GNI_POST_RDMA_GET && *kgnilnd_tunables.kgn_bte_get_dlvr_mode)
+ tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_get_dlvr_mode;
/* prep final completion message */
kgnilnd_init_msg(&tx->tx_msg, type, tx->tx_msg.gnm_srcnid);
tx->tx_msg.gnm_u.completion.gncm_cookie = cookie;
tx, conn, conn->gnc_close_sent);
GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x conn %p dlvr_mode "
- "0x%x cookie:"LPX64,
+ "0x%x cookie:%#llx",
type, conn, tx->tx_rdma_desc.dlvr_mode, cookie);
/* set CQ dedicated for RDMA */
tx->tx_rdma_desc.src_cq_hndl = conn->gnc_device->gnd_snd_rdma_cqh;
timestamp = jiffies;
- mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
+ kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
/* delay in jiffies - we are really concerned only with things that
* result in a schedule() or really holding this off for long times .
* NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
rrc = kgnilnd_post_rdma(conn->gnc_ephandle, &tx->tx_rdma_desc);
if (rrc == GNI_RC_ERROR_RESOURCE) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
kgnilnd_unmap_buffer(tx, 0);
if (tx->tx_buffer_copy != NULL) {
- vfree(tx->tx_buffer_copy);
+ kgnilnd_vfree(tx->tx_buffer_copy, desc_nob);
tx->tx_buffer_copy = NULL;
}
kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_LIVE_RDMAQ, 1);
tx->tx_qtime = jiffies;
spin_unlock(&conn->gnc_list_lock);
-
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
/* XXX Nic: is this a place we should handle more errors for
* robustness sake */
CDEBUG(D_NET, "consuming %p\n", conn);
timestamp = jiffies;
- mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
/* delay in jiffies - we are really concerned only with things that
* result in a schedule() or really holding this off for long times .
* NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
rrc = kgnilnd_smsg_release(conn->gnc_ephandle);
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
LASSERTF(rrc == GNI_RC_SUCCESS, "bad rrc %d\n", rrc);
GNIDBG_SMSG_CREDS(D_NET, conn);
int target_is_router = lntmsg->msg_target_is_router;
int routing = lntmsg->msg_routing;
unsigned int niov = lntmsg->msg_niov;
- struct iovec *iov = lntmsg->msg_iov;
+ struct kvec *iov = lntmsg->msg_iov;
lnet_kiov_t *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
kgn_conn_t *conn = rx->grx_conn;
kgn_msg_t *rxmsg = rx->grx_msg;
unsigned int niov = lntmsg->msg_niov;
- struct iovec *iov = lntmsg->msg_iov;
+ struct kvec *iov = lntmsg->msg_iov;
lnet_kiov_t *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
CERROR("Couldnt find matching peer %p or conn %p / %p\n",
peer, conn, found_conn);
if (found_conn) {
- CERROR("Unexpected connstamp "LPX64"("LPX64" expected)"
+ CERROR("Unexpected connstamp %#llx(%#llx expected)"
" from %s", rxmsg->gnm_connstamp,
found_conn->gnc_peer_connstamp,
libcfs_nid2str(peer->gnp_nid));
int
kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
int delayed, unsigned int niov,
- struct iovec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
kgn_rx_t *rx = private;
next_check_time);
mod_timer(&timer, (long) jiffies + timeout);
- /* check flag variables before comitting */
+ /* check flag variables before committing */
if (!kgnilnd_data.kgn_shutdown &&
!kgnilnd_data.kgn_quiesce_trigger) {
CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
}
if (rrc == GNI_RC_NOT_DONE) {
- mutex_unlock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
CDEBUG(D_INFO, "SEND RDMA CQ %d empty processed %ld\n",
dev->gnd_id, num_processed);
return num_processed;
"this is bad, somehow our credits didn't protect us"
" from CQ overrun\n");
LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_POST,
- "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc,
+ "rrc %d, GNI_CQ_GET_TYPE(%#llx) = %#llx\n", rrc,
event_data, GNI_CQ_GET_TYPE(event_data));
rrc = kgnilnd_get_completed(dev->gnd_snd_rdma_cqh, event_data,
&desc);
- mutex_unlock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
/* XXX Nic: Need better error handling here... */
LASSERTF((rrc == GNI_RC_SUCCESS) ||
}
/* remove from rdmaq */
+ kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
spin_lock(&conn->gnc_list_lock);
kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
spin_unlock(&conn->gnc_list_lock);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
+
+ if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
+ event_data = 1LL << 48;
+ rc = 1;
+ }
if (likely(desc->status == GNI_RC_SUCCESS) && rc == 0) {
atomic_inc(&dev->gnd_rdma_ntx);
-EFAULT,
rcookie,
tx->tx_msg.gnm_srcnid);
- kgnilnd_tx_done(tx, -EFAULT);
+ kgnilnd_tx_done(tx, -GNILND_NOPURG);
kgnilnd_close_conn(conn, -ECOMM);
}
}
rrc = kgnilnd_cq_get_event(dev->gnd_snd_fma_cqh, &event_data);
- mutex_unlock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
if (rrc == GNI_RC_NOT_DONE) {
CDEBUG(D_INFO,
- "SMSG send CQ %d not ready (data "LPX64") "
+ "SMSG send CQ %d not ready (data %#llx) "
"processed %ld\n", dev->gnd_id, event_data,
num_processed);
return num_processed;
"this is bad, somehow our credits didn't "
"protect us from CQ overrun\n");
LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_SMSG,
- "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc,
+ "rrc %d, GNI_CQ_GET_TYPE(%#llx) = %#llx\n", rrc,
event_data, GNI_CQ_GET_TYPE(event_data));
/* if SMSG couldn't handle an error, time for conn to die */
if (conn == NULL) {
/* Conn was destroyed? */
CDEBUG(D_NET,
- "SMSG CQID lookup "LPX64" failed\n",
+ "SMSG CQID lookup %#llx failed\n",
GNI_CQ_GET_INST_ID(event_data));
write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
continue;
}
/* lock tx_list_state and tx_state */
+ kgnilnd_conn_mutex_lock(&conn->gnc_smsg_mutex);
spin_lock(&tx->tx_conn->gnc_list_lock);
GNITX_ASSERTF(tx, tx->tx_list_state == GNILND_TX_LIVE_FMAQ,
saw_reply = !(tx->tx_state & GNILND_TX_WAITING_REPLY);
spin_unlock(&tx->tx_conn->gnc_list_lock);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
if (queued_fma) {
CDEBUG(D_NET, "scheduling conn 0x%p->%s for fmaq\n",
return 1;
}
rrc = kgnilnd_cq_get_event(dev->gnd_rcv_fma_cqh, &event_data);
- mutex_unlock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
if (rrc == GNI_RC_NOT_DONE) {
- CDEBUG(D_INFO, "SMSG RX CQ %d empty data "LPX64" "
+ CDEBUG(D_INFO, "SMSG RX CQ %d empty data %#llx "
"processed %ld\n",
dev->gnd_id, event_data, num_processed);
return num_processed;
/* sender should get error event too and take care
of failed transaction by re-transmitting */
if (rrc == GNI_RC_TRANSACTION_ERROR) {
- CDEBUG(D_NET, "SMSG RX CQ error "LPX64"\n", event_data);
+ CDEBUG(D_NET, "SMSG RX CQ error %#llx\n", event_data);
continue;
}
conn = kgnilnd_cqid2conn_locked(
GNI_CQ_GET_INST_ID(event_data));
if (conn == NULL) {
- CDEBUG(D_NET, "SMSG RX CQID lookup "LPU64" "
- "failed, dropping event "LPX64"\n",
+ CDEBUG(D_NET, "SMSG RX CQID lookup %llu "
+ "failed, dropping event %#llx\n",
GNI_CQ_GET_INST_ID(event_data),
event_data);
} else {
- CDEBUG(D_NET, "SMSG RX: CQID "LPU64" "
+ CDEBUG(D_NET, "SMSG RX: CQID %llu "
"conn %p->%s\n",
GNI_CQ_GET_INST_ID(event_data),
conn, conn->gnc_peer ?
rc = kgnilnd_map_buffer(tx);
}
- /* rc should be 0 if we mapped succesfully here, if non-zero we are queueing */
+ /* rc should be 0 if we mapped successfully here, if non-zero
+ * we are queueing */
if (rc != 0) {
/* if try_map_if_full set, they handle requeuing */
if (unlikely(try_map_if_full)) {
GNITX_ASSERTF(tx, tx->tx_id.txe_smsg_id != 0,
"tx with zero id", NULL);
- CDEBUG(D_NET, "sending regular msg: %p, type %s(0x%02x), cookie "LPX64"\n",
+ CDEBUG(D_NET, "sending regular msg: %p, type %s(0x%02x), cookie %#llx\n",
tx, kgnilnd_msgtype2str(tx->tx_msg.gnm_type),
tx->tx_msg.gnm_type, tx->tx_id.txe_cookie);
GNITX_ASSERTF(tx, ((tx->tx_id.txe_idx == ev_id.txe_idx) &&
(tx->tx_id.txe_cookie = cookie)),
"conn 0x%p->%s tx_ref_table hosed: wanted "
- "txe_cookie "LPX64" txe_idx %d "
- "found tx %p cookie "LPX64" txe_idx %d\n",
+ "txe_cookie %#llx txe_idx %d "
+ "found tx %p cookie %#llx txe_idx %d\n",
conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
cookie, ev_id.txe_idx,
tx, tx->tx_id.txe_cookie, tx->tx_id.txe_idx);
tx->tx_state, GNILND_TX_WAITING_REPLY,
libcfs_nid2str(conn->gnc_peer->gnp_nid));
} else {
- CWARN("Unmatched reply %02x, or %02x/"LPX64" from %s\n",
+ CWARN("Unmatched reply %02x, or %02x/%#llx from %s\n",
type1, type2, cookie, libcfs_nid2str(conn->gnc_peer->gnp_nid));
}
return tx;
tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
if (rc == -EFAULT) {
- CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x nob %16"LPF64"u physnop %8d buffertype %#8x MemHandle "LPX64"."LPX64"x\n",
+		CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x nob %16llu physnop %8d buffertype %#8x MemHandle %#llx.%#llx\n",
rc, tx, id, nob, physnop, buftype, hndl.qword1, hndl.qword2);
if(*kgnilnd_tunables.kgn_efault_lbug) {
RETURN_EXIT;
timestamp = jiffies;
- mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
/* delay in jiffies - we are really concerned only with things that
* result in a schedule() or really holding this off for long times .
* NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
libcfs_nid2str(conn->gnc_peer->gnp_nid),
cfs_duration_sec(timestamp - newest_last_rx),
cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
rc = -ETIME;
kgnilnd_close_conn(conn, rc);
RETURN_EXIT;
rrc = kgnilnd_smsg_getnext(conn->gnc_ephandle, &prefix);
if (rrc == GNI_RC_NOT_DONE) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
CDEBUG(D_INFO, "SMSG RX empty conn 0x%p\n", conn);
RETURN_EXIT;
}
*/
if (rrc == GNI_RC_INVALID_STATE) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
GNIDBG_CONN(D_NETERROR | D_CONSOLE, conn, "Mailbox corruption "
"detected closing conn %p from peer %s\n", conn,
libcfs_nid2str(conn->gnc_peer->gnp_nid));
rx = kgnilnd_alloc_rx();
if (rx == NULL) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
kgnilnd_release_msg(conn);
GNIDBG_MSG(D_NETERROR, msg, "Dropping SMSG RX from 0x%p->%s, no RX memory",
conn, libcfs_nid2str(peer->gnp_nid));
GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p", conn);
timestamp = conn->gnc_last_rx;
- last_seq = conn->gnc_rx_seq;
+ seq = last_seq = atomic_read(&conn->gnc_rx_seq);
+ atomic_inc(&conn->gnc_rx_seq);
conn->gnc_last_rx = jiffies;
/* stash first rx so we can clear out purgatory
if (conn->gnc_first_rx == 0)
conn->gnc_first_rx = jiffies;
- seq = conn->gnc_rx_seq++;
-
/* needs to linger to protect gnc_rx_seq like we do with gnc_tx_seq */
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
kgnilnd_peer_alive(conn->gnc_peer);
rx->grx_msg = msg;
}
if (msg->gnm_connstamp != conn->gnc_peer_connstamp) {
- GNIDBG_MSG(D_NETERROR, msg, "Unexpected connstamp "LPX64"("LPX64
+ GNIDBG_MSG(D_NETERROR, msg, "Unexpected connstamp %#llx(%#llx"
" expected) from %s",
msg->gnm_connstamp, conn->gnc_peer_connstamp,
libcfs_nid2str(peer->gnp_nid));
conn, last_seq,
cfs_duration_sec(now - timestamp),
cfs_duration_sec(now - conn->gnc_last_rx_cq),
- conn->gnc_tx_seq,
+ atomic_read(&conn->gnc_tx_seq),
cfs_duration_sec(now - conn->gnc_last_tx),
cfs_duration_sec(now - conn->gnc_last_tx_cq),
cfs_duration_sec(now - conn->gnc_last_noop_want),
} else {
GNIDBG_TX(log_retrans_level, tx,
"transient map failure #%d %d pages/%d bytes phys %u@%u "
- "virt %u@"LPU64" "
+ "virt %u@%llu "
"nq_map %d mdd# %d/%d GART %ld",
dev->gnd_map_attempt, tx->tx_phys_npages, tx->tx_nob,
dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,