atomic_t gnc_sched_noop; /* # sched triggered NOOP */
unsigned int gnc_timeout; /* infer peer death if no rx for this many seconds */
__u32 gnc_cqid; /* my completion callback id (non-unique) */
- __u32 gnc_tx_seq; /* tx msg sequence number */
- __u32 gnc_rx_seq; /* rx msg sequence number */
+ atomic_t gnc_tx_seq; /* tx msg sequence number */
+ atomic_t gnc_rx_seq; /* rx msg sequence number */
+ struct mutex gnc_smsg_mutex; /* tx smsg sequence serialization */
+ struct mutex gnc_rdma_mutex; /* tx rdma sequence serialization */
__u64 gnc_tx_retrans; /* # retrans on SMSG */
atomic_t gnc_nlive_fma; /* # live FMA */
atomic_t gnc_nq_rdma; /* # queued (on device) RDMA */
atomic_t kgn_rev_copy_buff; /* # of REV rdma buffer copies */
struct socket *kgn_sock; /* for Apollo */
unsigned long free_pages_limit; /* # of free pages reserve from fma block allocations */
+ int kgn_enable_gl_mutex; /* kgni api mtx enable */
} kgn_data_t;
extern kgn_data_t kgnilnd_data;
atomic_dec(&kgnilnd_data.kgn_nthreads);
}
+/* Try-lock wrapper for the device-global kgni API mutex.  When the
+ * kgn_enable_gl_mutex tunable is off the global lock is not used, so
+ * report success (1) without touching it; otherwise forward to
+ * mutex_trylock() and return its result (1 = acquired, 0 = contended). */
+static inline int kgnilnd_gl_mutex_trylock(struct mutex *lock)
+{
+ if (kgnilnd_data.kgn_enable_gl_mutex)
+ return mutex_trylock(lock);
+ else
+ return 1;
+}
+
+/* Lock wrapper for the device-global kgni API mutex: acquires @lock
+ * only when the kgn_enable_gl_mutex tunable is set; a no-op otherwise
+ * (the per-connection mutexes serialize access in that mode instead). */
+static inline void kgnilnd_gl_mutex_lock(struct mutex *lock)
+{
+ if (kgnilnd_data.kgn_enable_gl_mutex)
+ mutex_lock(lock);
+}
+
+/* Unlock counterpart of kgnilnd_gl_mutex_lock(): releases @lock only
+ * when kgn_enable_gl_mutex is set, so lock/unlock calls stay balanced
+ * whichever way the tunable is configured. */
+static inline void kgnilnd_gl_mutex_unlock(struct mutex *lock)
+{
+ if (kgnilnd_data.kgn_enable_gl_mutex)
+ mutex_unlock(lock);
+}
+
+/* Lock a per-connection mutex (gnc_smsg_mutex / gnc_rdma_mutex).
+ * NOTE the inverted sense vs. the gl helpers: this only takes @lock
+ * when the global mutex is DISABLED — exactly one of the two schemes
+ * (global vs. per-connection) provides serialization at a time. */
+static inline void kgnilnd_conn_mutex_lock(struct mutex *lock)
+{
+ if (!kgnilnd_data.kgn_enable_gl_mutex)
+ mutex_lock(lock);
+}
+
+/* Unlock counterpart of kgnilnd_conn_mutex_lock(): releases @lock only
+ * when the global mutex is disabled, keeping acquire/release balanced. */
+static inline void kgnilnd_conn_mutex_unlock(struct mutex *lock)
+{
+ if (!kgnilnd_data.kgn_enable_gl_mutex)
+ mutex_unlock(lock);
+}
+
/* like mutex_trylock but with a jiffies spinner. This is to allow certain
* parts of the code to avoid a scheduler trip when the mutex is held
*
int ret;
unsigned long timeout;
+ if (!kgnilnd_data.kgn_enable_gl_mutex)
+ return 1;
+
LASSERT(!in_interrupt());
for (timeout = jiffies + 1; time_before(jiffies, timeout);) {
#undef DO_TYPE
-/* API wrapper functions - include late to pick up all of the other defines */
-#include "gnilnd_api_wrap.h"
-
/* pulls in tunables per platform and adds in nid/nic conversion
* if RCA wasn't available at build time */
#include "gnilnd_hss_ops.h"
#error "Undefined Network Hardware Type"
#endif
+/* API wrapper functions - include late to pick up all of the other defines */
+#include "gnilnd_api_wrap.h"
+
#endif /* _GNILND_GNILND_H_ */
* if we are sending to the same node faster than 256000/sec.
* To help guard against this, we OR in the tx_seq - that is 32 bits */
- tx->tx_id.txe_chips = (__u32)(jiffies | conn->gnc_tx_seq);
+ tx->tx_id.txe_chips = (__u32)(jiffies | atomic_read(&conn->gnc_tx_seq));
GNIDBG_TX(D_NET, tx, "set cookie/id/bits", NULL);
* close message.
*/
if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) != 0 && msg->gnm_type != GNILND_MSG_CLOSE) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
/* Return -ETIME, we are closing the connection already so we dont want to
* have this tx hit the wire. The tx will be killed by the calling function.
* Once the EP is marked dirty the close message will be the last
libcfs_nid2str(conn->gnc_peer->gnp_nid),
cfs_duration_sec(now - newest_last_rx),
cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
return -ETIME;
}
*/
msg->gnm_connstamp = conn->gnc_my_connstamp;
msg->gnm_payload_len = immediatenob;
- msg->gnm_seq = conn->gnc_tx_seq;
+ kgnilnd_conn_mutex_lock(&conn->gnc_smsg_mutex);
+ msg->gnm_seq = atomic_read(&conn->gnc_tx_seq);
/* always init here - kgn_checksum is a /sys module tunable
* and can be flipped at any point, even between msg init and sending */
switch (rrc) {
case GNI_RC_SUCCESS:
- conn->gnc_tx_seq++;
+ atomic_inc(&conn->gnc_tx_seq);
conn->gnc_last_tx = jiffies;
/* no locking here as LIVE isn't a list */
kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_LIVE_FMAQ, 1);
/* serialize with seeing CQ events for completion on this, as well as
* tx_seq */
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
atomic_inc(&conn->gnc_device->gnd_short_ntx);
atomic64_add(immediatenob, &conn->gnc_device->gnd_short_txbytes);
/* XXX Nic: We need to figure out how to track this
* - there are bound to be good reasons for it,
* but we want to know when it happens */
-
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
/* We'll handle this error inline - makes the calling logic much more
* clean */
}
default:
/* handle bad retcode gracefully */
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
return -EIO;
}
}
int rc;
timestamp = jiffies;
- mutex_lock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_lock(&dev->gnd_cq_mutex);
/* delay in jiffies - we are really concerned only with things that
* result in a schedule() or really holding this off for long times .
* NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
rc = 0;
} else {
atomic_inc(&conn->gnc_device->gnd_fast_try);
- rc = mutex_trylock(&conn->gnc_device->gnd_cq_mutex);
+ rc = kgnilnd_gl_mutex_trylock(&conn->gnc_device->gnd_cq_mutex);
}
if (!rc) {
rc = -EAGAIN;
tx->tx_rdma_desc.src_cq_hndl = conn->gnc_device->gnd_snd_rdma_cqh;
timestamp = jiffies;
- mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
+ kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
/* delay in jiffies - we are really concerned only with things that
* result in a schedule() or really holding this off for long times .
* NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
rrc = kgnilnd_post_rdma(conn->gnc_ephandle, &tx->tx_rdma_desc);
if (rrc == GNI_RC_ERROR_RESOURCE) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
kgnilnd_unmap_buffer(tx, 0);
if (tx->tx_buffer_copy != NULL) {
kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_LIVE_RDMAQ, 1);
tx->tx_qtime = jiffies;
spin_unlock(&conn->gnc_list_lock);
-
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
/* XXX Nic: is this a place we should handle more errors for
* robustness sake */
CDEBUG(D_NET, "consuming %p\n", conn);
timestamp = jiffies;
- mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
/* delay in jiffies - we are really concerned only with things that
* result in a schedule() or really holding this off for long times .
* NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
rrc = kgnilnd_smsg_release(conn->gnc_ephandle);
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
LASSERTF(rrc == GNI_RC_SUCCESS, "bad rrc %d\n", rrc);
GNIDBG_SMSG_CREDS(D_NET, conn);
}
if (rrc == GNI_RC_NOT_DONE) {
- mutex_unlock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
CDEBUG(D_INFO, "SEND RDMA CQ %d empty processed %ld\n",
dev->gnd_id, num_processed);
return num_processed;
rrc = kgnilnd_get_completed(dev->gnd_snd_rdma_cqh, event_data,
&desc);
- mutex_unlock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
/* XXX Nic: Need better error handling here... */
LASSERTF((rrc == GNI_RC_SUCCESS) ||
}
/* remove from rdmaq */
+ kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
spin_lock(&conn->gnc_list_lock);
kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
spin_unlock(&conn->gnc_list_lock);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
if (likely(desc->status == GNI_RC_SUCCESS) && rc == 0) {
atomic_inc(&dev->gnd_rdma_ntx);
}
rrc = kgnilnd_cq_get_event(dev->gnd_snd_fma_cqh, &event_data);
- mutex_unlock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
if (rrc == GNI_RC_NOT_DONE) {
CDEBUG(D_INFO,
}
/* lock tx_list_state and tx_state */
+ kgnilnd_conn_mutex_lock(&conn->gnc_smsg_mutex);
spin_lock(&tx->tx_conn->gnc_list_lock);
GNITX_ASSERTF(tx, tx->tx_list_state == GNILND_TX_LIVE_FMAQ,
saw_reply = !(tx->tx_state & GNILND_TX_WAITING_REPLY);
spin_unlock(&tx->tx_conn->gnc_list_lock);
+ kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
if (queued_fma) {
CDEBUG(D_NET, "scheduling conn 0x%p->%s for fmaq\n",
return 1;
}
rrc = kgnilnd_cq_get_event(dev->gnd_rcv_fma_cqh, &event_data);
- mutex_unlock(&dev->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
if (rrc == GNI_RC_NOT_DONE) {
CDEBUG(D_INFO, "SMSG RX CQ %d empty data "LPX64" "
RETURN_EXIT;
timestamp = jiffies;
- mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
/* delay in jiffies - we are really concerned only with things that
* result in a schedule() or really holding this off for long times .
* NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
libcfs_nid2str(conn->gnc_peer->gnp_nid),
cfs_duration_sec(timestamp - newest_last_rx),
cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
rc = -ETIME;
kgnilnd_close_conn(conn, rc);
RETURN_EXIT;
rrc = kgnilnd_smsg_getnext(conn->gnc_ephandle, &prefix);
if (rrc == GNI_RC_NOT_DONE) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
CDEBUG(D_INFO, "SMSG RX empty conn 0x%p\n", conn);
RETURN_EXIT;
}
*/
if (rrc == GNI_RC_INVALID_STATE) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
GNIDBG_CONN(D_NETERROR | D_CONSOLE, conn, "Mailbox corruption "
"detected closing conn %p from peer %s\n", conn,
libcfs_nid2str(conn->gnc_peer->gnp_nid));
rx = kgnilnd_alloc_rx();
if (rx == NULL) {
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
kgnilnd_release_msg(conn);
GNIDBG_MSG(D_NETERROR, msg, "Dropping SMSG RX from 0x%p->%s, no RX memory",
conn, libcfs_nid2str(peer->gnp_nid));
GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p", conn);
timestamp = conn->gnc_last_rx;
- last_seq = conn->gnc_rx_seq;
+ seq = last_seq = atomic_read(&conn->gnc_rx_seq);
+ atomic_inc(&conn->gnc_rx_seq);
conn->gnc_last_rx = jiffies;
/* stash first rx so we can clear out purgatory
if (conn->gnc_first_rx == 0)
conn->gnc_first_rx = jiffies;
- seq = conn->gnc_rx_seq++;
-
/* needs to linger to protect gnc_rx_seq like we do with gnc_tx_seq */
- mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+ kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
kgnilnd_peer_alive(conn->gnc_peer);
rx->grx_msg = msg;
conn, last_seq,
cfs_duration_sec(now - timestamp),
cfs_duration_sec(now - conn->gnc_last_rx_cq),
- conn->gnc_tx_seq,
+ atomic_read(&conn->gnc_tx_seq),
cfs_duration_sec(now - conn->gnc_last_tx),
cfs_duration_sec(now - conn->gnc_last_tx_cq),
cfs_duration_sec(now - conn->gnc_last_noop_want),