-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
LIBCFS_FREE(tx, sizeof(*tx));
- atomic_dec(&kptllnd_data.kptl_ntx);
+ cfs_atomic_dec(&kptllnd_data.kptl_ntx);
/* Keep the tunable in step for visibility */
- *kptllnd_tunables.kptl_ntx = atomic_read(&kptllnd_data.kptl_ntx);
+ *kptllnd_tunables.kptl_ntx = cfs_atomic_read(&kptllnd_data.kptl_ntx);
}
kptl_tx_t *
return NULL;
}
- atomic_inc(&kptllnd_data.kptl_ntx);
+ cfs_atomic_inc(&kptllnd_data.kptl_ntx);
/* Keep the tunable in step for visibility */
- *kptllnd_tunables.kptl_ntx = atomic_read(&kptllnd_data.kptl_ntx);
+ *kptllnd_tunables.kptl_ntx = cfs_atomic_read(&kptllnd_data.kptl_ntx);
tx->tx_idle = 1;
tx->tx_rdma_mdh = PTL_INVALID_HANDLE;
tx->tx_rdma_eventarg.eva_type = PTLLND_EVENTARG_TYPE_RDMA;
tx->tx_msg_eventarg.eva_type = PTLLND_EVENTARG_TYPE_MSG;
tx->tx_msg = NULL;
+ tx->tx_peer = NULL;
tx->tx_frags = NULL;
LIBCFS_ALLOC(tx->tx_msg, sizeof(*tx->tx_msg));
{
int n = *kptllnd_tunables.kptl_ntx;
int i;
-
+
for (i = 0; i < n; i++) {
kptl_tx_t *tx = kptllnd_alloc_tx();
-
if (tx == NULL)
return -ENOMEM;
-
- spin_lock(&kptllnd_data.kptl_tx_lock);
-
- list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
-
- spin_unlock(&kptllnd_data.kptl_tx_lock);
+
+ spin_lock(&kptllnd_data.kptl_tx_lock);
+ cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
+ spin_unlock(&kptllnd_data.kptl_tx_lock);
}
-
+
return 0;
}
/* No locking; single threaded now */
LASSERT (kptllnd_data.kptl_shutdown == 2);
- while (!list_empty(&kptllnd_data.kptl_idle_txs)) {
- tx = list_entry(kptllnd_data.kptl_idle_txs.next,
- kptl_tx_t, tx_list);
-
- list_del(&tx->tx_list);
+ while (!cfs_list_empty(&kptllnd_data.kptl_idle_txs)) {
+ tx = cfs_list_entry(kptllnd_data.kptl_idle_txs.next,
+ kptl_tx_t, tx_list);
+
+ cfs_list_del(&tx->tx_list);
kptllnd_free_tx(tx);
}
- LASSERT (atomic_read(&kptllnd_data.kptl_ntx) == 0);
+ LASSERT (cfs_atomic_read(&kptllnd_data.kptl_ntx) == 0);
}
kptl_tx_t *
{
kptl_tx_t *tx = NULL;
- if (IS_SIMULATION_ENABLED(FAIL_TX_PUT_ALLOC) &&
+ if (IS_SIMULATION_ENABLED(FAIL_TX_PUT_ALLOC) &&
type == TX_TYPE_PUT_REQUEST) {
CERROR("FAIL_TX_PUT_ALLOC SIMULATION triggered\n");
return NULL;
}
- if (IS_SIMULATION_ENABLED(FAIL_TX_GET_ALLOC) &&
+ if (IS_SIMULATION_ENABLED(FAIL_TX_GET_ALLOC) &&
type == TX_TYPE_GET_REQUEST) {
CERROR ("FAIL_TX_GET_ALLOC SIMULATION triggered\n");
return NULL;
}
- spin_lock(&kptllnd_data.kptl_tx_lock);
+ spin_lock(&kptllnd_data.kptl_tx_lock);
- if (list_empty (&kptllnd_data.kptl_idle_txs)) {
- spin_unlock(&kptllnd_data.kptl_tx_lock);
+ if (cfs_list_empty (&kptllnd_data.kptl_idle_txs)) {
+ spin_unlock(&kptllnd_data.kptl_tx_lock);
tx = kptllnd_alloc_tx();
if (tx == NULL)
return NULL;
} else {
- tx = list_entry(kptllnd_data.kptl_idle_txs.next,
- kptl_tx_t, tx_list);
- list_del(&tx->tx_list);
+ tx = cfs_list_entry(kptllnd_data.kptl_idle_txs.next,
+ kptl_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
- spin_unlock(&kptllnd_data.kptl_tx_lock);
+ spin_unlock(&kptllnd_data.kptl_tx_lock);
}
- LASSERT (atomic_read(&tx->tx_refcount)== 0);
+ LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
LASSERT (tx->tx_idle);
LASSERT (!tx->tx_active);
LASSERT (tx->tx_lnet_msg == NULL);
LASSERT (PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE));
tx->tx_type = type;
- atomic_set(&tx->tx_refcount, 1);
+ cfs_atomic_set(&tx->tx_refcount, 1);
tx->tx_status = 0;
tx->tx_idle = 0;
tx->tx_tposted = 0;
ptl_handle_md_t rdma_mdh;
unsigned long flags;
- LASSERT (atomic_read(&tx->tx_refcount) == 0);
+ LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
LASSERT (!tx->tx_active);
- spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
msg_mdh = tx->tx_msg_mdh;
rdma_mdh = tx->tx_rdma_mdh;
if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
return 0;
}
tx->tx_lnet_replymsg == NULL));
/* stash the tx on its peer until it completes */
- atomic_set(&tx->tx_refcount, 1);
+ cfs_atomic_set(&tx->tx_refcount, 1);
tx->tx_active = 1;
- list_add_tail(&tx->tx_list, &peer->peer_activeq);
+ cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
/* These unlinks will ensure completion events (normal or unlink) will
* happen ASAP */
unsigned long flags;
ptl_err_t prc;
- LASSERT (atomic_read(&tx->tx_refcount) == 0);
+ LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
LASSERT (!tx->tx_active);
- spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
msg_mdh = tx->tx_msg_mdh;
rdma_mdh = tx->tx_rdma_mdh;
if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
return 0;
}
(tx->tx_lnet_msg == NULL &&
tx->tx_lnet_replymsg == NULL));
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
if (!PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE)) {
prc = PtlMDUnlink(msg_mdh);
rdma_mdh = PTL_INVALID_HANDLE;
}
- spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
/* update tx_???_mdh if callback hasn't fired */
if (PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE))
if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
return 0;
}
/* stash the tx on its peer until it completes */
- atomic_set(&tx->tx_refcount, 1);
+ cfs_atomic_set(&tx->tx_refcount, 1);
tx->tx_active = 1;
- list_add_tail(&tx->tx_list, &peer->peer_activeq);
+ cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
kptllnd_peer_addref(peer); /* extra ref for me... */
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
/* This will get the watchdog thread to try aborting all the peer's
* comms again. NB, this deems it fair that 1 failing tx which can't
int status = tx->tx_status;
int rc;
- LASSERT (!in_interrupt());
- LASSERT (atomic_read(&tx->tx_refcount) == 0);
+ LASSERT (!cfs_in_interrupt());
+ LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
LASSERT (!tx->tx_idle);
LASSERT (!tx->tx_active);
tx->tx_peer = NULL;
tx->tx_idle = 1;
- spin_lock(&kptllnd_data.kptl_tx_lock);
- list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
- spin_unlock(&kptllnd_data.kptl_tx_lock);
+ spin_lock(&kptllnd_data.kptl_tx_lock);
+ cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
+ spin_unlock(&kptllnd_data.kptl_tx_lock);
/* Must finalize AFTER freeing 'tx' */
if (msg != NULL)
- lnet_finalize(kptllnd_data.kptl_ni, msg,
- (replymsg == NULL) ? status : 0);
+ lnet_finalize(NULL, msg, (replymsg == NULL) ? status : 0);
if (replymsg != NULL)
- lnet_finalize(kptllnd_data.kptl_ni, replymsg, status);
+ lnet_finalize(NULL, replymsg, status);
if (peer != NULL)
kptllnd_peer_decref(peer);
if (!ismsg && ok && ev->type == PTL_EVENT_PUT_END) {
if (ev->hdr_data == PTLLND_RDMA_OK) {
- lnet_set_reply_msg_len(
- kptllnd_data.kptl_ni,
+ lnet_set_reply_msg_len(NULL,
tx->tx_lnet_replymsg,
ev->mlength);
} else {
if (!unlinked)
return;
- spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
if (ismsg)
tx->tx_msg_mdh = PTL_INVALID_HANDLE;
if (!PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE) ||
!PtlHandleIsEqual(tx->tx_rdma_mdh, PTL_INVALID_HANDLE) ||
!tx->tx_active) {
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
return;
}
- list_del(&tx->tx_list);
+ cfs_list_del(&tx->tx_list);
tx->tx_active = 0;
- spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
/* drop peer's ref, but if it was the last one... */
- if (atomic_dec_and_test(&tx->tx_refcount)) {
+ if (cfs_atomic_dec_and_test(&tx->tx_refcount)) {
/* ...finalize it in thread context! */
- spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
- list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
- wake_up(&kptllnd_data.kptl_sched_waitq);
+ cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
+ cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
- spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+ spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
+ flags);
}
}