/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
/* Initialise an rx buffer pool: zero every field, then set up the pool
 * spinlock and the (empty) buffer list head.
 * NOTE(review): the '-'/'+' prefixed lines below are unified-diff markers
 * (legacy cfs_* libcfs wrapper replaced by the native kernel primitive);
 * this chunk is a patch fragment, not plain compilable C. */
kptllnd_rx_buffer_pool_init(kptl_rx_buffer_pool_t *rxbp)
{
/* memset first so the lock/list init below start from known-zero state */
memset(rxbp, 0, sizeof(*rxbp));
- cfs_spin_lock_init(&rxbp->rxbp_lock);
+ spin_lock_init(&rxbp->rxbp_lock);
CFS_INIT_LIST_HEAD(&rxbp->rxbp_list);
}
CDEBUG(D_NET, "kptllnd_rx_buffer_pool_reserve(%d)\n", count);
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
for (;;) {
if (rxbp->rxbp_shutdown) {
break;
}
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
LIBCFS_ALLOC(rxb, sizeof(*rxb));
LIBCFS_ALLOC(buffer, bufsize);
if (buffer != NULL)
LIBCFS_FREE(buffer, bufsize);
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rc = -ENOMEM;
break;
}
rxb->rxb_buffer = buffer;
rxb->rxb_mdh = PTL_INVALID_HANDLE;
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
if (rxbp->rxbp_shutdown) {
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
LIBCFS_FREE(rxb, sizeof(*rxb));
LIBCFS_FREE(buffer, bufsize);
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rc = -ESHUTDOWN;
break;
}
cfs_list_add_tail(&rxb->rxb_list, &rxbp->rxbp_list);
rxbp->rxbp_count++;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
kptllnd_rx_buffer_post(rxb);
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
}
if (rc == 0)
rxbp->rxbp_reserved += count;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
return rc;
}
{
unsigned long flags;
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
CDEBUG(D_NET, "kptllnd_rx_buffer_pool_unreserve(%d)\n", count);
rxbp->rxbp_reserved -= count;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
}
void
* different MD) from when the MD is actually unlinked, to when the
* event callback tells me it has been unlinked. */
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rxbp->rxbp_shutdown = 1;
rxb = cfs_list_entry (tmp, kptl_rx_buffer_t, rxb_list);
if (rxb->rxb_idle) {
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock,
+ spin_unlock_irqrestore(&rxbp->rxbp_lock,
flags);
kptllnd_rx_buffer_destroy(rxb);
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock,
+ spin_lock_irqsave(&rxbp->rxbp_lock,
flags);
continue;
}
if (PtlHandleIsEqual(mdh, PTL_INVALID_HANDLE))
continue;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
rc = PtlMDUnlink(mdh);
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
#ifdef LUSTRE_PORTALS_UNLINK_SEMANTICS
/* callback clears rxb_mdh and drops net's ref
if (cfs_list_empty(&rxbp->rxbp_list))
break;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
/* Wait a bit for references to be dropped */
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
cfs_pause(cfs_time_seconds(1));
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
}
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
}
void
any.nid = PTL_NID_ANY;
any.pid = PTL_PID_ANY;
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
if (rxbp->rxbp_shutdown) {
rxb->rxb_idle = 1;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
return;
}
rxb->rxb_refcount = 1; /* net's ref */
rxb->rxb_posted = 1; /* I'm posting */
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
rc = PtlMEAttach(kptllnd_data.kptl_nih,
*kptllnd_tunables.kptl_portal,
rc = PtlMDAttach(meh, md, PTL_UNLINK, &mdh);
if (rc == PTL_OK) {
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
if (rxb->rxb_posted) /* Not auto-unlinked yet!!! */
rxb->rxb_mdh = mdh;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
return;
}
LASSERT(rc == PTL_OK);
failed:
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rxb->rxb_posted = 0;
/* XXX this will just try again immediately */
kptllnd_rx_buffer_decref_locked(rxb);
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
}
kptl_rx_t *
if (peer != NULL) {
/* Update credits (after I've decref-ed the buffer) */
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
if (post_credit == PTLLND_POSTRX_PEER_CREDIT)
peer->peer_outstanding_credits++;
peer->peer_outstanding_credits, peer->peer_sent_credits,
rx);
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
/* I might have to send back credits */
kptllnd_peer_check_sends(peer);
rx->rx_initiator = ev->initiator;
rx->rx_treceived = jiffies;
/* Queue for attention */
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
flags);
cfs_list_add_tail(&rx->rx_list,
&kptllnd_data.kptl_sched_rxq);
cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
- cfs_spin_unlock_irqrestore(&kptllnd_data. \
+ spin_unlock_irqrestore(&kptllnd_data. \
kptl_sched_lock, flags);
}
}
if (unlinked) {
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rxb->rxb_posted = 0;
rxb->rxb_mdh = PTL_INVALID_HANDLE;
kptllnd_rx_buffer_decref_locked(rxb);
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
}
}
{
kptl_net_t *net;
- cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+ read_lock(&kptllnd_data.kptl_net_rw_lock);
cfs_list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list) {
LASSERT (!net->net_shutdown);
if (net->net_ni->ni_nid == nid) {
kptllnd_net_addref(net);
- cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
+ read_unlock(&kptllnd_data.kptl_net_rw_lock);
return net;
}
}
- cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
+ read_unlock(&kptllnd_data.kptl_net_rw_lock);
return NULL;
}
if (peer->peer_state == PEER_STATE_WAITING_HELLO) {
/* recoverable error - restart txs */
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
kptllnd_cancel_txlist(&peer->peer_sendq, &txs);
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
CWARN("NAK %s: Unexpected %s message\n",
libcfs_id2str(srcid),
LASSERTF (msg->ptlm_srcpid == peer->peer_id.pid, "m %u p %u\n",
msg->ptlm_srcpid, peer->peer_id.pid);
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
/* Check peer only sends when I've sent her credits */
if (peer->peer_sent_credits == 0) {
int oc = peer->peer_outstanding_credits;
int sc = peer->peer_sent_credits;
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
CERROR("%s: buffer overrun [%d/%d+%d]\n",
libcfs_id2str(peer->peer_id), c, sc, oc);
post_credit = PTLLND_POSTRX_NO_CREDIT;
}
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
/* See if something can go out now that credits have come in */
if (msg->ptlm_credits != 0)
PTL_RESERVED_MATCHBITS);
/* Update last match bits seen */
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
if (msg->ptlm_u.rdma.kptlrm_matchbits >
rx->rx_peer->peer_last_matchbits_seen)
rx->rx_peer->peer_last_matchbits_seen =
msg->ptlm_u.rdma.kptlrm_matchbits;
- cfs_spin_unlock_irqrestore(&rx->rx_peer->peer_lock, flags);
+ spin_unlock_irqrestore(&rx->rx_peer->peer_lock, flags);
rc = lnet_parse(net->net_ni,
&msg->ptlm_u.rdma.kptlrm_hdr,