/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
return -EIO;
}
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
tx->tx_lnet_msg = lntmsg;
/* lnet_finalize() will be called when tx is torn down, so I must
/* peer has now got my ref on 'tx' */
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
tx->tx_tposted = jiffies;
kptllnd_peer_close(peer, -EIO);
/* Everything (including this RDMA) queued on the peer will
* be completed with failure */
- kptllnd_schedule_ptltrace_dump();
}
return 0;
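
The locking changes throughout this patch are mechanical: each libcfs wrapper (cfs_spin_lock_irqsave and friends) is replaced by the native Linux primitive of the same name and semantics. A minimal sketch of the resulting idiom, using hypothetical names my_lock and my_count rather than anything from this file:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* hypothetical lock */
static int my_count;			/* hypothetical shared state */

static void my_bump(void)
{
	unsigned long flags;

	/* Disable local interrupts and take the lock; the previous
	 * interrupt state is saved in 'flags', so this is safe from
	 * both process and interrupt context. */
	spin_lock_irqsave(&my_lock, flags);
	my_count++;
	spin_unlock_irqrestore(&my_lock, flags);
}

Because the wrappers mapped one-to-one onto these primitives, the conversion changes neither locking order nor interrupt behaviour.
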
LASSERT (!(kiov != NULL && iov != NULL)); /* never both */
LASSERT (niov <= PTL_MD_MAX_IOV); /* !!! */
-#ifdef CRAY_XT3
- if (lntmsg != NULL &&
- rx->rx_uid != 0) {
- /* Set the UID if the sender's uid isn't 0; i.e. non-root
- * running in userspace (e.g. a catamount node; linux kernel
- * senders, including routers have uid 0). If this is a lustre
- * RPC request, this tells lustre not to trust the creds in the
- * RPC message body. */
- lnet_set_msg_uid(ni, lntmsg, rx->rx_uid);
- }
-#endif
switch(rxmsg->ptlm_type)
{
default:
/* threads shut down in phase 2 after all peers have been destroyed */
while (kptllnd_data.kptl_shutdown < 2) {
- /* add a check for needs ptltrace
- * yes, this is blatant hijacking of this thread
- * we can't dump directly from tx or rx _callbacks as it
- * deadlocks portals and takes out the node
- */
-
- if (cfs_atomic_read(&kptllnd_data.kptl_needs_ptltrace)) {
-#ifdef CRAY_XT3
- kptllnd_dump_ptltrace();
- /* we only dump once, no matter how many pending */
- cfs_atomic_set(&kptllnd_data.kptl_needs_ptltrace, 0);
-#else
- LBUG();
-#endif
- }
-
timeout = (int)(deadline - jiffies);
-
if (timeout <= 0) {
const int n = 4;
const int p = 1;
cfs_waitlink_init(&waitlink);
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
/* threads shut down in phase 2 after all peers have been destroyed */
while (kptllnd_data.kptl_shutdown < 2) {
kptl_rx_t, rx_list);
cfs_list_del(&rx->rx_list);
- cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+ spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
kptllnd_rx_parse(rx);
did_something = 1;
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
}
rxb_repost_list);
cfs_list_del(&rxb->rxb_repost_list);
- cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+ spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
kptllnd_rx_buffer_post(rxb);
did_something = 1;
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
}
kptl_tx_t, tx_list);
cfs_list_del_init(&tx->tx_list);
- cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+ spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
kptllnd_tx_fini(tx);
did_something = 1;
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
}
cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
cfs_waitq_add_exclusive(&kptllnd_data.kptl_sched_waitq, &waitlink);
- cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+ spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
if (!did_something)
cfs_set_current_state(CFS_TASK_RUNNING);
cfs_waitq_del(&kptllnd_data.kptl_sched_waitq, &waitlink);
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
counter = 0;
}
- cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+ spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
kptllnd_thread_fini();
return 0;
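
Each queue handled by the scheduler above (rx parse, rx buffer repost, tx finalize) follows the same drain idiom: detach one item while holding kptl_sched_lock, release the lock to do the real work, then retake it before testing the queue again; the thread only sleeps on the waitqueue after a full pass in which it did nothing. A condensed sketch of that idiom, assuming a hypothetical work_t type and process() callback in place of the three real queue entries:

#include <linux/list.h>
#include <linux/spinlock.h>

struct work_t {
	struct list_head w_list;
};

static void drain_queue(spinlock_t *lock, struct list_head *queue,
			void (*process)(struct work_t *))
{
	struct work_t *w;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while (!list_empty(queue)) {
		w = list_entry(queue->next, struct work_t, w_list);
		list_del(&w->w_list);

		/* Run the handler with the lock dropped: it may block or
		 * take other locks, and a handler running under a held
		 * spinlock would stall every other CPU queueing work. */
		spin_unlock_irqrestore(lock, flags);
		process(w);
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}

The exclusive wait (cfs_waitq_add_exclusive) pairs with this: a wakeup rouses a single scheduler thread per queued event instead of the whole pool, so threads that find nothing to drain go straight back to sleep.
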