/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
return -EIO;
}
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
tx->tx_lnet_msg = lntmsg;
/* lnet_finalize() will be called when tx is torn down, so I must
* return success from here on... */
- tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * CFS_HZ);
- tx->tx_rdma_mdh = mdh;
- tx->tx_active = 1;
- cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
+ tx->tx_deadline = jiffies + (*kptllnd_tunables.kptl_timeout * HZ);
+ tx->tx_rdma_mdh = mdh;
+ tx->tx_active = 1;
+ cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
/* peer has now got my ref on 'tx' */
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
tx->tx_tposted = jiffies;
kptllnd_peer_close(peer, -EIO);
/* Everything (including this RDMA) queued on the peer will
* be completed with failure */
- kptllnd_schedule_ptltrace_dump();
}
return 0;
int nfrag;
int rc;
- LASSERT (net->net_ni == ni);
- LASSERT (!net->net_shutdown);
- LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= LNET_MAX_IOV);
- LASSERT (payload_niov <= PTL_MD_MAX_IOV); /* !!! */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- LASSERT (!cfs_in_interrupt());
+ LASSERT (net->net_ni == ni);
+ LASSERT (!net->net_shutdown);
+ LASSERT (payload_nob == 0 || payload_niov > 0);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
+ LASSERT (payload_niov <= PTL_MD_MAX_IOV); /* !!! */
+ LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+ LASSERT (!in_interrupt());
- if (lntmsg->msg_vmflush)
- mpflag = cfs_memory_pressure_get_and_set();
+ if (lntmsg->msg_vmflush)
+ mpflag = cfs_memory_pressure_get_and_set();
- rc = kptllnd_find_target(net, target, &peer);
- if (rc != 0)
- goto out;
+ rc = kptllnd_find_target(net, target, &peer);
+ if (rc != 0)
+ goto out;
/* NB peer->peer_id does NOT always equal target, be careful with
* which one to use */
int nob;
int rc;
- CDEBUG(D_NET, "%s niov=%d offset=%d mlen=%d rlen=%d\n",
- kptllnd_msgtype2str(rxmsg->ptlm_type),
- niov, offset, mlen, rlen);
-
- LASSERT (mlen <= rlen);
- LASSERT (mlen >= 0);
- LASSERT (!cfs_in_interrupt());
- LASSERT (!(kiov != NULL && iov != NULL)); /* never both */
- LASSERT (niov <= PTL_MD_MAX_IOV); /* !!! */
-
-#ifdef CRAY_XT3
- if (lntmsg != NULL &&
- rx->rx_uid != 0) {
- /* Set the UID if the sender's uid isn't 0; i.e. non-root
- * running in userspace (e.g. a catamount node; linux kernel
- * senders, including routers have uid 0). If this is a lustre
- * RPC request, this tells lustre not to trust the creds in the
- * RPC message body. */
- lnet_set_msg_uid(ni, lntmsg, rx->rx_uid);
- }
-#endif
- switch(rxmsg->ptlm_type)
- {
- default:
+ CDEBUG(D_NET, "%s niov=%d offset=%d mlen=%d rlen=%d\n",
+ kptllnd_msgtype2str(rxmsg->ptlm_type),
+ niov, offset, mlen, rlen);
+
+ LASSERT (mlen <= rlen);
+ LASSERT (mlen >= 0);
+ LASSERT (!in_interrupt());
+ LASSERT (!(kiov != NULL && iov != NULL)); /* never both */
+ LASSERT (niov <= PTL_MD_MAX_IOV); /* !!! */
+
+	switch (rxmsg->ptlm_type) {
+	default:
LBUG();
rc = -EINVAL;
break;
}
int
-kptllnd_thread_start (int (*fn)(void *arg), void *arg)
+kptllnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- long pid;
+ struct task_struct *task;
- cfs_atomic_inc(&kptllnd_data.kptl_nthreads);
+ cfs_atomic_inc(&kptllnd_data.kptl_nthreads);
- pid = cfs_create_thread (fn, arg, 0);
- if (pid >= 0)
- return 0;
-
- CERROR("Failed to start thread: error %d\n", (int)pid);
- kptllnd_thread_fini();
- return (int)pid;
+	task = kthread_run(fn, arg, name);
+	if (IS_ERR(task)) {
+		CERROR("Failed to start thread: error %ld\n", PTR_ERR(task));
+		kptllnd_thread_fini();
+		return PTR_ERR(task);
+	}
+	/* success: callers expect 0, not the valid task pointer passed through PTR_ERR() */
+	return 0;
}
int
kptllnd_watchdog(void *arg)
{
- int id = (long)arg;
- char name[16];
- cfs_waitlink_t waitlink;
- int stamp = 0;
- int peer_index = 0;
- unsigned long deadline = jiffies;
- int timeout;
- int i;
+ int id = (long)arg;
+ wait_queue_t waitlink;
+ int stamp = 0;
+ int peer_index = 0;
+ unsigned long deadline = jiffies;
+ int timeout;
+ int i;
- snprintf(name, sizeof(name), "kptllnd_wd_%02d", id);
- cfs_daemonize(name);
- cfs_block_allsigs();
+ cfs_block_allsigs();
- cfs_waitlink_init(&waitlink);
+ init_waitqueue_entry_current(&waitlink);
/* threads shut down in phase 2 after all peers have been destroyed */
while (kptllnd_data.kptl_shutdown < 2) {
- /* add a check for needs ptltrace
- * yes, this is blatant hijacking of this thread
- * we can't dump directly from tx or rx _callbacks as it
- * deadlocks portals and takes out the node
- */
-
- if (cfs_atomic_read(&kptllnd_data.kptl_needs_ptltrace)) {
-#ifdef CRAY_XT3
- kptllnd_dump_ptltrace();
- /* we only dump once, no matter how many pending */
- cfs_atomic_set(&kptllnd_data.kptl_needs_ptltrace, 0);
-#else
- LBUG();
-#endif
- }
-
timeout = (int)(deadline - jiffies);
-
if (timeout <= 0) {
const int n = 4;
const int p = 1;
kptllnd_data.kptl_peer_hash_size;
}
- deadline += p * CFS_HZ;
- stamp++;
- continue;
+ deadline += p * HZ;
+ stamp++;
+ continue;
}
kptllnd_handle_closing_peers();
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&kptllnd_data.kptl_watchdog_waitq,
- &waitlink);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&kptllnd_data.kptl_watchdog_waitq,
+ &waitlink);
- cfs_waitq_timedwait(&waitlink, CFS_TASK_INTERRUPTIBLE, timeout);
+ waitq_timedwait(&waitlink, TASK_INTERRUPTIBLE, timeout);
- cfs_set_current_state (CFS_TASK_RUNNING);
- cfs_waitq_del(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
- }
+	set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kptllnd_data.kptl_watchdog_waitq, &waitlink);
+ }
- kptllnd_thread_fini();
- CDEBUG(D_NET, "<<<\n");
- return (0);
+ kptllnd_thread_fini();
+ CDEBUG(D_NET, "<<<\n");
+ return (0);
};
int
kptllnd_scheduler (void *arg)
{
- int id = (long)arg;
- char name[16];
- cfs_waitlink_t waitlink;
- unsigned long flags;
- int did_something;
- int counter = 0;
- kptl_rx_t *rx;
- kptl_rx_buffer_t *rxb;
- kptl_tx_t *tx;
+ int id = (long)arg;
+ wait_queue_t waitlink;
+ unsigned long flags;
+ int did_something;
+ int counter = 0;
+ kptl_rx_t *rx;
+ kptl_rx_buffer_t *rxb;
+ kptl_tx_t *tx;
- snprintf(name, sizeof(name), "kptllnd_sd_%02d", id);
- cfs_daemonize(name);
- cfs_block_allsigs();
+ cfs_block_allsigs();
- cfs_waitlink_init(&waitlink);
+ init_waitqueue_entry_current(&waitlink);
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
/* threads shut down in phase 2 after all peers have been destroyed */
while (kptllnd_data.kptl_shutdown < 2) {
kptl_rx_t, rx_list);
cfs_list_del(&rx->rx_list);
- cfs_spin_unlock_irqrestore(&kptllnd_data. \
+ spin_unlock_irqrestore(&kptllnd_data. \
kptl_sched_lock,
flags);
kptllnd_rx_parse(rx);
did_something = 1;
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
flags);
}
rxb_repost_list);
cfs_list_del(&rxb->rxb_repost_list);
- cfs_spin_unlock_irqrestore(&kptllnd_data. \
+ spin_unlock_irqrestore(&kptllnd_data. \
kptl_sched_lock,
flags);
kptllnd_rx_buffer_post(rxb);
did_something = 1;
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
flags);
}
kptl_tx_t, tx_list);
cfs_list_del_init(&tx->tx_list);
- cfs_spin_unlock_irqrestore(&kptllnd_data. \
+ spin_unlock_irqrestore(&kptllnd_data. \
kptl_sched_lock, flags);
kptllnd_tx_fini(tx);
did_something = 1;
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
flags);
}
continue;
}
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&kptllnd_data.kptl_sched_waitq,
- &waitlink);
- cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
- flags);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&kptllnd_data.kptl_sched_waitq,
+ &waitlink);
+ spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
+ flags);
- if (!did_something)
- cfs_waitq_wait(&waitlink, CFS_TASK_INTERRUPTIBLE);
- else
- cfs_cond_resched();
+ if (!did_something)
+ waitq_wait(&waitlink, TASK_INTERRUPTIBLE);
+ else
+ cond_resched();
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kptllnd_data.kptl_sched_waitq, &waitlink);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kptllnd_data.kptl_sched_waitq, &waitlink);
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
- counter = 0;
- }
+ counter = 0;
+ }
- cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+ spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
kptllnd_thread_fini();
return 0;