/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*
+ * Copyright (c) 2012, Intel Corporation.
+ *
* Author: Eric Barton <eric@bartonsoftware.com>
*
* This file is part of Portals, http://www.lustre.org
void
kqswnal_put_idle_tx (kqswnal_tx_t *ktx)
{
- unsigned long flags;
+ unsigned long flags;
- kqswnal_unmap_tx (ktx); /* release temporary mappings */
- ktx->ktx_state = KTX_IDLE;
+ kqswnal_unmap_tx(ktx); /* release temporary mappings */
+ ktx->ktx_state = KTX_IDLE;
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
- cfs_list_del (&ktx->ktx_list); /* take off active list */
- cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
+ cfs_list_del(&ktx->ktx_list); /* take off active list */
+ cfs_list_add(&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
- cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+ spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
}
kqswnal_tx_t *
kqswnal_get_idle_tx (void)
{
- unsigned long flags;
- kqswnal_tx_t *ktx;
+ unsigned long flags;
+ kqswnal_tx_t *ktx;
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
- if (kqswnal_data.kqn_shuttingdown ||
- cfs_list_empty (&kqswnal_data.kqn_idletxds)) {
- cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock,
- flags);
+ if (kqswnal_data.kqn_shuttingdown ||
+ cfs_list_empty(&kqswnal_data.kqn_idletxds)) {
+ spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
- return NULL;
- }
+ return NULL;
+ }
ktx = cfs_list_entry (kqswnal_data.kqn_idletxds.next, kqswnal_tx_t,
ktx_list);
ktx->ktx_launcher = current->pid;
cfs_atomic_inc(&kqswnal_data.kqn_pending_txs);
- cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+ spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
/* Idle descs can't have any mapped (as opposed to pre-mapped) pages */
LASSERT (ktx->ktx_nmappedpages == 0);
}
/* Complete the send in thread context */
- cfs_spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
- cfs_list_add_tail(&ktx->ktx_schedlist,
- &kqswnal_data.kqn_donetxds);
- cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+ cfs_list_add_tail(&ktx->ktx_schedlist,
+ &kqswnal_data.kqn_donetxds);
+ wake_up(&kqswnal_data.kqn_sched_waitq);
- cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
}
static void
return (0);
case EP_ENOMEM: /* can't allocate ep txd => queue for later */
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
- cfs_list_add_tail (&ktx->ktx_schedlist,
- &kqswnal_data.kqn_delayedtxds);
- cfs_waitq_signal (&kqswnal_data.kqn_sched_waitq);
+ cfs_list_add_tail(&ktx->ktx_schedlist,
+ &kqswnal_data.kqn_delayedtxds);
+ wake_up(&kqswnal_data.kqn_sched_waitq);
- cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock,
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
flags);
return (0);
return;
}
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
- cfs_list_add_tail (&krx->krx_list, &kqswnal_data.kqn_readyrxds);
- cfs_waitq_signal (&kqswnal_data.kqn_sched_waitq);
+ cfs_list_add_tail(&krx->krx_list, &kqswnal_data.kqn_readyrxds);
+ wake_up(&kqswnal_data.kqn_sched_waitq);
- cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
}
int
}
int
-kqswnal_thread_start (int (*fn)(void *arg), void *arg)
+kqswnal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- long pid = cfs_create_thread (fn, arg, 0);
+ struct task_struct *task = cfs_thread_run(fn, arg, name);
- if (pid < 0)
- return ((int)pid);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
- cfs_atomic_inc (&kqswnal_data.kqn_nthreads);
- return (0);
+ cfs_atomic_inc(&kqswnal_data.kqn_nthreads);
+ return 0;
}
void
int counter = 0;
int did_something;
- cfs_daemonize ("kqswnal_sched");
cfs_block_allsigs ();
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
for (;;)
{
krx = cfs_list_entry(kqswnal_data.kqn_readyrxds.next,
kqswnal_rx_t, krx_list);
cfs_list_del (&krx->krx_list);
- cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
flags);
LASSERT (krx->krx_state == KRX_PARSE);
kqswnal_parse (krx);
did_something = 1;
- cfs_spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
flags);
}
ktx = cfs_list_entry(kqswnal_data.kqn_donetxds.next,
kqswnal_tx_t, ktx_schedlist);
cfs_list_del_init (&ktx->ktx_schedlist);
- cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
flags);
kqswnal_tx_done_in_thread_context(ktx);
did_something = 1;
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
flags);
}
ktx = cfs_list_entry(kqswnal_data.kqn_delayedtxds.next,
kqswnal_tx_t, ktx_schedlist);
cfs_list_del_init (&ktx->ktx_schedlist);
- cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
flags);
rc = kqswnal_launch (ktx);
cfs_atomic_dec (&kqswnal_data.kqn_pending_txs);
did_something = 1;
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
flags);
}
/* nothing to do or hogging CPU */
if (!did_something || counter++ == KQSW_RESCHED) {
- cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
flags);
counter = 0;
* when there's nothing left to do */
break;
}
- cfs_wait_event_interruptible_exclusive (
- kqswnal_data.kqn_sched_waitq,
- kqswnal_data.kqn_shuttingdown == 2 ||
- !cfs_list_empty(&kqswnal_data. \
- kqn_readyrxds) ||
- !cfs_list_empty(&kqswnal_data. \
- kqn_donetxds) ||
- !cfs_list_empty(&kqswnal_data. \
- kqn_delayedtxds, rc));
- LASSERT (rc == 0);
- } else if (need_resched())
- cfs_schedule ();
-
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
- flags);
- }
- }
-
- kqswnal_thread_fini ();
- return (0);
+ rc = wait_event_interruptible_exclusive(
+ kqswnal_data.kqn_sched_waitq,
+ kqswnal_data.kqn_shuttingdown == 2 ||
+ !cfs_list_empty(&kqswnal_data. \
+ kqn_readyrxds) ||
+ !cfs_list_empty(&kqswnal_data. \
+ kqn_donetxds) ||
+ !cfs_list_empty(&kqswnal_data. \
+ kqn_delayedtxds));
+ LASSERT (rc == 0);
+ } else if (need_resched())
+ schedule();
+
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
+ flags);
+ }
+ }
+
+ kqswnal_thread_fini ();
+ return 0;
}