1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
5 * Author: Isaac Huang <isaac@clusterfs.com>
8 #define DEBUG_SUBSYSTEM S_LNET
10 #include <libcfs/kp30.h>
11 #include <libcfs/libcfs.h>
12 #include <lnet/lib-lnet.h>
/* Global state for the selftest workitem scheduler: two run queues
 * (concurrent and strictly-serialised), a wait queue for each, and a
 * single spinlock guarding all of it.  NOTE(review): later code also
 * references wi_shuttingdown and wi_nthreads members — their
 * declarations are in the elided tail of this struct. */
16 struct smoketest_workitem {
17 struct list_head wi_runq; /* concurrent workitems */
18 struct list_head wi_serial_runq; /* serialised workitems */
19 cfs_waitq_t wi_waitq; /* where schedulers sleep */
20 cfs_waitq_t wi_serial_waitq; /* where serial scheduler sleeps */
21 spinlock_t wi_lock; /* serialize */
/* Predicate used as the wait_event condition by the scheduler threads:
 * a scheduler may keep sleeping only while we are not shutting down
 * AND the run queue q is empty.  Takes wi_lock so the check is
 * consistent with producers. */
27 swi_sched_cansleep (struct list_head *q)
31 spin_lock(&swi_data.wi_lock);
33 rc = !swi_data.wi_shuttingdown && list_empty(q);
35 spin_unlock(&swi_data.wi_lock);
/* Permanently retire a workitem.  Contract (from the original comment):
 * 0. it only works when called from wi->wi_action.
 * 1. when it returns no one shall try to schedule the workitem.
 */
44 swi_kill_workitem (swi_workitem_t *wi)
46 LASSERT (!in_interrupt()); /* because we use plain spinlock */
47 LASSERT (!swi_data.wi_shuttingdown);
49 spin_lock(&swi_data.wi_lock);
/* must be invoked from within wi_action, i.e. while wi is running */
52 LASSERT (wi->wi_running);
55 if (wi->wi_scheduled) { /* cancel pending schedules */
56 LASSERT (!list_empty(&wi->wi_list));
57 list_del_init(&wi->wi_list);
60 LASSERT (list_empty(&wi->wi_list));
/* leave wi_scheduled set forever: any later swi_schedule_workitem()
 * will trip its list_empty LASSERT, turning misuse into an LBUG */
61 wi->wi_scheduled = 1; /* LBUG future schedule attempts */
63 spin_unlock(&swi_data.wi_lock);
/* Queue wi on the concurrent run queue and wake one scheduler thread.
 * Scheduling is idempotent: if wi is already scheduled (wi_scheduled
 * set, wi_list non-empty) this is a no-op.  Must not be called from
 * interrupt context because wi_lock is a plain (non-irq) spinlock. */
68 swi_schedule_workitem (swi_workitem_t *wi)
70 LASSERT (!in_interrupt()); /* because we use plain spinlock */
71 LASSERT (!swi_data.wi_shuttingdown);
73 spin_lock(&swi_data.wi_lock);
75 if (!wi->wi_scheduled) {
76 LASSERT (list_empty(&wi->wi_list));
/* NOTE(review): the elided line here presumably sets
 * wi->wi_scheduled = 1 (mirroring the serial variant) — confirm */
79 list_add_tail(&wi->wi_list, &swi_data.wi_runq);
80 cfs_waitq_signal(&swi_data.wi_waitq);
/* already scheduled: must still be linked on a run queue */
83 LASSERT (!list_empty(&wi->wi_list));
84 spin_unlock(&swi_data.wi_lock);
/*
 * Workitem scheduled by this function is strictly serialised not only with
 * itself, but also with others scheduled this way.
 *
 * Now there's only one static serialised queue, but in the future more might
 * be added, and even dynamic creation of serialised queues might be supported.
 */
96 swi_schedule_serial_workitem (swi_workitem_t *wi)
98 LASSERT (!in_interrupt()); /* because we use plain spinlock */
99 LASSERT (!swi_data.wi_shuttingdown);
101 spin_lock(&swi_data.wi_lock);
103 if (!wi->wi_scheduled) {
104 LASSERT (list_empty(&wi->wi_list));
106 wi->wi_scheduled = 1;
/* serial items go to the dedicated queue served by the single
 * serial scheduler thread, guaranteeing mutual serialisation */
107 list_add_tail(&wi->wi_list, &swi_data.wi_serial_runq);
108 cfs_waitq_signal(&swi_data.wi_serial_waitq);
/* already scheduled: must still be linked on a run queue */
111 LASSERT (!list_empty(&wi->wi_list));
112 spin_unlock(&swi_data.wi_lock);
/* Per-CPU scheduler thread body for the concurrent run queue.  Drains
 * wi_runq in batches of at most SWI_RESCHED items, then sleeps until
 * swi_sched_cansleep() says there is work (or shutdown).  arg carries
 * the thread id used to build the daemon name. */
119 swi_scheduler_main (void *arg)
124 snprintf(name, sizeof(name), "swi_sd%03d", id);
128 spin_lock(&swi_data.wi_lock);
130 while (!swi_data.wi_shuttingdown) {
/* batch loop: stop after SWI_RESCHED items so one thread cannot
 * monopolise the CPU while the queue stays hot */
135 while (!list_empty(&swi_data.wi_runq) &&
136 nloops < SWI_RESCHED) {
137 wi = list_entry(swi_data.wi_runq.next,
138 swi_workitem_t, wi_list);
139 list_del_init(&wi->wi_list);
141 LASSERT (wi->wi_scheduled);
/* item is currently running on another scheduler thread:
 * requeue it at the tail and try the next one */
144 if (wi->wi_running) {
145 list_add_tail(&wi->wi_list, &swi_data.wi_runq);
/* clear wi_scheduled before running so the action (or anyone
 * else) may legally re-schedule wi while it executes */
150 wi->wi_scheduled = 0;
/* drop the lock across the callback — wi_action may sleep or
 * schedule other workitems */
151 spin_unlock(&swi_data.wi_lock);
153 rc = (*wi->wi_action) (wi);
155 spin_lock(&swi_data.wi_lock);
/* NOTE(review): the rc==0 branch body is elided here —
 * presumably it clears wi->wi_running; confirm against the
 * full source */
156 if (rc == 0) /* wi still active */
160 spin_unlock(&swi_data.wi_lock);
/* only sleep if we stopped because the queue drained, not
 * because the SWI_RESCHED batch limit was hit */
162 if (nloops < SWI_RESCHED)
163 wait_event_interruptible_exclusive(
165 !swi_sched_cansleep(&swi_data.wi_runq));
169 spin_lock(&swi_data.wi_lock);
/* shutdown: account this thread's exit under wi_lock */
172 swi_data.wi_nthreads--;
173 spin_unlock(&swi_data.wi_lock);
/* The single scheduler thread for the serialised run queue.  Same
 * batch/sleep structure as swi_scheduler_main(), but because exactly
 * one thread serves wi_serial_runq, items popped here can never be
 * running concurrently — hence the !wi_running assertion instead of
 * the requeue dance. */
178 swi_serial_scheduler_main (void *arg)
182 cfs_daemonize("swi_serial_sd");
185 spin_lock(&swi_data.wi_lock);
187 while (!swi_data.wi_shuttingdown) {
/* batch at most SWI_RESCHED items before yielding/sleeping */
192 while (!list_empty(&swi_data.wi_serial_runq) &&
193 nloops < SWI_RESCHED) {
194 wi = list_entry(swi_data.wi_serial_runq.next,
195 swi_workitem_t, wi_list);
196 list_del_init(&wi->wi_list);
/* serialisation invariant: only this thread runs serial items */
198 LASSERT (!wi->wi_running);
199 LASSERT (wi->wi_scheduled);
/* clear wi_scheduled before running so wi may be re-scheduled
 * from within its own action */
203 wi->wi_scheduled = 0;
/* drop the lock across the callback — wi_action may sleep */
204 spin_unlock(&swi_data.wi_lock);
206 rc = (*wi->wi_action) (wi);
208 spin_lock(&swi_data.wi_lock);
/* NOTE(review): the rc==0 branch body is elided here — confirm
 * against the full source */
209 if (rc == 0) /* wi still active */
213 spin_unlock(&swi_data.wi_lock);
/* sleep only if the queue drained (not on batch-limit exit) */
215 if (nloops < SWI_RESCHED)
216 wait_event_interruptible_exclusive(
217 swi_data.wi_serial_waitq,
218 !swi_sched_cansleep(&swi_data.wi_serial_runq));
222 spin_lock(&swi_data.wi_lock);
/* shutdown: account this thread's exit under wi_lock */
225 swi_data.wi_nthreads--;
226 spin_unlock(&swi_data.wi_lock);
/* Spawn one scheduler kernel thread running func(arg) and, on success,
 * bump wi_nthreads under wi_lock so shutdown can wait for it.
 * NOTE(review): the error-return path for a negative pid is elided. */
231 swi_start_thread (int (*func) (void*), void *arg)
235 LASSERT (!swi_data.wi_shuttingdown);
237 pid = cfs_kernel_thread(func, arg, 0);
241 spin_lock(&swi_data.wi_lock);
242 swi_data.wi_nthreads++;
243 spin_unlock(&swi_data.wi_lock);
247 #else /* __KERNEL__ */
/* Userspace (!__KERNEL__) replacement for the scheduler threads: pop
 * and run pending workitems inline.  Serial items take priority over
 * concurrent ones (the serial queue is checked first). */
250 swi_check_events (void)
256 spin_lock(&swi_data.wi_lock);
/* prefer the serialised queue; fall back to the concurrent one */
259 if (!list_empty(&swi_data.wi_serial_runq))
260 q = &swi_data.wi_serial_runq;
261 else if (!list_empty(&swi_data.wi_runq))
262 q = &swi_data.wi_runq;
266 wi = list_entry(q->next, swi_workitem_t, wi_list);
267 list_del_init(&wi->wi_list);
269 LASSERT (wi->wi_scheduled);
/* clear wi_scheduled before running so wi may be re-scheduled */
270 wi->wi_scheduled = 0;
/* drop the lock across the callback, as the kernel paths do */
271 spin_unlock(&swi_data.wi_lock);
274 (*wi->wi_action) (wi);
276 spin_lock(&swi_data.wi_lock);
279 spin_unlock(&swi_data.wi_lock);
/* Module startup (function header elided above — presumably
 * swi_startup): initialise all swi_data state, then spawn the single
 * serial scheduler plus one concurrent scheduler per online CPU. */
291 swi_data.wi_nthreads = 0;
292 swi_data.wi_shuttingdown = 0;
293 spin_lock_init(&swi_data.wi_lock);
294 cfs_waitq_init(&swi_data.wi_waitq);
295 cfs_waitq_init(&swi_data.wi_serial_waitq);
296 CFS_INIT_LIST_HEAD(&swi_data.wi_runq);
297 CFS_INIT_LIST_HEAD(&swi_data.wi_serial_runq);
/* the serial scheduler must exist before any serial item is queued */
300 rc = swi_start_thread(swi_serial_scheduler_main, NULL);
/* failure path: no thread was started, so the count must be zero */
302 LASSERT (swi_data.wi_nthreads == 0);
303 CERROR ("Can't spawn serial workitem scheduler: %d\n", rc);
/* one concurrent scheduler per online CPU; the thread index is
 * smuggled through the void* arg */
307 for (i = 0; i < num_online_cpus(); i++) {
308 rc = swi_start_thread(swi_scheduler_main, (void *) (long) i);
310 CERROR ("Can't spawn workitem scheduler: %d\n", rc);
/* Module shutdown (function header elided above — presumably
 * swi_shutdown): with both run queues already drained, raise the
 * shutdown flag, wake every scheduler thread, and wait under wi_lock
 * until they have all decremented wi_nthreads to zero. */
326 spin_lock(&swi_data.wi_lock);
/* callers must have retired all workitems before shutting down */
328 LASSERT (list_empty(&swi_data.wi_runq));
329 LASSERT (list_empty(&swi_data.wi_serial_runq));
331 swi_data.wi_shuttingdown = 1;
/* broadcast (not signal): every sleeping scheduler must wake to
 * observe wi_shuttingdown and exit */
334 cfs_waitq_broadcast(&swi_data.wi_waitq);
335 cfs_waitq_broadcast(&swi_data.wi_serial_waitq);
/* lst_wait_until presumably drops/retakes wi_lock while polling the
 * condition — TODO confirm against its definition */
336 lst_wait_until(swi_data.wi_nthreads == 0, swi_data.wi_lock,
337 "waiting for %d threads to terminate\n",
338 swi_data.wi_nthreads);
341 spin_unlock(&swi_data.wi_lock);