/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/workitem.c
 *
 * Author: Isaac Huang <isaac@clusterfs.com>
 *         Liang Zhen  <zhen.liang@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <libcfs/libcfs.h>

#define CFS_WS_NAME_LEN         16

typedef struct cfs_wi_sched {
        cfs_list_t              ws_list;        /* chain on global list */
#ifdef __KERNEL__
        /** serialised workitems */
        spinlock_t              ws_lock;
        /** where schedulers sleep */
        cfs_waitq_t             ws_waitq;
#endif
        /** concurrent workitems */
        cfs_list_t              ws_runq;
        /** rescheduled running workitems: a workitem can be rescheduled
         * while running in wi_action(), but we don't want to execute it
         * again until it returns from wi_action(), so we park it on
         * ws_rerunq while rescheduling, and move it back to ws_runq once
         * it has returned from wi_action() */
        cfs_list_t              ws_rerunq;
        /** CPT-table for this scheduler */
        struct cfs_cpt_table    *ws_cptab;
        /** CPT id for affinity */
        int                     ws_cpt;
        /** number of scheduled workitems */
        int                     ws_nscheduled;
        /** number of started scheduler threads, protected by
         * cfs_wi_data::wi_glock */
        unsigned int            ws_nthreads:30;
        /** shutting down, protected by cfs_wi_data::wi_glock */
        unsigned int            ws_stopping:1;
        /** serialize starting thread, protected by cfs_wi_data::wi_glock */
        unsigned int            ws_starting:1;
        /** scheduler name */
        char                    ws_name[CFS_WS_NAME_LEN];
} cfs_wi_sched_t;
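
/*
 * Editor's illustration (not part of the original code): ws_rerunq is what
 * lets a workitem reschedule itself from inside its own wi_action().  All
 * "demo_*" names below are hypothetical:
 *
 *        static int
 *        demo_action(cfs_workitem_t *wi)
 *        {
 *                if (demo_more_work(wi->wi_data))
 *                        cfs_wi_schedule(demo_sched, wi);
 *                return 0;
 *        }
 *
 * While demo_action() is still running, the rescheduled workitem waits on
 * ws_rerunq; once the action returns 0, the scheduler thread moves it back
 * to ws_runq and runs it again.
 */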

struct cfs_workitem_data {
        /** serialize */
        spinlock_t              wi_glock;
        /** list of all schedulers */
        cfs_list_t              wi_scheds;
        /** WI module is initialized */
        int                     wi_init;
        /** shutting down the whole WI module */
        int                     wi_stopping;
} cfs_wi_data;

#ifdef __KERNEL__
static inline void
cfs_wi_sched_lock(cfs_wi_sched_t *sched)
{
        spin_lock(&sched->ws_lock);
}

static inline void
cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
{
        spin_unlock(&sched->ws_lock);
}

static inline int
cfs_wi_sched_cansleep(cfs_wi_sched_t *sched)
{
        cfs_wi_sched_lock(sched);
        if (sched->ws_stopping) {
                cfs_wi_sched_unlock(sched);
                return 0;
        }

        if (!cfs_list_empty(&sched->ws_runq)) {
                cfs_wi_sched_unlock(sched);
                return 0;
        }
        cfs_wi_sched_unlock(sched);
        return 1;
}

#else /* !__KERNEL__ */

static inline void
cfs_wi_sched_lock(cfs_wi_sched_t *sched)
{
        spin_lock(&cfs_wi_data.wi_glock);
}

static inline void
cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
{
        spin_unlock(&cfs_wi_data.wi_glock);
}

#endif /* __KERNEL__ */

/* XXX:
 * 0. this only works when called from within wi->wi_action().
 * 1. after it returns, nobody shall try to schedule the workitem again.
 */
void
cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
{
        LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
        LASSERT(!sched->ws_stopping);

        cfs_wi_sched_lock(sched);

#ifdef __KERNEL__
        LASSERT(wi->wi_running);
#endif
        if (wi->wi_scheduled) { /* cancel pending schedules */
                LASSERT(!cfs_list_empty(&wi->wi_list));
                cfs_list_del_init(&wi->wi_list);

                LASSERT(sched->ws_nscheduled > 0);
                sched->ws_nscheduled--;
        }

        LASSERT(cfs_list_empty(&wi->wi_list));

        wi->wi_scheduled = 1; /* make any future schedule attempt LBUG */
        cfs_wi_sched_unlock(sched);

        return;
}
EXPORT_SYMBOL(cfs_wi_exit);
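
/*
 * Usage sketch (editor's illustration, not part of the original code):
 * cfs_wi_exit() is typically called on a workitem's final run, right before
 * the owner tears down the embedding object; the "demo_*" names are
 * hypothetical:
 *
 *        static int
 *        demo_final_action(cfs_workitem_t *wi)
 *        {
 *                demo_cleanup(wi->wi_data);
 *                cfs_wi_exit(demo_sched, wi);
 *                return 1;
 *        }
 *
 * Returning non-zero tells the scheduler the workitem is dead and may even
 * have been freed; cfs_wi_exit() additionally makes any later
 * cfs_wi_schedule() on this workitem LBUG.
 */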

/**
 * cancel schedule request of workitem \a wi
 */
int
cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
{
        int     rc;

        LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
        LASSERT(!sched->ws_stopping);

        /*
         * return 0 if it's running already; otherwise return 1, which
         * means the workitem will not be scheduled and will not have
         * any race with wi_action().
         */
        cfs_wi_sched_lock(sched);

        rc = !(wi->wi_running);

        if (wi->wi_scheduled) { /* cancel pending schedules */
                LASSERT(!cfs_list_empty(&wi->wi_list));
                cfs_list_del_init(&wi->wi_list);

                LASSERT(sched->ws_nscheduled > 0);
                sched->ws_nscheduled--;

                wi->wi_scheduled = 0;
        }

        LASSERT(cfs_list_empty(&wi->wi_list));

        cfs_wi_sched_unlock(sched);
        return rc;
}
EXPORT_SYMBOL(cfs_wi_deschedule);
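
/*
 * Usage sketch (editor's illustration, not part of the original code); the
 * "demo_*" helpers are hypothetical:
 *
 *        if (cfs_wi_deschedule(demo_sched, wi))
 *                demo_destroy(wi->wi_data);
 *        else
 *                demo_wait_for_action(wi);
 *
 * A return of 1 means the workitem was cancelled before running and cannot
 * race with wi_action(); 0 means wi_action() is executing right now and the
 * caller must synchronise with it by other means (e.g. cfs_wi_exit() called
 * from within the action).
 */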

/*
 * Workitem scheduled with (serial == 1) is strictly serialised not only with
 * itself, but also with others scheduled this way.
 *
 * Now there's only one static serialised queue, but in the future more might
 * be added, and even dynamic creation of serialised queues might be supported.
 */
void
cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
{
        LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
        LASSERT(!sched->ws_stopping);

        cfs_wi_sched_lock(sched);

        if (!wi->wi_scheduled) {
                LASSERT(cfs_list_empty(&wi->wi_list));

                wi->wi_scheduled = 1;
                sched->ws_nscheduled++;
                if (!wi->wi_running) {
                        cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
#ifdef __KERNEL__
                        cfs_waitq_signal(&sched->ws_waitq);
#endif
                } else {
                        cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
                }
        }

        LASSERT(!cfs_list_empty(&wi->wi_list));
        cfs_wi_sched_unlock(sched);
        return;
}
EXPORT_SYMBOL(cfs_wi_schedule);
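
/*
 * Usage sketch (editor's illustration, not part of the original code): a
 * workitem is embedded in its owner's object and prepared once with
 * cfs_wi_init() from libcfs_workitem.h; the "demo_*" names are hypothetical:
 *
 *        struct demo_obj {
 *                cfs_workitem_t  do_wi;
 *                int             do_busy;
 *        };
 *
 *        cfs_wi_init(&obj->do_wi, obj, demo_action);
 *        cfs_wi_schedule(demo_sched, &obj->do_wi);
 *
 * Scheduling is idempotent: if the workitem is already queued this call is
 * a no-op, and if it is currently running it is parked on ws_rerunq as
 * described above.
 */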

#ifdef __KERNEL__

static int
cfs_wi_scheduler(void *arg)
{
        struct cfs_wi_sched     *sched = (cfs_wi_sched_t *)arg;

        cfs_block_allsigs();

        /* CPT affinity scheduler? */
        if (sched->ws_cptab != NULL)
                if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0)
                        CWARN("Failed to bind %s on CPT %d\n",
                              sched->ws_name, sched->ws_cpt);

        spin_lock(&cfs_wi_data.wi_glock);

        LASSERT(sched->ws_starting == 1);
        sched->ws_starting--;
        sched->ws_nthreads++;

        spin_unlock(&cfs_wi_data.wi_glock);

        cfs_wi_sched_lock(sched);

        while (!sched->ws_stopping) {
                int             nloops = 0;
                int             rc;
                cfs_workitem_t *wi;

                while (!cfs_list_empty(&sched->ws_runq) &&
                       nloops < CFS_WI_RESCHED) {
                        wi = cfs_list_entry(sched->ws_runq.next,
                                            cfs_workitem_t, wi_list);
                        LASSERT(wi->wi_scheduled && !wi->wi_running);

                        cfs_list_del_init(&wi->wi_list);

                        LASSERT(sched->ws_nscheduled > 0);
                        sched->ws_nscheduled--;

                        wi->wi_running   = 1;
                        wi->wi_scheduled = 0;

                        cfs_wi_sched_unlock(sched);
                        nloops++;

                        rc = (*wi->wi_action)(wi);

                        cfs_wi_sched_lock(sched);
                        if (rc != 0) /* the WI is dead; it may even have been freed */
                                continue;

                        wi->wi_running = 0;
                        if (cfs_list_empty(&wi->wi_list))
                                continue;

                        LASSERT(wi->wi_scheduled);
                        /* wi was rescheduled while it was running, so it is
                         * on the rerunq now; move it back to the runq so its
                         * action can run again */
                        cfs_list_move_tail(&wi->wi_list, &sched->ws_runq);
                }

                if (!cfs_list_empty(&sched->ws_runq)) {
                        cfs_wi_sched_unlock(sched);
                        /* don't sleep because some workitems still
                         * expect me to come back soon */
                        cfs_cond_resched();
                        cfs_wi_sched_lock(sched);
                        continue;
                }

                cfs_wi_sched_unlock(sched);
                rc = wait_event_interruptible_exclusive(sched->ws_waitq,
                                !cfs_wi_sched_cansleep(sched));
                cfs_wi_sched_lock(sched);
        }

        cfs_wi_sched_unlock(sched);

        spin_lock(&cfs_wi_data.wi_glock);
        sched->ws_nthreads--;
        spin_unlock(&cfs_wi_data.wi_glock);

        return 0;
}

#else /* !__KERNEL__ */

int
cfs_wi_check_events(void)
{
        int               n = 0;
        cfs_workitem_t   *wi;

        spin_lock(&cfs_wi_data.wi_glock);

        for (;;) {
                struct cfs_wi_sched     *sched = NULL;
                struct cfs_wi_sched     *tmp;

                /* rerunq is always empty for userspace */
                cfs_list_for_each_entry(tmp,
                                        &cfs_wi_data.wi_scheds, ws_list) {
                        if (!cfs_list_empty(&tmp->ws_runq)) {
                                sched = tmp;
                                break;
                        }
                }

                if (sched == NULL)
                        break;

                wi = cfs_list_entry(sched->ws_runq.next,
                                    cfs_workitem_t, wi_list);
                cfs_list_del_init(&wi->wi_list);

                LASSERT(sched->ws_nscheduled > 0);
                sched->ws_nscheduled--;

                LASSERT(wi->wi_scheduled);
                wi->wi_scheduled = 0;
                spin_unlock(&cfs_wi_data.wi_glock);

                n++;
                (*wi->wi_action)(wi);

                spin_lock(&cfs_wi_data.wi_glock);
        }

        spin_unlock(&cfs_wi_data.wi_glock);
        return n;
}
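
/*
 * Usage sketch (editor's illustration, not part of the original code): in
 * userspace there are no scheduler threads, so the application has to poll,
 * e.g. from its main loop; demo_wait_for_events() is a hypothetical
 * blocking point supplied by the application:
 *
 *        for (;;) {
 *                int n = cfs_wi_check_events();
 *
 *                if (n == 0)
 *                        demo_wait_for_events();
 *        }
 */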

#endif /* __KERNEL__ */

void
cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
{
        LASSERT(cfs_wi_data.wi_init);
        LASSERT(!cfs_wi_data.wi_stopping);

        spin_lock(&cfs_wi_data.wi_glock);
        if (sched->ws_stopping) {
                CDEBUG(D_INFO, "%s is in the process of stopping\n",
                       sched->ws_name);
                spin_unlock(&cfs_wi_data.wi_glock);
                return;
        }

        LASSERT(!cfs_list_empty(&sched->ws_list));
        sched->ws_stopping = 1;

        spin_unlock(&cfs_wi_data.wi_glock);

#ifdef __KERNEL__
        cfs_waitq_broadcast(&sched->ws_waitq);

        spin_lock(&cfs_wi_data.wi_glock);
        {
                int i = 2;

                while (sched->ws_nthreads > 0) {
                        CDEBUG(IS_PO2(++i) ? D_WARNING : D_NET,
                               "waiting for %d threads of WI sched[%s] to "
                               "terminate\n", sched->ws_nthreads,
                               sched->ws_name);

                        spin_unlock(&cfs_wi_data.wi_glock);
                        cfs_pause(cfs_time_seconds(1) / 20);
                        spin_lock(&cfs_wi_data.wi_glock);
                }
        }

        cfs_list_del(&sched->ws_list);

        spin_unlock(&cfs_wi_data.wi_glock);
#endif
        LASSERT(sched->ws_nscheduled == 0);

        LIBCFS_FREE(sched, sizeof(*sched));
}
EXPORT_SYMBOL(cfs_wi_sched_destroy);

int
cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
                    int cpt, int nthrs, struct cfs_wi_sched **sched_pp)
{
        struct cfs_wi_sched     *sched;

        LASSERT(cfs_wi_data.wi_init);
        LASSERT(!cfs_wi_data.wi_stopping);
        LASSERT(cptab == NULL || cpt == CFS_CPT_ANY ||
                (cpt >= 0 && cpt < cfs_cpt_number(cptab)));

        LIBCFS_ALLOC(sched, sizeof(*sched));
        if (sched == NULL)
                return -ENOMEM;

        strncpy(sched->ws_name, name, CFS_WS_NAME_LEN);
        /* strncpy() does not NUL-terminate if name fills the buffer */
        sched->ws_name[CFS_WS_NAME_LEN - 1] = '\0';
        sched->ws_cptab = cptab;
        sched->ws_cpt = cpt;

#ifdef __KERNEL__
        spin_lock_init(&sched->ws_lock);
        cfs_waitq_init(&sched->ws_waitq);
#endif
        CFS_INIT_LIST_HEAD(&sched->ws_runq);
        CFS_INIT_LIST_HEAD(&sched->ws_rerunq);
        CFS_INIT_LIST_HEAD(&sched->ws_list);

#ifdef __KERNEL__
        for (; nthrs > 0; nthrs--) {
                char            name[16];
                cfs_task_t      *task;

                spin_lock(&cfs_wi_data.wi_glock);
                while (sched->ws_starting > 0) {
                        spin_unlock(&cfs_wi_data.wi_glock);
                        cfs_schedule();
                        spin_lock(&cfs_wi_data.wi_glock);
                }

                sched->ws_starting++;
                spin_unlock(&cfs_wi_data.wi_glock);

                if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) {
                        snprintf(name, sizeof(name), "%s_%02d_%02d",
                                 sched->ws_name, sched->ws_cpt,
                                 sched->ws_nthreads);
                } else {
                        snprintf(name, sizeof(name), "%s_%02d",
                                 sched->ws_name, sched->ws_nthreads);
                }

                task = kthread_run(cfs_wi_scheduler, sched, name);
                if (IS_ERR(task)) {
                        int rc = PTR_ERR(task);

                        CERROR("Failed to create thread for "
                               "WI scheduler %s: %d\n", name, rc);

                        spin_lock(&cfs_wi_data.wi_glock);

                        /* make up for cfs_wi_sched_destroy */
                        cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
                        sched->ws_starting--;

                        spin_unlock(&cfs_wi_data.wi_glock);

                        cfs_wi_sched_destroy(sched);
                        return rc;
                }
        }
#endif
        spin_lock(&cfs_wi_data.wi_glock);
        cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
        spin_unlock(&cfs_wi_data.wi_glock);

        *sched_pp = sched;
        return 0;
}
EXPORT_SYMBOL(cfs_wi_sched_create);
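
/*
 * Usage sketch (editor's illustration, not part of the original code):
 * create a scheduler with two service threads and no CPT affinity, queue
 * work on it, then tear it down; "demo" and "obj" are hypothetical:
 *
 *        struct cfs_wi_sched *sched;
 *        int rc;
 *
 *        rc = cfs_wi_sched_create("demo", NULL, CFS_CPT_ANY, 2, &sched);
 *        if (rc != 0)
 *                return rc;
 *
 *        cfs_wi_schedule(sched, &obj->do_wi);
 *
 * cfs_wi_sched_destroy(sched) later blocks until every scheduler thread
 * has exited, and LASSERTs that no workitems are still queued.
 */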

int
cfs_wi_startup(void)
{
        memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));

        spin_lock_init(&cfs_wi_data.wi_glock);
        CFS_INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
        cfs_wi_data.wi_init = 1;

        return 0;
}

void
cfs_wi_shutdown(void)
{
        struct cfs_wi_sched     *sched;

        spin_lock(&cfs_wi_data.wi_glock);
        cfs_wi_data.wi_stopping = 1;
        spin_unlock(&cfs_wi_data.wi_glock);

#ifdef __KERNEL__
        /* nobody should contend on this list */
        cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
                sched->ws_stopping = 1;
                cfs_waitq_broadcast(&sched->ws_waitq);
        }

        cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
                spin_lock(&cfs_wi_data.wi_glock);

                while (sched->ws_nthreads != 0) {
                        spin_unlock(&cfs_wi_data.wi_glock);
                        cfs_pause(cfs_time_seconds(1) / 20);
                        spin_lock(&cfs_wi_data.wi_glock);
                }
                spin_unlock(&cfs_wi_data.wi_glock);
        }
#endif
        while (!cfs_list_empty(&cfs_wi_data.wi_scheds)) {
                sched = cfs_list_entry(cfs_wi_data.wi_scheds.next,
                                       struct cfs_wi_sched, ws_list);
                cfs_list_del(&sched->ws_list);
                LIBCFS_FREE(sched, sizeof(*sched));
        }

        cfs_wi_data.wi_stopping = 0;
        cfs_wi_data.wi_init = 0;
}
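
/*
 * Lifecycle note (editor's illustration, not part of the original code):
 * cfs_wi_startup() and cfs_wi_shutdown() bracket the whole module, so the
 * expected ordering is roughly:
 *
 *        cfs_wi_startup();
 *        rc = cfs_wi_sched_create("demo", NULL, CFS_CPT_ANY, 1, &sched);
 *        (init workitems, cfs_wi_schedule(), cfs_wi_deschedule(), ...)
 *        cfs_wi_sched_destroy(sched);
 *        cfs_wi_shutdown();
 *
 * Schedulers not destroyed explicitly are freed by the final loop in
 * cfs_wi_shutdown() above.
 */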