LU-15283 quota: deadlock between reint & lquota_wb
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2012, 2017, Intel Corporation.
 * Use is subject to license terms.
 *
 * Author: Johann Lombardi <johann.lombardi@intel.com>
 * Author: Niu    Yawei    <yawei.niu@intel.com>
 */

#define DEBUG_SUBSYSTEM S_LQUOTA

#include <linux/kthread.h>
#include "qsd_internal.h"

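/*
 * Writeback machinery of the quota slave daemon (QSD):
 * - updates of the global/slave index copies are packed into qsd_upd_rec
 *   records and flushed to disk by a dedicated "lquota_wb_<svname>" thread
 *   (one per qsd_instance);
 * - out-of-order records are parked on per-type deferred lists, sorted by
 *   index version, until qsd_bump_version() releases them;
 * - the same thread services qsd_adjust_list to re-run quota space
 *   adjustment or per-ID lock cancel for queued entries.
 */
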
/*
 * Allocate and fill a qsd_upd_rec structure to be processed by the writeback
 * thread.
 *
 * \param qqi - is the qsd_qtype_info structure relevant to the update
 * \param lqe - is the lquota entry subject to the update
 * \param qid - is the identifier subject to the update
 * \param rec - is the record storing the new quota settings
 * \param ver - is the version associated with the update
 * \param global - is a boolean set to true if this is an update of the global
 *                 index and false for a slave index.
 */
static struct qsd_upd_rec *qsd_upd_alloc(struct qsd_qtype_info *qqi,
                                         struct lquota_entry *lqe,
                                         union lquota_id *qid,
                                         union lquota_rec *rec, __u64 ver,
                                         bool global)
{
        struct qsd_upd_rec      *upd;

        OBD_SLAB_ALLOC_PTR_GFP(upd, upd_kmem, GFP_NOFS);
        if (upd == NULL)
                return NULL;

        /* fill it */
        INIT_LIST_HEAD(&upd->qur_link);
        upd->qur_qqi = qqi;
        upd->qur_lqe = lqe;
        if (lqe)
                lqe_getref(lqe);
        upd->qur_qid    = *qid;
        upd->qur_rec    = *rec;
        upd->qur_ver    = ver;
        upd->qur_global = global;

        return upd;
}

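/* Release a record built by qsd_upd_alloc(): drop the lquota entry reference
 * taken there, if any, and free the slab object. */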
static void qsd_upd_free(struct qsd_upd_rec *upd)
{
        if (upd->qur_lqe)
                lqe_putref(upd->qur_lqe);
        OBD_SLAB_FREE_PTR(upd, upd_kmem);
}

/* must hold the qsd_lock */
static void qsd_upd_add(struct qsd_instance *qsd, struct qsd_upd_rec *upd)
{
        if (!qsd->qsd_stopping) {
                list_add_tail(&upd->qur_link, &qsd->qsd_upd_list);
                /* wake up the upd thread */
                if (qsd->qsd_upd_task)
                        wake_up_process(qsd->qsd_upd_task);
        } else {
                CWARN("%s: discard update.\n", qsd->qsd_svname);
                if (upd->qur_lqe)
                        LQUOTA_WARN(upd->qur_lqe, "discard update.");
                qsd_upd_free(upd);
        }
}

/* must hold the qsd_lock */
static void qsd_add_deferred(struct qsd_instance *qsd, struct list_head *list,
                             struct qsd_upd_rec *upd)
{
        struct qsd_upd_rec      *tmp, *n;

        if (qsd->qsd_stopping) {
                CWARN("%s: discard deferred update.\n", qsd->qsd_svname);
                if (upd->qur_lqe)
                        LQUOTA_WARN(upd->qur_lqe, "discard deferred update.");
                qsd_upd_free(upd);
                return;
        }

        /* Keep the updates sorted in ascending version order */
        list_for_each_entry_safe_reverse(tmp, n, list, qur_link) {

                /* There could be some legacy records with a duplicated
                 * version. Imagine the following scenario: the slave received
                 * a global glimpse and queued a record in the deferred list,
                 * then the master crashed and rolled back to an earlier
                 * version, so the version of the queued record now conflicts
                 * with later updates. We should just delete the legacy record
                 * in such a case. */
                if (upd->qur_ver == tmp->qur_ver) {
                        if (tmp->qur_lqe)
                                LQUOTA_WARN(tmp->qur_lqe, "Found a conflicting "
                                            "record with ver:%llu",
                                            tmp->qur_ver);
                        else
                                CWARN("%s: Found a conflicting record with "
                                      "ver: %llu\n", qsd->qsd_svname,
                                      tmp->qur_ver);

                        list_del_init(&tmp->qur_link);
                        qsd_upd_free(tmp);
                } else if (upd->qur_ver < tmp->qur_ver) {
                        continue;
                } else {
                        list_add(&upd->qur_link, &tmp->qur_link);
                        return;
                }
        }
        list_add(&upd->qur_link, list);
}
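
/*
 * Example of the intended ordering: with deferred versions [4, 5, 7] queued,
 * an incoming ver 6 record is linked between 5 and 7, while a second ver 5
 * record replaces the stale ver 5 entry (the conflicting-record case above).
 */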

/* must hold the qsd_lock */
static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi,
                                 struct list_head *list, __u64 ver)
{
        struct qsd_upd_rec      *upd, *tmp;
        ENTRY;

        /* Get the first update record in the list, which has the smallest
         * version; discard all records with versions not newer than the
         * current one */
        list_for_each_entry_safe(upd, tmp, list, qur_link) {
                if (upd->qur_ver <= ver) {
                        /* drop this update */
                        list_del_init(&upd->qur_link);
                        CDEBUG(D_QUOTA, "%s: skipping deferred update ver:"
                               "%llu/%llu, global:%d, qid:%llu\n",
                               qqi->qqi_qsd->qsd_svname, upd->qur_ver, ver,
                               upd->qur_global, upd->qur_qid.qid_uid);
                        qsd_upd_free(upd);
                } else {
                        break;
                }
        }

        /* No remaining deferred update */
        if (list_empty(list))
                RETURN_EXIT;

        CDEBUG(D_QUOTA, "%s: found deferred update record. "
               "version:%llu/%llu, global:%d, qid:%llu\n",
               qqi->qqi_qsd->qsd_svname, upd->qur_ver, ver,
               upd->qur_global, upd->qur_qid.qid_uid);

        LASSERTF(upd->qur_ver > ver, "lur_ver:%llu, cur_ver:%llu\n",
                 upd->qur_ver, ver);

        /* Kick off the deferred update */
        if (upd->qur_ver == ver + 1) {
                list_del_init(&upd->qur_link);
                qsd_upd_add(qqi->qqi_qsd, upd);
        }
        EXIT;
}
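
/*
 * Only the record carrying exactly the next version (ver + 1) is re-queued
 * here; later records remain deferred until the index version catches up and
 * qsd_bump_version() is called again, so the list drains in version order.
 */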

/* Bump version of global or slave index copy
 *
 * \param qqi    - qsd_qtype_info
 * \param ver    - version to be bumped to
 * \param global - global or slave index copy?
 */
void qsd_bump_version(struct qsd_qtype_info *qqi, __u64 ver, bool global)
{
        struct list_head *list;
        __u64            *idx_ver;

        idx_ver = global ? &qqi->qqi_glb_ver : &qqi->qqi_slv_ver;
        list    = global ? &qqi->qqi_deferred_glb : &qqi->qqi_deferred_slv;

        write_lock(&qqi->qqi_qsd->qsd_lock);
        *idx_ver = ver;
        if (global)
                qqi->qqi_glb_uptodate = 1;
        else
                qqi->qqi_slv_uptodate = 1;
        qsd_kickoff_deferred(qqi, list, ver);
        write_unlock(&qqi->qqi_qsd->qsd_lock);
}

/*
 * Schedule a commit of a lquota entry
 *
 * \param  qqi   - qsd_qtype_info
 * \param  lqe   - lquota_entry
 * \param  qid   - quota id
 * \param  rec   - global or slave record to be updated to disk
 * \param  ver   - new index file version
 * \param  global- true: master record; false: slave record
 */
void qsd_upd_schedule(struct qsd_qtype_info *qqi, struct lquota_entry *lqe,
                      union lquota_id *qid, union lquota_rec *rec, __u64 ver,
                      bool global)
{
        struct qsd_upd_rec      *upd;
        struct qsd_instance     *qsd = qqi->qqi_qsd;
        __u64                    cur_ver;
        ENTRY;

        CDEBUG(D_QUOTA, "%s: schedule update. global:%s, version:%llu\n",
               qsd->qsd_svname, global ? "true" : "false", ver);

        upd = qsd_upd_alloc(qqi, lqe, qid, rec, ver, global);
        if (upd == NULL)
                RETURN_EXIT;

        /* If we don't need to update the index version, there is no need to
         * sort the records in version order; just schedule the update
         * immediately. */
        if (ver == 0) {
                write_lock(&qsd->qsd_lock);
                qsd_upd_add(qsd, upd);
                write_unlock(&qsd->qsd_lock);
                RETURN_EXIT;
        }

        write_lock(&qsd->qsd_lock);

        cur_ver = global ? qqi->qqi_glb_ver : qqi->qqi_slv_ver;

        if (ver <= cur_ver) {
                if (global)
                        /* legitimate race between glimpse AST and
                         * reintegration */
                        CDEBUG(D_QUOTA, "%s: discarding glb update from glimpse"
                               " ver:%llu local ver:%llu\n",
                               qsd->qsd_svname, ver, cur_ver);
                else
                        CERROR("%s: discard slv update, ver:%llu local ver:"
                               "%llu\n", qsd->qsd_svname, ver, cur_ver);
                qsd_upd_free(upd);
        } else if ((ver == cur_ver + 1) && qqi->qqi_glb_uptodate &&
                   qqi->qqi_slv_uptodate) {
                /* In-order update, and reintegration has been done. */
                qsd_upd_add(qsd, upd);
        } else {
                /* Out-of-order update (the one with a smaller version hasn't
                 * reached the slave or hasn't been flushed to disk yet), or
                 * reintegration is in progress. Defer the update. */
                struct list_head *list = global ? &qqi->qqi_deferred_glb :
                                                  &qqi->qqi_deferred_slv;
                qsd_add_deferred(qsd, list, upd);
        }

        write_unlock(&qsd->qsd_lock);

        EXIT;
}
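
/*
 * To summarize the dispatch above: a stale version (ver <= current) is
 * dropped, the next expected version (current + 1) with both index copies up
 * to date goes straight to the writeback queue, and everything else is parked
 * on the deferred list until qsd_bump_version() catches up.
 */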

static int qsd_process_upd(const struct lu_env *env, struct qsd_upd_rec *upd)
{
        struct lquota_entry     *lqe = upd->qur_lqe;
        struct qsd_qtype_info   *qqi = upd->qur_qqi;
        struct qsd_instance     *qsd = qqi->qqi_qsd;
        int                      rc;
        ENTRY;

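        /*
         * A zero or negative return tells the caller the record can be
         * freed; a positive return asks qsd_upd_thread() to keep the record
         * queued and retry later.  The check below is how the writeback
         * thread backs off while reintegration is running for this quota
         * type (see LU-15283: deadlock between reint & lquota_wb).
         */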
        if (qsd->qsd_exclusive) { /* we could deadlock if running with reint */
                read_lock(&qsd->qsd_lock);
                rc = qqi->qqi_reint;
                read_unlock(&qsd->qsd_lock);
                if (rc)
                        return 1;
        }

        if (lqe == NULL) {
                lqe = lqe_locate(env, qqi->qqi_site, &upd->qur_qid);
                if (IS_ERR(lqe))
                        GOTO(out, rc = PTR_ERR(lqe));
        }

        /* The in-memory lqe update for the slave index copy isn't deferred,
         * so we shouldn't touch it here. */
        if (upd->qur_global) {
                rc = qsd_update_lqe(env, lqe, upd->qur_global, &upd->qur_rec);
                if (rc)
                        GOTO(out, rc);
                /* refresh usage */
                qsd_refresh_usage(env, lqe);

                spin_lock(&qsd->qsd_adjust_lock);
                lqe->lqe_adjust_time = 0;
                spin_unlock(&qsd->qsd_adjust_lock);

                /* Report usage asynchronously */
                rc = qsd_adjust(env, lqe);
                if (rc)
                        LQUOTA_ERROR(lqe, "failed to report usage, rc:%d", rc);
        }

        rc = qsd_update_index(env, qqi, &upd->qur_qid, upd->qur_global,
                              upd->qur_ver, &upd->qur_rec);
out:
        if (upd->qur_global && rc == 0 &&
            upd->qur_rec.lqr_glb_rec.qbr_softlimit == 0 &&
            upd->qur_rec.lqr_glb_rec.qbr_hardlimit == 0 &&
            (LQUOTA_FLAG(upd->qur_rec.lqr_glb_rec.qbr_time) &
                                                        LQUOTA_FLAG_DEFAULT)) {
                lqe->lqe_is_default = true;
                if (qqi->qqi_default_softlimit == 0 &&
                    qqi->qqi_default_hardlimit == 0)
                        lqe->lqe_enforced = false;
                else
                        lqe->lqe_enforced = true;

                LQUOTA_DEBUG(lqe, "update to use default quota");
        }

        if (lqe && !IS_ERR(lqe)) {
                lqe_putref(lqe);
                upd->qur_lqe = NULL;
        }
        RETURN(rc);
}

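/*
 * Queue an lquota entry on qsd_adjust_list so that the writeback thread later
 * re-runs quota space adjustment for it, or releases its per-ID lock when
 * 'cancel' is set.  With 'defer' set, processing is postponed by
 * QSD_WB_INTERVAL seconds.
 */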
void qsd_adjust_schedule(struct lquota_entry *lqe, bool defer, bool cancel)
{
        struct qsd_instance     *qsd = lqe2qqi(lqe)->qqi_qsd;
        bool                     added = false;

        read_lock(&qsd->qsd_lock);
        if (qsd->qsd_stopping) {
                read_unlock(&qsd->qsd_lock);
                return;
        }
        read_unlock(&qsd->qsd_lock);

        lqe_getref(lqe);
        spin_lock(&qsd->qsd_adjust_lock);

        /* the lqe is already queued for per-ID lock cancel; drop that request
         * so the lqe can be re-queued for quota adjustment below */
        if (!list_empty(&lqe->lqe_link) &&
            lqe->lqe_adjust_time == 0) {
                list_del_init(&lqe->lqe_link);
                lqe_putref(lqe);
        }

        if (list_empty(&lqe->lqe_link)) {
                if (!cancel) {
                        lqe->lqe_adjust_time = ktime_get_seconds();
                        if (defer)
                                lqe->lqe_adjust_time += QSD_WB_INTERVAL;
                } else {
                        lqe->lqe_adjust_time = 0;
                }

                /* lqe reference transferred to list */
                if (defer)
                        list_add_tail(&lqe->lqe_link,
                                      &qsd->qsd_adjust_list);
                else
                        list_add(&lqe->lqe_link, &qsd->qsd_adjust_list);
                added = true;
        }
        spin_unlock(&qsd->qsd_adjust_lock);

        if (!added)
                lqe_putref(lqe);
        else {
                read_lock(&qsd->qsd_lock);
                if (qsd->qsd_upd_task)
                        wake_up_process(qsd->qsd_upd_task);
                read_unlock(&qsd->qsd_lock);
        }
}

/* return true if there are pending writeback records or pending
 * adjust requests */
static bool qsd_job_pending(struct qsd_instance *qsd, struct list_head *upd,
                            bool *uptodate)
{
        bool    job_pending = false;
        int     qtype;

        LASSERT(list_empty(upd));
        *uptodate = true;

        spin_lock(&qsd->qsd_adjust_lock);
        if (!list_empty(&qsd->qsd_adjust_list)) {
                struct lquota_entry *lqe;

                lqe = list_entry(qsd->qsd_adjust_list.next,
                                 struct lquota_entry, lqe_link);
                if (ktime_get_seconds() >= lqe->lqe_adjust_time)
                        job_pending = true;
        }
        spin_unlock(&qsd->qsd_adjust_lock);

        write_lock(&qsd->qsd_lock);
        if (!list_empty(&qsd->qsd_upd_list)) {
                list_splice_init(&qsd->qsd_upd_list, upd);
                job_pending = true;
        }
        if (qsd->qsd_exclusive)
                qsd->qsd_updating = job_pending;

        for (qtype = USRQUOTA; qtype < LL_MAXQUOTAS; qtype++) {
                struct qsd_qtype_info *qqi = qsd->qsd_type_array[qtype];

                /* don't bother kicking off reintegration if space accounting
                 * failed to be enabled */
                if (qqi->qqi_acct_failed)
                        continue;

                if (!qsd_type_enabled(qsd, qtype))
                        continue;

                if ((!qqi->qqi_glb_uptodate || !qqi->qqi_slv_uptodate) &&
                     !qqi->qqi_reint)
                        /* global or slave index not up to date and reint
                         * thread not running */
                        *uptodate = false;
        }

        write_unlock(&qsd->qsd_lock);
        return job_pending;
}
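
/*
 * When qsd_exclusive is set, qsd_updating tracks whether the writeback thread
 * has work in flight: it is set above under qsd_lock and cleared again by
 * qsd_upd_thread() once the spliced update queue has been drained.
 */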

struct qsd_upd_args {
        struct qsd_instance     *qua_inst;
        struct lu_env            qua_env;
        struct completion       *qua_started;
};

#ifndef TASK_IDLE
/* This identity is only safe inside kernel threads, or other places where
 * all signals are disabled.  So it is placed here rather than in an include
 * file.
 * TASK_IDLE was added in v4.1-rc4-43-g80ed87c8a9ca so this can be removed
 * when we no longer support kernels older than that.
 */
#define TASK_IDLE TASK_INTERRUPTIBLE
#endif

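/*
 * Main loop of the "lquota_wb" writeback thread: wait for work, flush the
 * spliced update records (keeping for a later retry those that
 * qsd_process_upd() refused while reintegration is running), service the
 * qsd_adjust_list entries whose time has come, and kick off reintegration
 * threads whenever an index copy is found to be stale.
 */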
static int qsd_upd_thread(void *_args)
{
        struct qsd_upd_args     *args = _args;
        struct qsd_instance     *qsd = args->qua_inst;
        LIST_HEAD(queue);
        struct qsd_upd_rec      *upd, *n;
        struct lu_env           *env = &args->qua_env;
        int                      qtype, rc = 0;
        bool                     uptodate;
        struct lquota_entry     *lqe;
        time64_t cur_time;
        ENTRY;

        complete(args->qua_started);
        while (({set_current_state(TASK_IDLE);
                 !kthread_should_stop(); })) {
                int count = 0;

                if (!qsd_job_pending(qsd, &queue, &uptodate))
                        schedule_timeout(cfs_time_seconds(QSD_WB_INTERVAL));
                __set_current_state(TASK_RUNNING);

                while (1) {
                        list_for_each_entry_safe(upd, n, &queue, qur_link) {
                                if (qsd_process_upd(env, upd) <= 0) {
                                        list_del_init(&upd->qur_link);
                                        qsd_upd_free(upd);
                                }
                        }
                        if (list_empty(&queue))
                                break;
                        count++;
                        if (count % 7 == 0) {
                                n = list_first_entry(&queue,
                                                     struct qsd_upd_rec,
                                                     qur_link);
                                CWARN("%s: The reintegration thread [%d] "
                                      "blocked more than %ld seconds\n",
                                      n->qur_qqi->qqi_qsd->qsd_svname,
                                      n->qur_qqi->qqi_qtype, count *
                                      cfs_time_seconds(QSD_WB_INTERVAL) / 10);
                        }
                        schedule_timeout_interruptible(
                                cfs_time_seconds(QSD_WB_INTERVAL) / 10);
                }
                if (qsd->qsd_exclusive) {
                        write_lock(&qsd->qsd_lock);
                        qsd->qsd_updating = false;
                        write_unlock(&qsd->qsd_lock);
                }

                spin_lock(&qsd->qsd_adjust_lock);
                cur_time = ktime_get_seconds();
                while (!list_empty(&qsd->qsd_adjust_list)) {
                        lqe = list_entry(qsd->qsd_adjust_list.next,
                                         struct lquota_entry, lqe_link);
                        /* deferred items are sorted by time */
                        if (lqe->lqe_adjust_time > cur_time)
                                break;

                        list_del_init(&lqe->lqe_link);
                        spin_unlock(&qsd->qsd_adjust_lock);

                        if (!kthread_should_stop() && uptodate) {
                                qsd_refresh_usage(env, lqe);
                                if (lqe->lqe_adjust_time == 0)
                                        qsd_id_lock_cancel(env, lqe);
                                else
                                        qsd_adjust(env, lqe);
                        }

                        lqe_putref(lqe);
                        spin_lock(&qsd->qsd_adjust_lock);
                }
                spin_unlock(&qsd->qsd_adjust_lock);

                if (uptodate || kthread_should_stop())
                        continue;

                for (qtype = USRQUOTA; qtype < LL_MAXQUOTAS; qtype++)
                        qsd_start_reint_thread(qsd->qsd_type_array[qtype]);
        }
        __set_current_state(TASK_RUNNING);

        lu_env_fini(env);
        OBD_FREE_PTR(args);

        RETURN(rc);
}

int qsd_start_upd_thread(struct qsd_instance *qsd)
{
        struct qsd_upd_args *args;
        struct task_struct *task;
        DECLARE_COMPLETION_ONSTACK(started);
        int rc;
        ENTRY;

        OBD_ALLOC_PTR(args);
        if (args == NULL)
                RETURN(-ENOMEM);

        rc = lu_env_init(&args->qua_env, LCT_DT_THREAD);
        if (rc) {
                CERROR("%s: cannot init env: rc = %d\n", qsd->qsd_svname, rc);
                goto out_free;
        }
        args->qua_inst = qsd;
        args->qua_started = &started;

        task = kthread_create(qsd_upd_thread, args,
                              "lquota_wb_%s", qsd->qsd_svname);
        if (IS_ERR(task)) {
                rc = PTR_ERR(task);
                CERROR("failed to start quota update thread: rc = %d\n", rc);
                goto out_fini;
        }
        qsd->qsd_upd_task = task;
        wake_up_process(task);
        wait_for_completion(&started);

        RETURN(0);

out_fini:
        lu_env_fini(&args->qua_env);
out_free:
        OBD_FREE_PTR(args);
        RETURN(rc);
}

static void qsd_cleanup_deferred(struct qsd_instance *qsd)
{
        int     qtype;

        for (qtype = USRQUOTA; qtype < LL_MAXQUOTAS; qtype++) {
                struct qsd_upd_rec      *upd, *tmp;
                struct qsd_qtype_info   *qqi = qsd->qsd_type_array[qtype];

                if (qqi == NULL)
                        continue;

                write_lock(&qsd->qsd_lock);
                list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_glb,
                                         qur_link) {
                        CWARN("%s: Free global deferred upd: ID:%llu, "
                              "ver:%llu/%llu\n", qsd->qsd_svname,
                              upd->qur_qid.qid_uid, upd->qur_ver,
                              qqi->qqi_glb_ver);
                        list_del_init(&upd->qur_link);
                        qsd_upd_free(upd);
                }
                list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_slv,
                                         qur_link) {
                        CWARN("%s: Free slave deferred upd: ID:%llu, "
                              "ver:%llu/%llu\n", qsd->qsd_svname,
                              upd->qur_qid.qid_uid, upd->qur_ver,
                              qqi->qqi_slv_ver);
                        list_del_init(&upd->qur_link);
                        qsd_upd_free(upd);
                }
                write_unlock(&qsd->qsd_lock);
        }
}

static void qsd_cleanup_adjust(struct qsd_instance *qsd)
{
        struct lquota_entry     *lqe;

        spin_lock(&qsd->qsd_adjust_lock);
        while (!list_empty(&qsd->qsd_adjust_list)) {
                lqe = list_entry(qsd->qsd_adjust_list.next,
                                 struct lquota_entry, lqe_link);
                list_del_init(&lqe->lqe_link);
                lqe_putref(lqe);
        }
        spin_unlock(&qsd->qsd_adjust_lock);
}

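/*
 * Stop the writeback thread and drop whatever is still queued: clearing
 * qsd_upd_task under qsd_lock keeps qsd_upd_add()/qsd_adjust_schedule() from
 * waking a dying task, then the deferred and adjust lists are drained.
 */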
void qsd_stop_upd_thread(struct qsd_instance *qsd)
{
        struct task_struct *task;

        write_lock(&qsd->qsd_lock);
        task = qsd->qsd_upd_task;
        qsd->qsd_upd_task = NULL;
        write_unlock(&qsd->qsd_lock);
        if (task)
                kthread_stop(task);

        qsd_cleanup_deferred(qsd);
        qsd_cleanup_adjust(qsd);
}