/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  lustre/quota/quota_context.c
 *  Lustre Quota Context
 *
 *  Copyright (c) 2001-2005 Cluster File Systems, Inc.
 *   Author: Niu YaWei <niu@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   No redistribution or use is permitted outside of Cluster File Systems, Inc.
 *
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#define DEBUG_SUBSYSTEM S_MDS

#include <linux/version.h>
#include <linux/fs.h>
#include <asm/unistd.h>
#include <linux/slab.h>
#include <linux/quotaops.h>
#include <linux/module.h>
#include <linux/init.h>

#include <obd_class.h>
#include <lustre_quota.h>
#include <lustre_fsfilt.h>
#include "quota_internal.h"

unsigned long default_bunit_sz = 100 * 1024 * 1024;       /* 100M bytes */
unsigned long default_btune_ratio = 50;                   /* 50 percent */
unsigned long default_iunit_sz = 5000;       /* 5000 inodes */
unsigned long default_itune_ratio = 50;      /* 50 percent */

kmem_cache_t *qunit_cachep = NULL;
struct list_head qunit_hash[NR_DQHASH];
spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;

struct lustre_qunit {
        struct list_head lq_hash;               /* Hash list in memory */
        atomic_t lq_refcnt;                     /* Use count */
        struct lustre_quota_ctxt *lq_ctxt;      /* Quota context this applies to */
        struct qunit_data lq_data;              /* See qunit_data */
        unsigned int lq_opc;                    /* QUOTA_DQACQ, QUOTA_DQREL */
        struct list_head lq_waiters;            /* All write threads waiting for this qunit */
};

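/* Return 1 if the import does not support 64-bit quota counts
 * (OBD_CONNECT_QUOTA64), so qd_count must be translated to the old
 * 32-bit wire format; return 0 if 64-bit counts can be sent as-is. */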
int should_translate_quota (struct obd_import *imp)
{
        ENTRY;

        LASSERT(imp);
        if (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64)
                RETURN(0);
        else
                RETURN(1);
}

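/* Destroy the qunit slab cache; all hash buckets must already be empty. */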
void qunit_cache_cleanup(void)
{
        int i;
        ENTRY;

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                LASSERT(list_empty(qunit_hash + i));
        spin_unlock(&qunit_hash_lock);

        if (qunit_cachep) {
#ifdef HAVE_KMEM_CACHE_DESTROY_INT
                int rc;
                rc = kmem_cache_destroy(qunit_cachep);
                LASSERTF(rc == 0, "couldn't destroy qunit_cache slab\n");
#else
                kmem_cache_destroy(qunit_cachep);
#endif
                qunit_cachep = NULL;
        }
        EXIT;
}

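/* Create the qunit slab cache and initialize the qunit hash table. */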
int qunit_cache_init(void)
{
        int i;
        ENTRY;

        LASSERT(qunit_cachep == NULL);
        qunit_cachep = kmem_cache_create("ll_qunit_cache",
                                         sizeof(struct lustre_qunit),
                                         0, 0, NULL, NULL);
        if (!qunit_cachep)
                RETURN(-ENOMEM);

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                INIT_LIST_HEAD(qunit_hash + i);
        spin_unlock(&qunit_hash_lock);
        RETURN(0);
}

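/* Hash a (quota context, id, type) triple into one of the NR_DQHASH
 * buckets of qunit_hash. */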
static inline int
qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
             __attribute__((__const__));

static inline int
qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
{
        unsigned int id = qdata->qd_id;
        unsigned int type = qdata->qd_flags & QUOTA_IS_GRP;

        unsigned long tmp = ((unsigned long)qctxt >> L1_CACHE_SHIFT) ^ id;
        tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
        return tmp;
}

/* caller must hold qunit_hash_lock */
static inline struct lustre_qunit *find_qunit(unsigned int hashent,
                                              struct lustre_quota_ctxt *qctxt,
                                              struct qunit_data *qdata)
{
        struct lustre_qunit *qunit = NULL;
        struct qunit_data *tmp;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
                tmp = &qunit->lq_data;
                if (qunit->lq_ctxt == qctxt &&
                    qdata->qd_id == tmp->qd_id && qdata->qd_flags == tmp->qd_flags)
                        return qunit;
        }
        return NULL;
}

/* check_cur_qunit - compare the current quota usage with the local limit
 * and compute how much must be acquired or released.
 * @qctxt: quota context
 * @qdata: quota data (id, type and block/inode flag) to check; on return
 *         qd_count holds the amount to acquire or release.
 *
 * return: 1 - need to acquire qunit(s);
 *         2 - need to release qunit(s);
 *         0 - nothing to do;
 *       < 0 - error.
 */
static int
check_cur_qunit(struct obd_device *obd,
                struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
{
        struct super_block *sb = qctxt->lqc_sb;
        unsigned long qunit_sz, tune_sz;
        __u64 usage, limit;
        struct obd_quotactl *qctl;
        int ret = 0;
        __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;
        __u32 is_blk = (qdata->qd_flags & QUOTA_IS_BLOCK) >> 1;
        ENTRY;

        if (!sb_any_quota_enabled(sb))
                RETURN(0);

        /* ignore root user */
        if (qdata->qd_id == 0 && qdata_type == USRQUOTA)
                RETURN(0);

        OBD_ALLOC_PTR(qctl);
        if (qctl == NULL)
                RETURN(-ENOMEM);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = qdata_type;
        ret = fsfilt_quotactl(obd, sb, qctl);
        if (ret) {
                if (ret == -ESRCH)      /* no limit */
                        ret = 0;
                else
                        CERROR("can't get fs quota usage! (rc:%d)\n", ret);
                GOTO(out, ret);
        }

        if (is_blk) {
                usage = qctl->qc_dqblk.dqb_curspace;
                limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
                qunit_sz = qctxt->lqc_bunit_sz;
                tune_sz = qctxt->lqc_btune_sz;

                LASSERT(!(qunit_sz % QUOTABLOCK_SIZE));
        } else {
                usage = qctl->qc_dqblk.dqb_curinodes;
                limit = qctl->qc_dqblk.dqb_ihardlimit;
                qunit_sz = qctxt->lqc_iunit_sz;
                tune_sz = qctxt->lqc_itune_sz;
        }

        /* ignore the no quota limit case */
        if (!limit)
                GOTO(out, ret = 0);

        /* we don't count the MIN_QLIMIT */
        if ((limit == MIN_QLIMIT && !is_blk) ||
            (toqb(limit) == MIN_QLIMIT && is_blk))
                limit = 0;

        LASSERT(qdata->qd_count == 0);
        if (limit <= usage + tune_sz) {
                while (qdata->qd_count + limit <= usage + tune_sz)
                        qdata->qd_count += qunit_sz;
                ret = 1;
        } else if (limit > usage + qunit_sz + tune_sz) {
                while (limit - qdata->qd_count > usage + qunit_sz + tune_sz)
                        qdata->qd_count += qunit_sz;
                ret = 2;
        }
        LASSERT(ret == 0 || qdata->qd_count);
        EXIT;
out:
        OBD_FREE_PTR(qctl);
        return ret;
}

/* caller must hold qunit_hash_lock */
static struct lustre_qunit *dqacq_in_flight(struct lustre_quota_ctxt *qctxt,
                                            struct qunit_data *qdata)
{
        unsigned int hashent = qunit_hashfn(qctxt, qdata);
        struct lustre_qunit *qunit;
        ENTRY;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        qunit = find_qunit(hashent, qctxt, qdata);
        RETURN(qunit);
}

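/* Allocate a qunit from the slab cache and initialize it with the given
 * quota data and operation (QUOTA_DQACQ or QUOTA_DQREL); the initial
 * reference count is 1. */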
static struct lustre_qunit *alloc_qunit(struct lustre_quota_ctxt *qctxt,
                                        struct qunit_data *qdata, int opc)
{
        struct lustre_qunit *qunit = NULL;
        ENTRY;

        OBD_SLAB_ALLOC(qunit, qunit_cachep, SLAB_NOFS, sizeof(*qunit));
        if (qunit == NULL)
                RETURN(NULL);

        INIT_LIST_HEAD(&qunit->lq_hash);
        INIT_LIST_HEAD(&qunit->lq_waiters);
        atomic_set(&qunit->lq_refcnt, 1);
        qunit->lq_ctxt = qctxt;
        memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
        qunit->lq_opc = opc;

        RETURN(qunit);
}

static inline void free_qunit(struct lustre_qunit *qunit)
{
        OBD_SLAB_FREE(qunit, qunit_cachep, sizeof(*qunit));
}

static inline void qunit_get(struct lustre_qunit *qunit)
{
        atomic_inc(&qunit->lq_refcnt);
}

static void qunit_put(struct lustre_qunit *qunit)
{
        LASSERT(atomic_read(&qunit->lq_refcnt));
        if (atomic_dec_and_test(&qunit->lq_refcnt))
                free_qunit(qunit);
}

static void
insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
{
        struct list_head *head;

        LASSERT(list_empty(&qunit->lq_hash));
        head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
        list_add(&qunit->lq_hash, head);
}

static void remove_qunit_nolock(struct lustre_qunit *qunit)
{
        LASSERT(!list_empty(&qunit->lq_hash));
        list_del_init(&qunit->lq_hash);
}

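/* A thread waiting for an in-flight dqacq/dqrel to complete.  qw_entry is
 * linked on the qunit's lq_waiters list and qw_rc receives the result of
 * the request when the waiter is woken up. */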
struct qunit_waiter {
        struct list_head qw_entry;
        cfs_waitq_t      qw_waitq;
        int qw_rc;
};

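/* Raise the hard limit by @count; if the limit is still the MIN_QLIMIT
 * placeholder, replace it with @count instead of adding to it. */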
#define INC_QLIMIT(limit, count) (limit == MIN_QLIMIT) ? \
                                 (limit = count) : (limit += count)


/* FIXME check if this mds is the master of specified id */
static int
is_master(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
          unsigned int id, int type)
{
        return qctxt->lqc_handler ? 1 : 0;
}

static int
schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait);

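/* If the import only understands 32-bit quota counts, split a request
 * whose qd_count exceeds MAX_QUOTA_COUNT32 into several smaller
 * dqacq/dqrel requests before scheduling them. */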
static int split_before_schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                                       struct qunit_data *qdata, int opc, int wait)
{
        int rc = 0, ret;
        struct qunit_data tmp_qdata;
        ENTRY;

        LASSERT(qdata);
        if (qctxt->lqc_import)
                while (should_translate_quota(qctxt->lqc_import) &&
                       qdata->qd_count > MAX_QUOTA_COUNT32) {

                        tmp_qdata = *qdata;
                        tmp_qdata.qd_count = MAX_QUOTA_COUNT32;
                        qdata->qd_count -= tmp_qdata.qd_count;
                        ret = schedule_dqacq(obd, qctxt, &tmp_qdata, opc, wait);
                        if (!rc)
                                rc = ret;
                }

        if (qdata->qd_count) {
                ret = schedule_dqacq(obd, qctxt, qdata, opc, wait);
                if (!rc)
                        rc = ret;
        }

        RETURN(rc);
}

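/* Completion handler for a dqacq/dqrel request.  On success the local
 * operational quota file is updated with the acquired or released amount,
 * then the qunit is removed from the hash, all waiters are woken, and a
 * further dqacq/dqrel is rescheduled if the current usage still calls for
 * one (only on a slave, and only when the request did not fail). */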
static int
dqacq_completion(struct obd_device *obd,
                 struct lustre_quota_ctxt *qctxt,
                 struct qunit_data *qdata, int rc, int opc)
{
        struct lustre_qunit *qunit = NULL;
        struct super_block *sb = qctxt->lqc_sb;
        unsigned long qunit_sz;
        struct qunit_waiter *qw, *tmp;
        int err = 0;
        __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;
        __u32 is_blk = (qdata->qd_flags & QUOTA_IS_BLOCK) >> 1;
        __u64 qd_tmp = qdata->qd_count;
        unsigned long div_r;
        ENTRY;

        LASSERT(qdata);
        qunit_sz = is_blk ? qctxt->lqc_bunit_sz : qctxt->lqc_iunit_sz;
        div_r = do_div(qd_tmp, qunit_sz);
        LASSERT(!div_r);

        /* update local operational quota file */
        if (rc == 0) {
                __u32 count = QUSG(qdata->qd_count, is_blk);
                struct obd_quotactl *qctl;
                __u64 *hardlimit;

                OBD_ALLOC_PTR(qctl);
                if (qctl == NULL)
                        GOTO(out, err = -ENOMEM);

                /* acq/rel qunit for specified uid/gid is serialized,
                 * so there is no race between get fs quota limit and
                 * set fs quota limit */
                qctl->qc_cmd = Q_GETQUOTA;
                qctl->qc_id = qdata->qd_id;
                qctl->qc_type = qdata_type;
                err = fsfilt_quotactl(obd, sb, qctl);
                if (err) {
                        CERROR("can't get fs quota limit! (rc:%d)\n", err);
                        GOTO(out_mem, err);
                }

                if (is_blk) {
                        qctl->qc_dqblk.dqb_valid = QIF_BLIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_bhardlimit;
                } else {
                        qctl->qc_dqblk.dqb_valid = QIF_ILIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_ihardlimit;
                }

                switch (opc) {
                case QUOTA_DQACQ:
                        INC_QLIMIT(*hardlimit, count);
                        break;
                case QUOTA_DQREL:
                        LASSERT(count < *hardlimit);
                        *hardlimit -= count;
                        break;
                default:
                        LBUG();
                }

                /* clear quota limit */
                if (count == 0)
                        *hardlimit = 0;

                qctl->qc_cmd = Q_SETQUOTA;
                err = fsfilt_quotactl(obd, sb, qctl);
                if (err)
                        CERROR("can't set fs quota limit! (rc:%d)\n", err);

                QDATA_DEBUG(qdata, "%s completion\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
out_mem:
                OBD_FREE_PTR(qctl);
        } else if (rc == -EDQUOT) {
                QDATA_DEBUG(qdata, "acquire qunit got EDQUOT.\n");
        } else if (rc == -EBUSY) {
                QDATA_DEBUG(qdata, "it is recovering, got EBUSY.\n");
        } else {
                CERROR("acquire qunit got error! (rc:%d)\n", rc);
        }
out:
        /* remove the qunit from hash */
        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
        /* this qunit has been removed by qctxt_cleanup() */
        if (!qunit) {
                spin_unlock(&qunit_hash_lock);
                RETURN(err);
        }

        LASSERT(opc == qunit->lq_opc);
        remove_qunit_nolock(qunit);

        /* wake up all waiters */
        list_for_each_entry_safe(qw, tmp, &qunit->lq_waiters, qw_entry) {
                list_del_init(&qw->qw_entry);
                qw->qw_rc = rc;
                wake_up(&qw->qw_waitq);
        }

        spin_unlock(&qunit_hash_lock);

        qunit_put(qunit);

        /* don't reschedule in such cases:
         *   - acq/rel failure, but not for quota recovery.
         *   - local dqacq/dqrel.
         *   - local disk io failure.
         */
        if (err || (rc && rc != -EBUSY) ||
            is_master(obd, qctxt, qdata->qd_id, qdata_type))
                RETURN(err);

        /* reschedule another dqacq/dqrel if needed */
        qdata->qd_count = 0;
        rc = check_cur_qunit(obd, qctxt, qdata);
        if (rc > 0) {
                int opc;
                opc = rc == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                rc = split_before_schedule_dqacq(obd, qctxt, qdata, opc, 0);
                QDATA_DEBUG(qdata, "reschedule opc(%d) rc(%d)\n", opc, rc);
        }
        RETURN(err);
}

struct dqacq_async_args {
        struct lustre_quota_ctxt *aa_ctxt;
        struct lustre_qunit *aa_qunit;
};

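/* ptlrpc reply callback for asynchronous dqacq/dqrel requests: unpack the
 * returned qunit_data (64-bit format, or the old 32-bit format for peers
 * without OBD_CONNECT_QUOTA64) and finish the request through
 * dqacq_completion(). */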
static int dqacq_interpret(struct ptlrpc_request *req, void *data, int rc)
{
        struct dqacq_async_args *aa = (struct dqacq_async_args *)data;
        struct lustre_quota_ctxt *qctxt = aa->aa_ctxt;
        struct lustre_qunit *qunit = aa->aa_qunit;
        struct obd_device *obd = req->rq_import->imp_obd;
        struct qunit_data *qdata = NULL;
        struct qunit_data_old *qdata_old = NULL;
        ENTRY;

        LASSERT(req);
        LASSERT(req->rq_import);
        if ((req->rq_import->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64) &&
            !OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT)) {
                CDEBUG(D_QUOTA, "qd_count is 64bit!\n");
                qdata = lustre_swab_reqbuf(req, REPLY_REC_OFF, sizeof(*qdata), lustre_swab_qdata);
        } else {
                CDEBUG(D_QUOTA, "qd_count is 32bit!\n");
                qdata_old = lustre_swab_reqbuf(req, REPLY_REC_OFF, sizeof(struct qunit_data_old),
                                               lustre_swab_qdata_old);
                qdata = lustre_quota_old_to_new(qdata_old);
        }
        if (qdata == NULL) {
                DEBUG_REQ(D_ERROR, req, "error unpacking qunit_data\n");
                RETURN(-EPROTO);
        }

        LASSERT(qdata->qd_id == qunit->lq_data.qd_id &&
                (qdata->qd_flags & QUOTA_IS_GRP) == (qunit->lq_data.qd_flags & QUOTA_IS_GRP) &&
                (qdata->qd_count == qunit->lq_data.qd_count ||
                 qdata->qd_count == 0));

        QDATA_DEBUG(qdata, "%s interpret rc(%d).\n",
                    lustre_msg_get_opc(req->rq_reqmsg) == QUOTA_DQACQ ?
                    "DQACQ" : "DQREL", rc);

        rc = dqacq_completion(obd, qctxt, qdata, rc,
                              lustre_msg_get_opc(req->rq_reqmsg));

        RETURN(rc);
}

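/* A waiter has got its qunit once it has been removed from the waiter
 * list (the completion path does list_del_init() before waking it up). */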
static int got_qunit(struct qunit_waiter *waiter)
{
        int rc = 0;
        ENTRY;
        spin_lock(&qunit_hash_lock);
        rc = list_empty(&waiter->qw_entry);
        spin_unlock(&qunit_hash_lock);
        RETURN(rc);
}

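/* Schedule a dqacq/dqrel for @qdata.  If an identical request is already
 * in flight, just wait for it to finish (when @wait is set).  On the
 * master the request is handled locally through lqc_handler; on a slave
 * an async RPC is sent to the master and completed in dqacq_interpret(). */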
static int
schedule_dqacq(struct obd_device *obd,
               struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait)
{
        struct lustre_qunit *qunit, *empty;
        struct qunit_waiter qw;
        struct l_wait_info lwi = { 0 };
        struct ptlrpc_request *req;
        struct qunit_data *reqdata;
        struct dqacq_async_args *aa;
        int size[2] = { sizeof(struct ptlrpc_body), sizeof(*reqdata) };
        int rc = 0;
        ENTRY;

        INIT_LIST_HEAD(&qw.qw_entry);
        init_waitqueue_head(&qw.qw_waitq);
        qw.qw_rc = 0;

        if ((empty = alloc_qunit(qctxt, qdata, opc)) == NULL)
                RETURN(-ENOMEM);

        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
        if (qunit) {
                if (wait)
                        list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
                spin_unlock(&qunit_hash_lock);

                free_qunit(empty);
                goto wait_completion;
        }
        qunit = empty;
        insert_qunit_nolock(qctxt, qunit);
        if (wait)
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
        spin_unlock(&qunit_hash_lock);

        LASSERT(qunit);

        /* master is going to dqacq/dqrel from itself */
        if (is_master(obd, qctxt, qdata->qd_id, qdata->qd_flags & QUOTA_IS_GRP)) {
                int rc2;
                QDATA_DEBUG(qdata, "local %s.\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
                rc = qctxt->lqc_handler(obd, qdata, opc);
                rc2 = dqacq_completion(obd, qctxt, qdata, rc, opc);
                RETURN((rc && rc != -EDQUOT) ? rc : rc2);
        }

        /* build dqacq/dqrel request */
        LASSERT(qctxt->lqc_import);
        req = ptlrpc_prep_req(qctxt->lqc_import, LUSTRE_MDS_VERSION, opc, 2,
                              size, NULL);
        if (!req) {
                dqacq_completion(obd, qctxt, qdata, -ENOMEM, opc);
                RETURN(-ENOMEM);
        }

        LASSERT(!should_translate_quota(qctxt->lqc_import) ||
                qdata->qd_count <= MAX_QUOTA_COUNT32);
        if (should_translate_quota(qctxt->lqc_import) ||
            OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT))
        {
                struct qunit_data_old *reqdata_old, *tmp;

                reqdata_old = lustre_msg_buf(req->rq_reqmsg, REPLY_REC_OFF,
                                             sizeof(*reqdata_old));
                tmp = lustre_quota_new_to_old(qdata);
                *reqdata_old = *tmp;
                size[1] = sizeof(*reqdata_old);
                CDEBUG(D_QUOTA, "qd_count is 32bit!\n");
        } else {
                reqdata = lustre_msg_buf(req->rq_reqmsg, REPLY_REC_OFF,
                                         sizeof(*reqdata));
                *reqdata = *qdata;
                size[1] = sizeof(*reqdata);
                CDEBUG(D_QUOTA, "qd_count is 64bit!\n");
        }
        ptlrpc_req_set_repsize(req, 2, size);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct dqacq_async_args *)&req->rq_async_args;
        aa->aa_ctxt = qctxt;
        aa->aa_qunit = qunit;

        req->rq_interpret_reply = dqacq_interpret;
        ptlrpcd_add_req(req);

        QDATA_DEBUG(qdata, "%s scheduled.\n",
                    opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
wait_completion:
        if (wait && qunit) {
                struct qunit_data *p = &qunit->lq_data;
                QDATA_DEBUG(p, "wait for dqacq.\n");

                l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
                if (qw.qw_rc == 0)
                        rc = -EAGAIN;

                CDEBUG(D_QUOTA, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
        }
        RETURN(rc);
}

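/* Adjust the quota for @uid and @gid: run check_cur_qunit() for both the
 * user and the group id (block or inode quota depending on @isblk) and
 * schedule whatever acquire/release requests it asks for. */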
int
qctxt_adjust_qunit(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                   uid_t uid, gid_t gid, __u32 isblk, int wait)
{
        int ret, rc = 0, i = USRQUOTA;
        __u32 id[MAXQUOTAS] = { uid, gid };
        struct qunit_data qdata[MAXQUOTAS];
        ENTRY;

        CLASSERT(MAXQUOTAS < 4);
        if (!sb_any_quota_enabled(qctxt->lqc_sb))
                RETURN(0);

        for (i = 0; i < MAXQUOTAS; i++) {
                qdata[i].qd_id = id[i];
                qdata[i].qd_flags = 0;
                qdata[i].qd_flags |= i;
                qdata[i].qd_flags |= isblk ? QUOTA_IS_BLOCK : 0;
                qdata[i].qd_count = 0;

                ret = check_cur_qunit(obd, qctxt, &qdata[i]);
                if (ret > 0) {
                        int opc;
                        /* need acquire or release */
                        opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                        ret = split_before_schedule_dqacq(obd, qctxt, &qdata[i],
                                                          opc, wait);
                        if (!rc)
                                rc = ret;
                }
        }

        RETURN(rc);
}

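/* Wait for any in-flight dqacq/dqrel for the given id/type/isblk to
 * complete before returning. */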
int
qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
                         unsigned short type, int isblk)
{
        struct lustre_qunit *qunit = NULL;
        struct qunit_waiter qw;
        struct qunit_data qdata;
        struct l_wait_info lwi = { 0 };
        ENTRY;

        INIT_LIST_HEAD(&qw.qw_entry);
        init_waitqueue_head(&qw.qw_waitq);
        qw.qw_rc = 0;

        qdata.qd_id = id;
        qdata.qd_flags = 0;
        qdata.qd_flags |= type;
        qdata.qd_flags |= isblk ? QUOTA_IS_BLOCK : 0;
        qdata.qd_count = 0;

        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, &qdata);
        if (qunit)
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);

        spin_unlock(&qunit_hash_lock);

        if (qunit) {
                struct qunit_data *p = &qdata;
                QDATA_DEBUG(p, "wait for dqacq completion.\n");
                l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
                QDATA_DEBUG(p, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
        }
        RETURN(0);
}

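/* Initialize a quota context: take a ptlrpcd reference, record the dqacq
 * handler and superblock, and set the default qunit and tune sizes. */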
int
qctxt_init(struct lustre_quota_ctxt *qctxt, struct super_block *sb,
           dqacq_handler_t handler)
{
        int rc = 0;
        ENTRY;

        rc = ptlrpcd_addref();
        if (rc)
                RETURN(rc);

        qctxt->lqc_handler = handler;
        qctxt->lqc_sb = sb;
        qctxt->lqc_import = NULL;
        qctxt->lqc_recovery = 0;
        qctxt->lqc_atype = 0;
        qctxt->lqc_status = 0;
        qctxt->lqc_bunit_sz = default_bunit_sz;
        qctxt->lqc_btune_sz = default_bunit_sz / 100 * default_btune_ratio;
        qctxt->lqc_iunit_sz = default_iunit_sz;
        qctxt->lqc_itune_sz = default_iunit_sz * default_itune_ratio / 100;

        RETURN(0);
}

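/* Tear down a quota context: drop every qunit belonging to it from the
 * hash, wake up the waiters of those qunits, and release the ptlrpcd
 * reference taken in qctxt_init(). */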
void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
{
        struct lustre_qunit *qunit, *tmp;
        struct qunit_waiter *qw, *tmp2;
        int i;
        ENTRY;

        spin_lock(&qunit_hash_lock);

        for (i = 0; i < NR_DQHASH; i++) {
                list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
                        if (qunit->lq_ctxt != qctxt)
                                continue;

                        remove_qunit_nolock(qunit);
                        /* wake up all waiters */
                        list_for_each_entry_safe(qw, tmp2, &qunit->lq_waiters,
                                                 qw_entry) {
                                list_del_init(&qw->qw_entry);
                                qw->qw_rc = 0;
                                wake_up(&qw->qw_waitq);
                        }
                        qunit_put(qunit);
                }
        }

        spin_unlock(&qunit_hash_lock);

        ptlrpcd_decref();

        EXIT;
}

struct qslave_recov_thread_data {
        struct obd_device *obd;
        struct lustre_quota_ctxt *qctxt;
        struct completion comp;
};

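/* Body of the qslave recovery thread: for each enabled quota type, read
 * all ids from the slave's operational quota file and re-run the
 * acquire/release check for every id, so the local limits are adjusted
 * again after a restart. */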
/* FIXME: only block quota is recovered for now */
static int qslave_recovery_main(void *arg)
{
        struct qslave_recov_thread_data *data = arg;
        struct obd_device *obd = data->obd;
        struct lustre_quota_ctxt *qctxt = data->qctxt;
        unsigned int type;
        int rc = 0;
        ENTRY;

        ptlrpc_daemonize("qslave_recovd");

        complete(&data->comp);

        if (qctxt->lqc_recovery)
                RETURN(0);
        qctxt->lqc_recovery = 1;

        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                struct qunit_data qdata;
                struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
                struct list_head id_list;
                struct dquot_id *dqid, *tmp;
                int ret;

                LOCK_DQONOFF_MUTEX(dqopt);
                if (!sb_has_quota_enabled(qctxt->lqc_sb, type)) {
                        UNLOCK_DQONOFF_MUTEX(dqopt);
                        break;
                }

                LASSERT(dqopt->files[type] != NULL);
                INIT_LIST_HEAD(&id_list);
#ifndef KERNEL_SUPPORTS_QUOTA_READ
                rc = fsfilt_qids(obd, dqopt->files[type], NULL, type, &id_list);
#else
                rc = fsfilt_qids(obd, NULL, dqopt->files[type], type, &id_list);
#endif
                UNLOCK_DQONOFF_MUTEX(dqopt);
                if (rc)
                        CERROR("Get ids from quota file failed. (rc:%d)\n", rc);

                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
                        list_del_init(&dqid->di_link);
                        /* skip slave recovery on itself */
                        if (is_master(obd, qctxt, dqid->di_id, type))
                                goto free;
                        if (rc && rc != -EBUSY)
                                goto free;

                        qdata.qd_id = dqid->di_id;
                        qdata.qd_flags = 0;
                        qdata.qd_flags |= type;
                        qdata.qd_flags |= QUOTA_IS_BLOCK;
                        qdata.qd_count = 0;

                        ret = check_cur_qunit(obd, qctxt, &qdata);
                        if (ret > 0) {
                                int opc;
                                opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                                rc = split_before_schedule_dqacq(obd, qctxt, &qdata, opc, 0);
                        } else
                                rc = 0;

                        if (rc)
                                CDEBUG(rc == -EBUSY ? D_QUOTA : D_ERROR,
                                       "qslave recovery failed! (id:%d type:%d "
                                       " rc:%d)\n", dqid->di_id, type, rc);
free:
                        kfree(dqid);
                }
        }

        qctxt->lqc_recovery = 0;
        RETURN(rc);
}

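/* Start the qslave recovery thread (qslave_recovery_main) and wait until
 * it has signalled that it is up and running before returning. */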
void
qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
{
        struct qslave_recov_thread_data data;
        int rc;
        ENTRY;

        if (!sb_any_quota_enabled(qctxt->lqc_sb))
                goto exit;

        data.obd = obd;
        data.qctxt = qctxt;
        init_completion(&data.comp);

        rc = kernel_thread(qslave_recovery_main, &data, CLONE_VM|CLONE_FILES);
        if (rc < 0) {
                CERROR("Cannot start quota recovery thread: rc %d\n", rc);
                goto exit;
        }
        wait_for_completion(&data.comp);
exit:
        EXIT;
}