1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  lustre/quota/quota_context.c
5  *  Lustre Quota Context
6  *
7  *  Copyright (c) 2001-2005 Cluster File Systems, Inc.
8  *   Author: Niu YaWei <niu@clusterfs.com>
9  *
10  *   This file is part of Lustre, http://www.lustre.org.
11  *
12  *   No redistribution or use is permitted outside of Cluster File Systems, Inc.
13  *
14  */
15 #ifndef EXPORT_SYMTAB
16 # define EXPORT_SYMTAB
17 #endif
18
19 #define DEBUG_SUBSYSTEM S_MDS
20
21 #include <linux/version.h>
22 #include <linux/fs.h>
23 #include <asm/unistd.h>
24 #include <linux/slab.h>
25 #include <linux/quotaops.h>
26 #include <linux/module.h>
27 #include <linux/init.h>
28
29 #include <obd_class.h>
30 #include <lustre_quota.h>
31 #include <lustre_fsfilt.h>
32 #include "quota_internal.h"
33
34 unsigned long default_bunit_sz = 100 * 1024 * 1024;       /* 100M bytes */
35 unsigned long default_btune_ratio = 50;                   /* 50 percent */
36 unsigned long default_iunit_sz = 5000;       /* 5000 inodes */
37 unsigned long default_itune_ratio = 50;      /* 50 percent */
38
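/* slab cache for struct lustre_qunit, the hash of qunits with a dqacq/dqrel
 * request in flight, and the spinlock protecting that hash */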
39 cfs_mem_cache_t *qunit_cachep = NULL;
40 struct list_head qunit_hash[NR_DQHASH];
41 spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;
42
43 struct lustre_qunit {
44         struct list_head lq_hash;               /* Hash list in memory */
45         atomic_t lq_refcnt;                     /* Use count */
46         struct lustre_quota_ctxt *lq_ctxt;      /* Quota context this applies to */
47         struct qunit_data lq_data;              /* See qunit_data */
48         unsigned int lq_opc;                    /* QUOTA_DQACQ, QUOTA_DQREL */
49         struct list_head lq_waiters;            /* All write threads waiting for this qunit */
50 };
51
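/* Return 1 if qd_count must be translated to the old 32-bit qunit_data wire
 * format, i.e. when the peer did not negotiate OBD_CONNECT_QUOTA64 (or the
 * 32-bit failure hook is armed), and 0 otherwise. */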
52 int should_translate_quota(struct obd_import *imp)
53 {
54         ENTRY;
55
56         LASSERT(imp);
57         if ((imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64) && 
58             !OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT))
59                 RETURN(0);
60         else
61                 RETURN(1);
62 }
63
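/* Destroy the qunit slab cache on module cleanup; every hash chain must
 * already be empty by then. */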
64 void qunit_cache_cleanup(void)
65 {
66         int i;
67         ENTRY;
68
69         spin_lock(&qunit_hash_lock);
70         for (i = 0; i < NR_DQHASH; i++)
71                 LASSERT(list_empty(qunit_hash + i));
72         spin_unlock(&qunit_hash_lock);
73
74         if (qunit_cachep) {
75                 int rc;
76                 rc = cfs_mem_cache_destroy(qunit_cachep);
77                 LASSERTF(rc == 0, "couldn't destroy qunit_cache slab\n");
78                 qunit_cachep = NULL;
79         }
80         EXIT;
81 }
82
83 int qunit_cache_init(void)
84 {
85         int i;
86         ENTRY;
87
88         LASSERT(qunit_cachep == NULL);
89         qunit_cachep = cfs_mem_cache_create("ll_qunit_cache",
90                                             sizeof(struct lustre_qunit),
91                                             0, 0);
92         if (!qunit_cachep)
93                 RETURN(-ENOMEM);
94
95         spin_lock(&qunit_hash_lock);
96         for (i = 0; i < NR_DQHASH; i++)
97                 CFS_INIT_LIST_HEAD(qunit_hash + i);
98         spin_unlock(&qunit_hash_lock);
99         RETURN(0);
100 }
101
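/* Hash a (quota context, id, user/group type) triple into a qunit_hash[]
 * chain. */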
102 static inline int
103 qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
104              __attribute__((__const__));
105
106 static inline int
107 qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
108 {
109         unsigned int id = qdata->qd_id;
110         unsigned int type = qdata->qd_flags & QUOTA_IS_GRP;
111
112         unsigned long tmp = ((unsigned long)qctxt >> L1_CACHE_SHIFT) ^ id;
113         tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
114         return tmp;
115 }
116
117 /* compute the remaining quota for a given uid or gid (b=11693) */
118 int compute_remquota(struct obd_device *obd,
119                      struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
120 {
121         struct super_block *sb = qctxt->lqc_sb;
122         __u64 usage, limit;
123         struct obd_quotactl *qctl;
124         int ret = QUOTA_RET_OK;
125         __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;
126         ENTRY;
127
128         if (!sb_any_quota_enabled(sb))
129                 RETURN(QUOTA_RET_NOQUOTA);
130
131         /* ignore root user */
132         if (qdata->qd_id == 0 && qdata_type == USRQUOTA)
133                 RETURN(QUOTA_RET_NOLIMIT);
134
135         OBD_ALLOC_PTR(qctl);
136         if (qctl == NULL) 
137                 RETURN(-ENOMEM);
138
139         /* get fs quota usage & limit */
140         qctl->qc_cmd = Q_GETQUOTA;
141         qctl->qc_id = qdata->qd_id;
142         qctl->qc_type = qdata_type;
143         ret = fsfilt_quotactl(obd, sb, qctl);
144         if (ret) {
145                 if (ret == -ESRCH)      /* no limit */
146                         ret = QUOTA_RET_NOLIMIT;
147                 else
148                         CDEBUG(D_QUOTA, "can't get fs quota usage! (rc:%d)\n",
149                                ret);
150                 GOTO(out, ret);
151         }
152
153         usage = qctl->qc_dqblk.dqb_curspace;
154         limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
155         if (!limit) {           /* no limit */
156                 ret = QUOTA_RET_NOLIMIT;
157                 GOTO(out, ret);
158         }
159
160         if (limit >= usage)
161                 qdata->qd_count = limit - usage;
162         else
163                 qdata->qd_count = 0;
164         EXIT;
165 out:
166         OBD_FREE_PTR(qctl);
167         return ret;
168 }
169
170 /* caller must hold qunit_hash_lock */
171 static inline struct lustre_qunit *find_qunit(unsigned int hashent,
172                                               struct lustre_quota_ctxt *qctxt,
173                                               struct qunit_data *qdata)
174 {
175         struct lustre_qunit *qunit = NULL;
176         struct qunit_data *tmp;
177
178         LASSERT_SPIN_LOCKED(&qunit_hash_lock);
179         list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
180                 tmp = &qunit->lq_data;
181                 if (qunit->lq_ctxt == qctxt &&
182                     qdata->qd_id == tmp->qd_id && qdata->qd_flags == tmp->qd_flags)
183                         return qunit;
184         }
185         return NULL;
186 }
187
188 /* check_cur_qunit - check the current quota usage against the qunit size.
189  * @qctxt: quota context
190  * @qdata: identifies the quota unit (id/type) to be checked
191  *
192  * return: 1 - need to acquire a qunit;
193  *         2 - need to release a qunit;
194  *         0 - nothing to do;
195  *       < 0 - error.
196  */
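/* Illustrative example (using the default sizes defined above, i.e.
 * lqc_bunit_sz = 100M and lqc_btune_sz = 50M, for block quota):
 *   - limit 120M, usage  90M: limit <= usage + btune, so one 100M qunit is
 *     acquired (qd_count = 100M, return 1);
 *   - limit 400M, usage 100M: limit > usage + bunit + btune, so 200M worth
 *     of qunits is released (qd_count = 200M, return 2);
 *   - anything in between leaves qd_count at 0 and returns 0. */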
197 static int
198 check_cur_qunit(struct obd_device *obd,
199                 struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
200 {
201         struct super_block *sb = qctxt->lqc_sb;
202         unsigned long qunit_sz, tune_sz;
203         __u64 usage, limit;
204         struct obd_quotactl *qctl;
205         int ret = 0;
206         __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;
207         __u32 is_blk = (qdata->qd_flags & QUOTA_IS_BLOCK) >> 1;
208         ENTRY;
209
210         if (!sb_any_quota_enabled(sb))
211                 RETURN(0);
212
213         OBD_ALLOC_PTR(qctl);
214         if (qctl == NULL)
215                 RETURN(-ENOMEM);
216
217         /* get fs quota usage & limit */
218         qctl->qc_cmd = Q_GETQUOTA;
219         qctl->qc_id = qdata->qd_id;
220         qctl->qc_type = qdata_type;
221         ret = fsfilt_quotactl(obd, sb, qctl);
222         if (ret) {
223                 if (ret == -ESRCH)      /* no limit */
224                         ret = 0;
225                 else
226                         CERROR("can't get fs quota usage! (rc:%d)\n", ret);
227                 GOTO(out, ret);
228         }
229
230         if (is_blk) {
231                 usage = qctl->qc_dqblk.dqb_curspace;
232                 limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
233                 qunit_sz = qctxt->lqc_bunit_sz;
234                 tune_sz = qctxt->lqc_btune_sz;
235
236                 LASSERT(!(qunit_sz % QUOTABLOCK_SIZE));
237         } else {
238                 usage = qctl->qc_dqblk.dqb_curinodes;
239                 limit = qctl->qc_dqblk.dqb_ihardlimit;
240                 qunit_sz = qctxt->lqc_iunit_sz;
241                 tune_sz = qctxt->lqc_itune_sz;
242         }
243
244         /* ignore the no quota limit case */
245         if (!limit)
246                 GOTO(out, ret = 0);
247
248         /* we don't count the MIN_QLIMIT */
249         if ((limit == MIN_QLIMIT && !is_blk) ||
250             (toqb(limit) == MIN_QLIMIT && is_blk))
251                 limit = 0;
252
253         LASSERT(qdata->qd_count == 0);
254         if (limit <= usage + tune_sz) {
255                 while (qdata->qd_count + limit <= usage + tune_sz)
256                         qdata->qd_count += qunit_sz;
257                 ret = 1;
258         } else if (limit > usage + qunit_sz + tune_sz) {
259                 while (limit - qdata->qd_count > usage + qunit_sz + tune_sz)
260                         qdata->qd_count += qunit_sz;
261                 ret = 2;
262         }
263         LASSERT(ret == 0 || qdata->qd_count);
264         EXIT;
265 out:
266         OBD_FREE_PTR(qctl);
267         return ret;
268 }
269
270 /* caller must hold qunit_hash_lock */
271 static struct lustre_qunit *dqacq_in_flight(struct lustre_quota_ctxt *qctxt,
272                                             struct qunit_data *qdata)
273 {
274         unsigned int hashent = qunit_hashfn(qctxt, qdata);
275         struct lustre_qunit *qunit;
276         ENTRY;
277
278         LASSERT_SPIN_LOCKED(&qunit_hash_lock);
279         qunit = find_qunit(hashent, qctxt, qdata);
280         RETURN(qunit);
281 }
282
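/* Allocate a qunit from the slab cache with a reference count of one and a
 * private copy of the caller's qunit_data. */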
283 static struct lustre_qunit *alloc_qunit(struct lustre_quota_ctxt *qctxt,
284                                         struct qunit_data *qdata, int opc)
285 {
286         struct lustre_qunit *qunit = NULL;
287         ENTRY;
288
289         OBD_SLAB_ALLOC(qunit, qunit_cachep, CFS_ALLOC_IO, sizeof(*qunit));
290         if (qunit == NULL)
291                 RETURN(NULL);
292
293         CFS_INIT_LIST_HEAD(&qunit->lq_hash);
294         CFS_INIT_LIST_HEAD(&qunit->lq_waiters);
295         atomic_set(&qunit->lq_refcnt, 1);
296         qunit->lq_ctxt = qctxt;
297         memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
298         qunit->lq_opc = opc;
299
300         RETURN(qunit);
301 }
302
303 static inline void free_qunit(struct lustre_qunit *qunit)
304 {
305         OBD_SLAB_FREE(qunit, qunit_cachep, sizeof(*qunit));
306 }
307
308 static inline void qunit_get(struct lustre_qunit *qunit)
309 {
310         atomic_inc(&qunit->lq_refcnt);
311 }
312
313 static void qunit_put(struct lustre_qunit *qunit)
314 {
315         LASSERT(atomic_read(&qunit->lq_refcnt));
316         if (atomic_dec_and_test(&qunit->lq_refcnt))
317                 free_qunit(qunit);
318 }
319
320 static void
321 insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
322 {
323         struct list_head *head;
324
325         LASSERT(list_empty(&qunit->lq_hash));
326         head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
327         list_add(&qunit->lq_hash, head);
328 }
329
330 static void remove_qunit_nolock(struct lustre_qunit *qunit)
331 {
332         LASSERT(!list_empty(&qunit->lq_hash));
333         list_del_init(&qunit->lq_hash);
334 }
335
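/* A thread waiting for an in-flight dqacq/dqrel hangs a qunit_waiter on
 * lq_waiters; dqacq_completion() fills in qw_rc and wakes qw_waitq. */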
336 struct qunit_waiter {
337         struct list_head qw_entry;
338         cfs_waitq_t      qw_waitq;
339         int qw_rc;
340 };
341
342 #define INC_QLIMIT(limit, count) ((limit) == MIN_QLIMIT ? \
343                                   ((limit) = (count)) : ((limit) += (count)))
344
345
346 /* FIXME check if this mds is the master of specified id */
347 static int
348 is_master(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
349           unsigned int id, int type)
350 {
351         return qctxt->lqc_handler ? 1 : 0;
352 }
353
354 static int
355 schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
356                struct qunit_data *qdata, int opc, int wait);
357
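/* Peers using the old 32-bit qunit_data cannot carry a count larger than
 * MAX_QUOTA_COUNT32, so an over-large request is clipped to the largest
 * multiple of the unit size that still fits; the clipped part is scheduled
 * and the remainder is left in *qdata. */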
358 static int split_before_schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
359                                        struct qunit_data *qdata, int opc, int wait)
360 {
361         int rc = 0;
362         unsigned long factor;
363         struct qunit_data tmp_qdata;
364         ENTRY;
365
366         LASSERT(qdata && qdata->qd_count);
367         QDATA_DEBUG(qdata, "%s quota split.\n",
368                     (qdata->qd_flags & QUOTA_IS_BLOCK) ? "block" : "inode");
369         if (qdata->qd_flags & QUOTA_IS_BLOCK)
370                 factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz * 
371                         qctxt->lqc_bunit_sz;
372         else
373                 factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz * 
374                         qctxt->lqc_iunit_sz;
375
376         if (qctxt->lqc_import && should_translate_quota(qctxt->lqc_import) &&
377             qdata->qd_count > factor) {
378                 tmp_qdata = *qdata;
379                 tmp_qdata.qd_count = factor;
380                 qdata->qd_count -= tmp_qdata.qd_count;
381                 QDATA_DEBUG((&tmp_qdata), "will be split.\n");
382                 rc = schedule_dqacq(obd, qctxt, &tmp_qdata, opc, wait);
383         } else {
384                 QDATA_DEBUG(qdata, "will not be split.\n");
385                 rc = schedule_dqacq(obd, qctxt, qdata, opc, wait);
386         }
387
388         RETURN(rc);
389 }
390
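/* Handle the result of a dqacq/dqrel, local or remote: on success adjust the
 * hard limit in the local operational quota file, then remove the qunit from
 * the hash, wake up all waiters and, on a slave, reschedule another
 * acquire/release if the usage still crosses a threshold. */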
391 static int
392 dqacq_completion(struct obd_device *obd,
393                  struct lustre_quota_ctxt *qctxt,
394                  struct qunit_data *qdata, int rc, int opc)
395 {
396         struct lustre_qunit *qunit = NULL;
397         struct super_block *sb = qctxt->lqc_sb;
398         unsigned long qunit_sz;
399         struct qunit_waiter *qw, *tmp;
400         int err = 0;
401         __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;
402         __u32 is_blk = (qdata->qd_flags & QUOTA_IS_BLOCK) >> 1;
403         __u64 qd_tmp = qdata->qd_count;
404         unsigned long div_r;
405         ENTRY;
406
407         LASSERT(qdata);
408         qunit_sz = is_blk ? qctxt->lqc_bunit_sz : qctxt->lqc_iunit_sz;
409         div_r = do_div(qd_tmp, qunit_sz);
410         LASSERTF(!div_r, "qunit_sz: %lu, return qunit_sz: "LPU64"\n",
411                  qunit_sz, qd_tmp);
412
413         /* update local operational quota file */
414         if (rc == 0) {
415                 __u32 count = QUSG(qdata->qd_count, is_blk);
416                 struct obd_quotactl *qctl;
417                 __u64 *hardlimit;
418
419                 OBD_ALLOC_PTR(qctl);
420                 if (qctl == NULL)
421                         GOTO(out, err = -ENOMEM);
422
423                 /* acq/rel qunit for specified uid/gid is serialized,
424                  * so there is no race between get fs quota limit and
425                  * set fs quota limit */
426                 qctl->qc_cmd = Q_GETQUOTA;
427                 qctl->qc_id = qdata->qd_id;
428                 qctl->qc_type = qdata_type;
429                 err = fsfilt_quotactl(obd, sb, qctl);
430                 if (err) {
431                         CERROR("failed to get fs quota limit! (rc:%d)\n", err);
432                         GOTO(out_mem, err);
433                 }
434
435                 if (is_blk) {
436                         qctl->qc_dqblk.dqb_valid = QIF_BLIMITS;
437                         hardlimit = &qctl->qc_dqblk.dqb_bhardlimit;
438                 } else {
439                         qctl->qc_dqblk.dqb_valid = QIF_ILIMITS;
440                         hardlimit = &qctl->qc_dqblk.dqb_ihardlimit;
441                 }
442
443                 switch (opc) {
444                 case QUOTA_DQACQ:
445                         CDEBUG(D_QUOTA, "%s(acq):count: %d, hardlimit: "LPU64
446                                ", type: %s.\n", obd->obd_name, count, *hardlimit,
447                                qdata_type ? "grp" : "usr");
448                         INC_QLIMIT(*hardlimit, count);
449                         break;
450                 case QUOTA_DQREL:
451                         CDEBUG(D_QUOTA, "%s(rel):count: %d, hardlimit: "LPU64
452                                ", type: %s.\n", obd->obd_name, count, *hardlimit,
453                                qdata_type ? "grp" : "usr");
454                         LASSERTF(count < *hardlimit, 
455                                  "count: %d, hardlimit: "LPU64".\n", 
456                                  count, *hardlimit);
457                         *hardlimit -= count;
458                         break;
459                 default:
460                         LBUG();
461                 }
462
463                 /* clear quota limit */
464                 if (count == 0)
465                         *hardlimit = 0;
466
467                 qctl->qc_cmd = Q_SETQUOTA;
468                 err = fsfilt_quotactl(obd, sb, qctl);
469                 if (err)
470                         CERROR("failed to set fs quota limit! (rc:%d)\n", err);
471
472                 QDATA_DEBUG(qdata, "%s completion\n",
473                             opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
474 out_mem:
475                 OBD_FREE_PTR(qctl);
476         } else if (rc == -EDQUOT) {
477                 QDATA_DEBUG(qdata, "acquire qunit got EDQUOT.\n");
478         } else if (rc == -EBUSY) {
479                 QDATA_DEBUG(qdata, "it's is recovering, got EBUSY.\n");
480         } else {
481                 CERROR("acquire qunit got error! (rc:%d)\n", rc);
482         }
483 out:
484         /* remove the qunit from hash */
485         spin_lock(&qunit_hash_lock);
486
487         qunit = dqacq_in_flight(qctxt, qdata);
488         /* this qunit has been removed by qctxt_cleanup() */
489         if (!qunit) {
490                 spin_unlock(&qunit_hash_lock);
491                 RETURN(err);
492         }
493
494         LASSERT(opc == qunit->lq_opc);
495         remove_qunit_nolock(qunit);
496
497         /* wake up all waiters */
498         list_for_each_entry_safe(qw, tmp, &qunit->lq_waiters, qw_entry) {
499                 list_del_init(&qw->qw_entry);
500                 qw->qw_rc = rc;
501                 wake_up(&qw->qw_waitq);
502         }
503
504         spin_unlock(&qunit_hash_lock);
505
506         qunit_put(qunit);
507
508         /* don't reschedule in such cases:
509          *   - acq/rel failure, but not for quota recovery.
510          *   - local dqacq/dqrel.
511          *   - local disk io failure.
512          */
513         if (err || (rc && rc != -EBUSY) || 
514             is_master(obd, qctxt, qdata->qd_id, qdata_type))
515                 RETURN(err);
516
517         /* reschedule another dqacq/dqrel if needed */
518         qdata->qd_count = 0;
519         rc = check_cur_qunit(obd, qctxt, qdata);
520         if (rc > 0) {
521                 int opc;
522                 opc = rc == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
523                 rc = split_before_schedule_dqacq(obd, qctxt, qdata, opc, 0);
524                 QDATA_DEBUG(qdata, "reschedule opc(%d) rc(%d)\n", opc, rc);
525         }
526         RETURN(err);
527 }
528
529 struct dqacq_async_args {
530         struct lustre_quota_ctxt *aa_ctxt;
531         struct lustre_qunit *aa_qunit;
532 };
533
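/* Reply callback for a DQACQ/DQREL request sent through ptlrpcd: unpack the
 * returned qunit_data (64-bit or old 32-bit wire format) and feed it to
 * dqacq_completion(). */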
534 static int dqacq_interpret(struct ptlrpc_request *req, void *data, int rc)
535 {
536         struct dqacq_async_args *aa = (struct dqacq_async_args *)data;
537         struct lustre_quota_ctxt *qctxt = aa->aa_ctxt;
538         struct lustre_qunit *qunit = aa->aa_qunit;
539         struct obd_device *obd = req->rq_import->imp_obd;
540         struct qunit_data *qdata = NULL;
541         struct qunit_data_old *qdata_old = NULL;
542         ENTRY;
543
544         LASSERT(req);
545         LASSERT(req->rq_import);
546
547         if ((req->rq_import->imp_connect_data.ocd_connect_flags &
548              OBD_CONNECT_QUOTA64) &&
549             !OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT)) {
550                 CDEBUG(D_QUOTA, "qd_count is 64bit!\n");
551
552                 qdata = req_capsule_server_swab_get(&req->rq_pill,
553                                                     &RMF_QUNIT_DATA,
554                                           (void*)lustre_swab_qdata);
555         } else {
556                 CDEBUG(D_QUOTA, "qd_count is 32bit!\n");
557
558                 qdata_old = req_capsule_server_swab_get(&req->rq_pill,
559                                                         &RMF_QUNIT_DATA,
560                                            (void *)lustre_swab_qdata_old);
561                 qdata = lustre_quota_old_to_new(qdata_old);
562         }
563         if (qdata == NULL) {
564                 DEBUG_REQ(D_ERROR, req, "error unpacking qunit_data");
565                 RETURN(-EPROTO);
566         }
567
568         LASSERT(qdata->qd_id == qunit->lq_data.qd_id &&
569                 (qdata->qd_flags & QUOTA_IS_GRP) ==
570                  (qunit->lq_data.qd_flags & QUOTA_IS_GRP) &&
571                 (qdata->qd_count == qunit->lq_data.qd_count ||
572                  qdata->qd_count == 0));
573
574         QDATA_DEBUG(qdata, "%s interpret rc(%d).\n",
575                     lustre_msg_get_opc(req->rq_reqmsg) == QUOTA_DQACQ ?
576                     "DQACQ" : "DQREL", rc);
577
578         rc = dqacq_completion(obd, qctxt, qdata, rc,
579                               lustre_msg_get_opc(req->rq_reqmsg));
580
581         RETURN(rc);
582 }
583
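/* A waiter has been served once dqacq_completion() or qctxt_cleanup() has
 * unlinked it from lq_waiters, i.e. when qw_entry is empty again. */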
584 static int got_qunit(struct qunit_waiter *waiter)
585 {
586         int rc = 0;
587         ENTRY;
588         spin_lock(&qunit_hash_lock);
589         rc = list_empty(&waiter->qw_entry);
590         spin_unlock(&qunit_hash_lock);
591         RETURN(rc);
592 }
593
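/* Schedule a quota acquire/release for qdata: piggy-back on an already
 * in-flight qunit if there is one, handle the request locally on the master,
 * otherwise pack a DQACQ/DQREL RPC and hand it to ptlrpcd; if @wait is set,
 * block until the request completes. */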
594 static int
595 schedule_dqacq(struct obd_device *obd,
596                struct lustre_quota_ctxt *qctxt,
597                struct qunit_data *qdata, int opc, int wait)
598 {
599         struct lustre_qunit *qunit, *empty;
600         struct qunit_waiter qw;
601         struct l_wait_info lwi = { 0 };
602         struct ptlrpc_request *req;
603         struct qunit_data *reqdata;
604         struct dqacq_async_args *aa;
605         unsigned long factor;   
606         int rc = 0;
607         ENTRY;
608
609         CFS_INIT_LIST_HEAD(&qw.qw_entry);
610         init_waitqueue_head(&qw.qw_waitq);
611         qw.qw_rc = 0;
612
613         if ((empty = alloc_qunit(qctxt, qdata, opc)) == NULL)
614                 RETURN(-ENOMEM);
615
616         spin_lock(&qunit_hash_lock);
617
618         qunit = dqacq_in_flight(qctxt, qdata);
619         if (qunit) {
620                 if (wait)
621                         list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
622                 spin_unlock(&qunit_hash_lock);
623
624                 free_qunit(empty);
625                 goto wait_completion;
626         }
627         qunit = empty;
628         insert_qunit_nolock(qctxt, qunit);
629         if (wait)
630                 list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
631         spin_unlock(&qunit_hash_lock);
632
633         LASSERT(qunit);
634
635         /* master is going to dqacq/dqrel from itself */
636         if (is_master(obd, qctxt, qdata->qd_id, qdata->qd_flags & QUOTA_IS_GRP))
637         {
638                 int rc2;
639                 QDATA_DEBUG(qdata, "local %s.\n",
640                             opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
641                 rc = qctxt->lqc_handler(obd, qdata, opc);
642                 rc2 = dqacq_completion(obd, qctxt, qdata, rc, opc);
643                 RETURN((rc && rc != -EDQUOT) ? rc : rc2);
644         }
645
646         /* build dqacq/dqrel request */
647         LASSERT(qctxt->lqc_import);
648
649         req = ptlrpc_request_alloc_pack(qctxt->lqc_import, &RQF_MDS_QUOTA_DQACQ,
650                                         LUSTRE_MDS_VERSION, opc);
651         if (req == NULL) {
652                 dqacq_completion(obd, qctxt, qdata, -ENOMEM, opc);
653                 RETURN(-ENOMEM);
654         }
655
656         if (qdata->qd_flags & QUOTA_IS_BLOCK)
657                 factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz * 
658                          qctxt->lqc_bunit_sz;
659         else
660                 factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz * 
661                          qctxt->lqc_iunit_sz;
662
663         LASSERT(!should_translate_quota(qctxt->lqc_import) || 
664                 qdata->qd_count <= factor);
665         if (should_translate_quota(qctxt->lqc_import))
666         {
667                 struct qunit_data_old *reqdata_old, *tmp;
668                         
669                 reqdata_old = req_capsule_client_get(&req->rq_pill,
670                                                      &RMF_QUNIT_DATA);
671
672                 tmp = lustre_quota_new_to_old(qdata);
673                 *reqdata_old = *tmp;
674                 req_capsule_set_size(&req->rq_pill, &RMF_QUNIT_DATA, RCL_SERVER,
675                                      sizeof(*reqdata_old));
676                 CDEBUG(D_QUOTA, "qd_count is 32bit!\n");
677         } else {
678                 reqdata = req_capsule_client_get(&req->rq_pill,
679                                                  &RMF_QUNIT_DATA);
680
681                 *reqdata = *qdata;
682                 req_capsule_set_size(&req->rq_pill, &RMF_QUNIT_DATA, RCL_SERVER,
683                                      sizeof(*reqdata));
684                 CDEBUG(D_QUOTA, "qd_count is 64bit!\n");
685         }
686         ptlrpc_request_set_replen(req);
687
688         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
689         aa = (struct dqacq_async_args *)&req->rq_async_args;
690         aa->aa_ctxt = qctxt;
691         aa->aa_qunit = qunit;
692
693         req->rq_interpret_reply = dqacq_interpret;
694         ptlrpcd_add_req(req);
695
696         QDATA_DEBUG(qdata, "%s scheduled.\n",
697                     opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
698 wait_completion:
699         if (wait && qunit) {
700                 struct qunit_data *p = &qunit->lq_data;
701                 QDATA_DEBUG(p, "wait for dqacq.\n");
702
703                 l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
704                 if (qw.qw_rc == 0)
705                         rc = -EAGAIN;
706
707                 CDEBUG(D_QUOTA, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
708         }
709         RETURN(rc);
710 }
711
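/* Check both the user and group quota for (uid, gid) and schedule an
 * acquire or release (split if necessary) for whichever of them needs it. */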
712 int
713 qctxt_adjust_qunit(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
714                    uid_t uid, gid_t gid, __u32 isblk, int wait)
715 {
716         int ret, rc = 0, i = USRQUOTA;
717         __u32 id[MAXQUOTAS] = { uid, gid };
718         struct qunit_data qdata[MAXQUOTAS];
719         ENTRY;
720
721         CLASSERT(MAXQUOTAS < 4);
722         if (!sb_any_quota_enabled(qctxt->lqc_sb))
723                 RETURN(0);
724
725         for (i = 0; i < MAXQUOTAS; i++) {
726                 qdata[i].qd_id = id[i];
727                 qdata[i].qd_flags = 0;
728                 qdata[i].qd_flags |= i;
729                 qdata[i].qd_flags |= isblk ? QUOTA_IS_BLOCK : 0;        
730                 qdata[i].qd_count = 0;
731
732                 ret = check_cur_qunit(obd, qctxt, &qdata[i]);
733                 if (ret > 0) {
734                         int opc;
735                         /* need acquire or release */
736                         opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
737                         ret = split_before_schedule_dqacq(obd, qctxt, &qdata[i], 
738                                                           opc, wait);
739                         if (!rc)
740                                 rc = ret;
741                 }
742         }
743
744         RETURN(rc);
745 }
746
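/* Block until any in-flight dqacq/dqrel matching (id, type, isblk) has
 * completed. */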
747 int
748 qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
749                          unsigned short type, int isblk)
750 {
751         struct lustre_qunit *qunit = NULL;
752         struct qunit_waiter qw;
753         struct qunit_data qdata;
754         struct l_wait_info lwi = { 0 };
755         ENTRY;
756
757         CFS_INIT_LIST_HEAD(&qw.qw_entry);
758         init_waitqueue_head(&qw.qw_waitq);
759         qw.qw_rc = 0;
760
761         qdata.qd_id = id;
762         qdata.qd_flags = 0;
763         qdata.qd_flags |= type;
764         qdata.qd_flags |= isblk ? QUOTA_IS_BLOCK : 0;
765         qdata.qd_count = 0;
766
767         spin_lock(&qunit_hash_lock);
768
769         qunit = dqacq_in_flight(qctxt, &qdata);
770         if (qunit)
771                 list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
772
773         spin_unlock(&qunit_hash_lock);
774
775         if (qunit) {
776                 struct qunit_data *p = &qdata;
777                 QDATA_DEBUG(p, "wait for dqacq completion.\n");
778                 l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
779                 QDATA_DEBUG(p, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
780         }
781         RETURN(0);
782 }
783
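/* Initialize a quota context: take a ptlrpcd reference and set the default
 * block/inode unit and tune sizes; only the master passes a dqacq handler,
 * which is also how is_master() currently tells master from slave. */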
784 int
785 qctxt_init(struct lustre_quota_ctxt *qctxt, struct super_block *sb,
786            dqacq_handler_t handler)
787 {
788         int rc = 0;
789         ENTRY;
790
791         rc = ptlrpcd_addref();
792         if (rc)
793                 RETURN(rc);
794
795         qctxt->lqc_handler = handler;
796         qctxt->lqc_sb = sb;
797         qctxt->lqc_import = NULL;
798         qctxt->lqc_recovery = 0;
799         qctxt->lqc_atype = 0;
800         qctxt->lqc_status = 0;
801         qctxt->lqc_bunit_sz = default_bunit_sz;
802         qctxt->lqc_btune_sz = default_bunit_sz / 100 * default_btune_ratio;
803         qctxt->lqc_iunit_sz = default_iunit_sz;
804         qctxt->lqc_itune_sz = default_iunit_sz * default_itune_ratio / 100;
805
806         RETURN(0);
807 }
808
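/* Tear down a quota context: drop every qunit belonging to it from the hash,
 * wake up its waiters with qw_rc = 0 and release the ptlrpcd reference. */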
809 void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
810 {
811         struct lustre_qunit *qunit, *tmp;
812         struct qunit_waiter *qw, *tmp2;
813         int i;
814         ENTRY;
815
816         spin_lock(&qunit_hash_lock);
817
818         for (i = 0; i < NR_DQHASH; i++) {
819                 list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
820                         if (qunit->lq_ctxt != qctxt)
821                                 continue;
822
823                         remove_qunit_nolock(qunit);
824                         /* wake up all waiters */
825                         list_for_each_entry_safe(qw, tmp2, &qunit->lq_waiters,
826                                                  qw_entry) {
827                                 list_del_init(&qw->qw_entry);
828                                 qw->qw_rc = 0;
829                                 wake_up(&qw->qw_waitq);
830                         }
831                         qunit_put(qunit);
832                 }
833         }
834
835         spin_unlock(&qunit_hash_lock);
836
837         ptlrpcd_decref();
838
839         EXIT;
840 }
841
842 struct qslave_recov_thread_data {
843         struct obd_device *obd;
844         struct lustre_quota_ctxt *qctxt;
845         struct completion comp;
846 };
847
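/* Slave quota recovery thread: walk the ids recorded in the slave's
 * operational quota files and re-run check_cur_qunit()/dqacq for each of
 * them so that the local limits converge with the master again. */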
848 /* FIXME: only block quota is recovered for now */
849 static int qslave_recovery_main(void *arg)
850 {
851         struct qslave_recov_thread_data *data = arg;
852         struct obd_device *obd = data->obd;
853         struct lustre_quota_ctxt *qctxt = data->qctxt;
854         unsigned int type;
855         int rc = 0;
856         ENTRY;
857
858         ptlrpc_daemonize("qslave_recovd");
859
860         complete(&data->comp);
861
862         if (qctxt->lqc_recovery)
863                 RETURN(0);
864         qctxt->lqc_recovery = 1;
865
866         for (type = USRQUOTA; type < MAXQUOTAS; type++) {
867                 struct qunit_data qdata;
868                 struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
869                 struct list_head id_list;
870                 struct dquot_id *dqid, *tmp;
871                 int ret;
872
873                 LOCK_DQONOFF_MUTEX(dqopt);
874                 if (!sb_has_quota_enabled(qctxt->lqc_sb, type)) {
875                         UNLOCK_DQONOFF_MUTEX(dqopt);
876                         break;
877                 }
878
879                 LASSERT(dqopt->files[type] != NULL);
880                 CFS_INIT_LIST_HEAD(&id_list);
881 #ifndef KERNEL_SUPPORTS_QUOTA_READ 
882                 rc = fsfilt_qids(obd, dqopt->files[type], NULL, type, &id_list);
883 #else
884                 rc = fsfilt_qids(obd, NULL, dqopt->files[type], type, &id_list);
885 #endif
886                 UNLOCK_DQONOFF_MUTEX(dqopt);
887                 if (rc)
888                         CERROR("Get ids from quota file failed. (rc:%d)\n", rc);
889
890                 list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
891                         list_del_init(&dqid->di_link);
892                         /* skip slave recovery on itself */
893                         if (is_master(obd, qctxt, dqid->di_id, type))
894                                 goto free;
895                         if (rc && rc != -EBUSY)
896                                 goto free;
897
898                         qdata.qd_id = dqid->di_id;
899                         qdata.qd_flags = 0;
900                         qdata.qd_flags |= type;
901                         qdata.qd_flags |= QUOTA_IS_BLOCK;
902                         qdata.qd_count = 0;
903
904                         ret = check_cur_qunit(obd, qctxt, &qdata);
905                         if (ret > 0) {
906                                 int opc;
907                                 opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
908                                 rc = split_before_schedule_dqacq(obd, qctxt, &qdata, opc, 0);
909                         } else
910                                 rc = 0;
911
912                         if (rc)
913                                 CDEBUG(rc == -EBUSY ? D_QUOTA : D_ERROR,
914                                        "qslave recovery failed! (id:%d type:%d "
915                                        "rc:%d)\n", dqid->di_id, type, rc);
916 free:
917                         kfree(dqid);
918                 }
919         }
920
921         qctxt->lqc_recovery = 0;
922         RETURN(rc);
923 }
924
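/* Kick off slave quota recovery in a separate kernel thread and return once
 * the thread has started. */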
925 void
926 qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
927 {
928         struct qslave_recov_thread_data data;
929         int rc;
930         ENTRY;
931
932         if (!sb_any_quota_enabled(qctxt->lqc_sb))
933                 goto exit;
934
935         data.obd = obd;
936         data.qctxt = qctxt;
937         init_completion(&data.comp);
938
939         rc = kernel_thread(qslave_recovery_main, &data, CLONE_VM|CLONE_FILES);
940         if (rc < 0) {
941                 CERROR("Cannot start quota recovery thread: rc %d\n", rc);
942                 goto exit;
943         }
944         wait_for_completion(&data.comp);
945 exit:
946         EXIT;
947 }
948