/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  lustre/quota/quota_context.c
 *  Lustre Quota Context
 *
 *  Copyright (c) 2001-2005 Cluster File Systems, Inc.
 *   Author: Niu YaWei <niu@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   No redistribution or use is permitted outside of Cluster File Systems, Inc.
 *
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#define DEBUG_SUBSYSTEM S_MDS

#include <linux/version.h>
#include <linux/fs.h>
#include <asm/unistd.h>
#include <linux/slab.h>
#include <linux/quotaops.h>
#include <linux/module.h>
#include <linux/init.h>

#include <obd_class.h>
#include <lustre_quota.h>
#include <lustre_fsfilt.h>
#include <class_hash.h>
#include "quota_internal.h"

extern struct lustre_hash_operations lqs_hash_operations;

unsigned long default_bunit_sz = 128 * 1024 * 1024;       /* 128 MB */
unsigned long default_btune_ratio = 50;                   /* 50 percent */
unsigned long default_iunit_sz = 5120;       /* 5120 inodes */
unsigned long default_itune_ratio = 50;      /* 50 percent */

cfs_mem_cache_t *qunit_cachep = NULL;
struct list_head qunit_hash[NR_DQHASH];
spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;

struct lustre_qunit {
        struct list_head lq_hash;               /* Hash list in memory */
        atomic_t lq_refcnt;                     /* Use count */
        struct lustre_quota_ctxt *lq_ctxt;      /* Quota context this applies to */
        struct qunit_data lq_data;              /* See qunit_data */
        unsigned int lq_opc;                    /* QUOTA_DQACQ, QUOTA_DQREL */
        struct list_head lq_waiters;            /* All write threads waiting for this qunit */
};

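/* Return 1 when quota counts must be translated into the old 32-bit format
 * (the peer does not advertise OBD_CONNECT_QUOTA64), 0 when 64-bit counts
 * can be sent as-is. */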
int should_translate_quota (struct obd_import *imp)
{
        ENTRY;

        LASSERT(imp);
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(1, 7, 0, 0)
        if (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64 &&
            !OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT))
#else
        if (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64)
#endif
                RETURN(0);
        else
                RETURN(1);
}

void qunit_cache_cleanup(void)
{
        int i;
        ENTRY;

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                LASSERT(list_empty(qunit_hash + i));
        spin_unlock(&qunit_hash_lock);

        if (qunit_cachep) {
                int rc;
                rc = cfs_mem_cache_destroy(qunit_cachep);
                LASSERTF(rc == 0, "couldn't destroy qunit_cache slab\n");
                qunit_cachep = NULL;
        }
        EXIT;
}

int qunit_cache_init(void)
{
        int i;
        ENTRY;

        LASSERT(qunit_cachep == NULL);
        qunit_cachep = cfs_mem_cache_create("ll_qunit_cache",
                                            sizeof(struct lustre_qunit),
                                            0, 0);
        if (!qunit_cachep)
                RETURN(-ENOMEM);

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                INIT_LIST_HEAD(qunit_hash + i);
        spin_unlock(&qunit_hash_lock);
        RETURN(0);
}

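/* Hash a (quota context, id, user/group type) triple into a qunit_hash[]
 * bucket.  The context pointer is mixed in so that qunits from different
 * contexts with the same id spread over different buckets. */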
static inline int
qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
             __attribute__((__const__));

static inline int
qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
{
        unsigned int id = qdata->qd_id;
        unsigned int type = QDATA_IS_GRP(qdata);

        unsigned long tmp = ((unsigned long)qctxt >> L1_CACHE_SHIFT) ^ id;
        tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
        return tmp;
}

/* caller must hold qunit_hash_lock */
static inline struct lustre_qunit *find_qunit(unsigned int hashent,
                                              struct lustre_quota_ctxt *qctxt,
                                              struct qunit_data *qdata)
{
        struct lustre_qunit *qunit = NULL;
        struct qunit_data *tmp;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
                tmp = &qunit->lq_data;
                if (qunit->lq_ctxt == qctxt &&
                    qdata->qd_id == tmp->qd_id &&
                    (qdata->qd_flags & LQUOTA_QUNIT_FLAGS) ==
                    (tmp->qd_flags & LQUOTA_QUNIT_FLAGS))
                        return qunit;
        }
        return NULL;
}

/* check_cur_qunit - check the current usage against the qunit boundaries.
 * @qctxt: quota context
 * @qdata: the type of quota unit to be checked
 *
 * return: 1 - need to acquire a qunit;
 *         2 - need to release a qunit;
 *         0 - nothing needs to be done.
 *       < 0 - error.
 */
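/* Illustrative example (made-up numbers): with qunit_sz = 128 MB and
 * tune_sz = 64 MB, a hard limit of 200 MB and a usage of 190 MB give
 * limit <= usage + tune_sz (200 <= 254), so qd_count grows by one qunit
 * to 128 MB and the function returns 1 (acquire). */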
static int
check_cur_qunit(struct obd_device *obd,
                struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
{
        struct super_block *sb = qctxt->lqc_sb;
        unsigned long qunit_sz, tune_sz;
        __u64 usage, limit, limit_org, pending_write = 0;
        long long record = 0;
        struct obd_quotactl *qctl;
        struct lustre_qunit_size *lqs = NULL;
        int ret = 0;
        ENTRY;

        if (!sb_any_quota_enabled(sb))
                RETURN(0);

        OBD_ALLOC_PTR(qctl);
        if (qctl == NULL)
                RETURN(-ENOMEM);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = QDATA_IS_GRP(qdata);
        ret = fsfilt_quotactl(obd, sb, qctl);
        if (ret) {
                if (ret == -ESRCH)      /* no limit */
                        ret = 0;
                else
                        CERROR("can't get fs quota usage! (rc:%d)\n", ret);
                GOTO(out, ret);
        }

        if (QDATA_IS_BLK(qdata)) {
                usage = qctl->qc_dqblk.dqb_curspace;
                limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
        } else {
                usage = qctl->qc_dqblk.dqb_curinodes;
                limit = qctl->qc_dqblk.dqb_ihardlimit;
        }

        /* ignore the case of no quota limit; this also avoids creating
         * unnecessary lqs entries for the uid/gid */
        if (!limit)
                GOTO(out, ret = 0);

 search_lqs:
        quota_search_lqs(qdata, NULL, qctxt, &lqs);
        if (!lqs) {
                CDEBUG(D_QUOTA, "Can't find the lustre qunit size!\n");
                ret = quota_create_lqs(qdata, NULL, qctxt, &lqs);
                if (ret == -EALREADY)
                        goto search_lqs;
                if (ret < 0)
                        GOTO(out, ret);
        }
        spin_lock(&lqs->lqs_lock);

        if (QDATA_IS_BLK(qdata)) {
                qunit_sz = lqs->lqs_bunit_sz;
                tune_sz  = lqs->lqs_btune_sz;
                pending_write = lqs->lqs_bwrite_pending * CFS_PAGE_SIZE;
                record   = lqs->lqs_blk_rec;
                LASSERT(!(qunit_sz % QUOTABLOCK_SIZE));
        } else {
                /* we do not need to change the inode qunit size for now */
                qunit_sz = lqs->lqs_iunit_sz;
                tune_sz  = lqs->lqs_itune_sz;
                pending_write = lqs->lqs_iwrite_pending;
                record   = lqs->lqs_ino_rec;
        }

        /* we don't count the MIN_QLIMIT */
        if ((limit == MIN_QLIMIT && !QDATA_IS_BLK(qdata)) ||
            (toqb(limit) == MIN_QLIMIT && QDATA_IS_BLK(qdata)))
                limit = 0;

        usage += pending_write;
        limit_org = limit;
        /* while a quota release request is in flight, limit has already been
         * assigned a small value, so limit + record could go negative */
        if (limit + record < 0)
                usage -= record;
        else
                limit += record;

        LASSERT(qdata->qd_count == 0);
        if (limit <= usage + tune_sz) {
                while (qdata->qd_count + limit <=
                       usage + tune_sz)
                        qdata->qd_count += qunit_sz;
                ret = 1;
        } else if (limit > usage + qunit_sz + tune_sz &&
                   limit_org > qdata->qd_count + qunit_sz) {
                while (limit - qdata->qd_count > usage + qunit_sz + tune_sz &&
                       limit_org > qdata->qd_count + qunit_sz)
                        qdata->qd_count += qunit_sz;
                ret = 2;
        }
        CDEBUG(D_QUOTA, "type: %c, limit: "LPU64", usage: "LPU64
               ", pending_write: "LPU64", record: "LPD64
               ", qunit_sz: %lu, tune_sz: %lu, ret: %d.\n",
               QDATA_IS_BLK(qdata) ? 'b' : 'i', limit, usage, pending_write,
               record, qunit_sz, tune_sz, ret);
        LASSERT(ret == 0 || qdata->qd_count);

        if (ret > 0) {
                quota_compute_lqs(qdata, lqs, 1, (ret == 1) ? 1 : 0);
                /* when this qdata is returned from the mds, lqs_putref will be called */
                lqs_getref(lqs);
        }

        spin_unlock(&lqs->lqs_lock);
        lqs_putref(lqs);
        EXIT;
 out:
        OBD_FREE_PTR(qctl);
        return ret;
}

/* compute the remaining quota for certain gid or uid b=11693 */
int compute_remquota(struct obd_device *obd,
                     struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata,
                     int isblk)
{
        struct super_block *sb = qctxt->lqc_sb;
        __u64 usage, limit;
        struct obd_quotactl *qctl;
        int ret = QUOTA_RET_OK;
        ENTRY;

        if (!sb_any_quota_enabled(sb))
                RETURN(QUOTA_RET_NOQUOTA);

        /* ignore root user */
        if (qdata->qd_id == 0 && QDATA_IS_GRP(qdata) == USRQUOTA)
                RETURN(QUOTA_RET_NOLIMIT);

        OBD_ALLOC_PTR(qctl);
        if (qctl == NULL)
                RETURN(-ENOMEM);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = QDATA_IS_GRP(qdata);
        ret = fsfilt_quotactl(obd, sb, qctl);
        if (ret) {
                if (ret == -ESRCH)      /* no limit */
                        ret = QUOTA_RET_NOLIMIT;
                else
                        CDEBUG(D_QUOTA, "can't get fs quota usage! (rc:%d)\n",
                               ret);
                GOTO(out, ret);
        }

        usage = isblk ? qctl->qc_dqblk.dqb_curspace :
                qctl->qc_dqblk.dqb_curinodes;
        limit = isblk ? qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS :
                qctl->qc_dqblk.dqb_ihardlimit;
        if (!limit) {           /* no limit */
                ret = QUOTA_RET_NOLIMIT;
                GOTO(out, ret);
        }

        if (limit >= usage)
                qdata->qd_count = limit - usage;
        else
                qdata->qd_count = 0;
        EXIT;
out:
        OBD_FREE_PTR(qctl);
        return ret;
}

/* caller must hold qunit_hash_lock */
static struct lustre_qunit *dqacq_in_flight(struct lustre_quota_ctxt *qctxt,
                                            struct qunit_data *qdata)
{
        unsigned int hashent = qunit_hashfn(qctxt, qdata);
        struct lustre_qunit *qunit;
        ENTRY;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        qunit = find_qunit(hashent, qctxt, qdata);
        RETURN(qunit);
}

static struct lustre_qunit *alloc_qunit(struct lustre_quota_ctxt *qctxt,
                                        struct qunit_data *qdata, int opc)
{
        struct lustre_qunit *qunit = NULL;
        ENTRY;

        OBD_SLAB_ALLOC(qunit, qunit_cachep, CFS_ALLOC_IO, sizeof(*qunit));
        if (qunit == NULL)
                RETURN(NULL);

        INIT_LIST_HEAD(&qunit->lq_hash);
        INIT_LIST_HEAD(&qunit->lq_waiters);
        atomic_set(&qunit->lq_refcnt, 1);
        qunit->lq_ctxt = qctxt;
        memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
        qunit->lq_opc = opc;

        RETURN(qunit);
}

static inline void free_qunit(struct lustre_qunit *qunit)
{
        OBD_SLAB_FREE(qunit, qunit_cachep, sizeof(*qunit));
}

static inline void qunit_get(struct lustre_qunit *qunit)
{
        atomic_inc(&qunit->lq_refcnt);
}

static void qunit_put(struct lustre_qunit *qunit)
{
        LASSERT(atomic_read(&qunit->lq_refcnt));
        if (atomic_dec_and_test(&qunit->lq_refcnt))
                free_qunit(qunit);
}

static void
insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
{
        struct list_head *head;

        LASSERT(list_empty(&qunit->lq_hash));
        head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
        list_add(&qunit->lq_hash, head);
}

static void compute_lqs_after_removing_qunit(struct lustre_qunit *qunit)
{
        struct lustre_qunit_size *lqs = NULL;

        quota_search_lqs(&qunit->lq_data, NULL, qunit->lq_ctxt, &lqs);
        if (lqs) {
                spin_lock(&lqs->lqs_lock);
                if (qunit->lq_opc == QUOTA_DQACQ)
                        quota_compute_lqs(&qunit->lq_data, lqs, 0, 1);
                if (qunit->lq_opc == QUOTA_DQREL)
                        quota_compute_lqs(&qunit->lq_data, lqs, 0, 0);
                spin_unlock(&lqs->lqs_lock);
                /* this is for quota_search_lqs */
                lqs_putref(lqs);
                /* this is for check_cur_qunit */
                lqs_putref(lqs);
        }
}

static void remove_qunit_nolock(struct lustre_qunit *qunit)
{
        LASSERT(!list_empty(&qunit->lq_hash));
        LASSERT_SPIN_LOCKED(&qunit_hash_lock);

        list_del_init(&qunit->lq_hash);
}

struct qunit_waiter {
        struct list_head qw_entry;
        cfs_waitq_t      qw_waitq;
        int qw_rc;
};

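/* If the stored hard limit is still MIN_QLIMIT, replace it with the acquired
 * count rather than adding to it (check_cur_qunit() likewise does not treat
 * MIN_QLIMIT as a real limit). */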
#define INC_QLIMIT(limit, count) (limit == MIN_QLIMIT) ? \
                                 (limit = count) : (limit += count)


/* FIXME check if this mds is the master of the specified id */
static int
is_master(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
          unsigned int id, int type)
{
        return qctxt->lqc_handler ? 1 : 0;
}

static int
schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait);

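/* Old peers only understand 32-bit quota counts: when the request will be
 * translated (see should_translate_quota()) and qd_count exceeds what fits
 * in 32 bits, send only the largest qunit-aligned chunk that fits and leave
 * the remainder in qdata for a later request. */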
static int split_before_schedule_dqacq(struct obd_device *obd,
                                       struct lustre_quota_ctxt *qctxt,
                                       struct qunit_data *qdata, int opc, int wait)
{
        int rc = 0;
        unsigned long factor;
        struct qunit_data tmp_qdata;
        ENTRY;

        LASSERT(qdata && qdata->qd_count);
        QDATA_DEBUG(qdata, "%s quota split.\n",
                    QDATA_IS_BLK(qdata) ? "block" : "inode");
        if (QDATA_IS_BLK(qdata))
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz *
                        qctxt->lqc_bunit_sz;
        else
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz *
                        qctxt->lqc_iunit_sz;

        if (qctxt->lqc_import && should_translate_quota(qctxt->lqc_import) &&
            qdata->qd_count > factor) {
                tmp_qdata = *qdata;
                tmp_qdata.qd_count = factor;
                qdata->qd_count -= tmp_qdata.qd_count;
                QDATA_DEBUG((&tmp_qdata), "request split.\n");
                rc = schedule_dqacq(obd, qctxt, &tmp_qdata, opc, wait);
        } else {
                QDATA_DEBUG(qdata, "request not split.\n");
                rc = schedule_dqacq(obd, qctxt, qdata, opc, wait);
        }

        RETURN(rc);
}

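/* Completion handler for a DQACQ/DQREL request (local or remote): apply the
 * acquired/released count to the local operational quota file, drop the
 * qunit from the hash, wake up any waiters, adjust the slave qunit size and,
 * if the usage has meanwhile crossed another boundary, reschedule a new
 * acquire/release. */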
static int
dqacq_completion(struct obd_device *obd,
                 struct lustre_quota_ctxt *qctxt,
                 struct qunit_data *qdata, int rc, int opc)
{
        struct lustre_qunit *qunit = NULL;
        struct super_block *sb = qctxt->lqc_sb;
        struct qunit_waiter *qw, *tmp;
        int err = 0;
        struct quota_adjust_qunit *oqaq = NULL;
        int rc1 = 0;
        ENTRY;

        LASSERT(qdata);
        QDATA_DEBUG(qdata, "obd(%s): complete %s quota req\n",
                    obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");

        /* update local operational quota file */
        if (rc == 0) {
                __u32 count = QUSG(qdata->qd_count, QDATA_IS_BLK(qdata));
                struct obd_quotactl *qctl;
                __u64 *hardlimit;

                OBD_ALLOC_PTR(qctl);
                if (qctl == NULL)
                        GOTO(out, err = -ENOMEM);

                /* acq/rel of a qunit for a given uid/gid is serialized,
                 * so there is no race between getting and setting the
                 * fs quota limit */
                qctl->qc_cmd = Q_GETQUOTA;
                qctl->qc_id = qdata->qd_id;
                qctl->qc_type = QDATA_IS_GRP(qdata);
                err = fsfilt_quotactl(obd, sb, qctl);
                if (err) {
                        CERROR("failed to get fs quota limit! (rc:%d)\n", err);
                        GOTO(out_mem, err);
                }

                if (QDATA_IS_BLK(qdata)) {
                        qctl->qc_dqblk.dqb_valid = QIF_BLIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_bhardlimit;
                } else {
                        qctl->qc_dqblk.dqb_valid = QIF_ILIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_ihardlimit;
                }

                CDEBUG(D_QUOTA, "hardlimit: "LPU64"\n", *hardlimit);
                switch (opc) {
                case QUOTA_DQACQ:
                        INC_QLIMIT(*hardlimit, count);
                        break;
                case QUOTA_DQREL:
                        LASSERTF(count < *hardlimit,
                                 "count: %d, hardlimit: "LPU64".\n",
                                 count, *hardlimit);
                        *hardlimit -= count;
                        break;
                default:
                        LBUG();
                }

                /* clear quota limit */
                if (count == 0)
                        *hardlimit = 0;

                qctl->qc_cmd = Q_SETQUOTA;
                err = fsfilt_quotactl(obd, sb, qctl);
                if (err)
                        CERROR("failed to set fs quota limit! (rc:%d)\n", err);

                QDATA_DEBUG(qdata, "%s completion\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
out_mem:
                OBD_FREE_PTR(qctl);
        } else if (rc == -EDQUOT) {
                QDATA_DEBUG(qdata, "acquire qunit got EDQUOT.\n");
        } else if (rc == -EBUSY) {
                QDATA_DEBUG(qdata, "the master is recovering, got EBUSY.\n");
        } else {
                CERROR("acquire qunit got error! (rc:%d)\n", rc);
        }
out:
        /* remove the qunit from hash */
        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
        /* this qunit has been removed by qctxt_cleanup() */
        if (!qunit) {
                spin_unlock(&qunit_hash_lock);
                RETURN(err);
        }

        LASSERT(opc == qunit->lq_opc);
        remove_qunit_nolock(qunit);
        spin_unlock(&qunit_hash_lock);

        compute_lqs_after_removing_qunit(qunit);

        /* wake up all waiters */
        list_for_each_entry_safe(qw, tmp, &qunit->lq_waiters, qw_entry) {
                list_del_init(&qw->qw_entry);
                qw->qw_rc = rc;
                wake_up(&qw->qw_waitq);
        }

        qunit_put(qunit);

        /* don't reschedule in these cases:
         *   - acq/rel failed and the qunit was not changed
         *     (except during quota recovery),
         *   - local dqacq/dqrel,
         *   - local disk I/O failure.
         */
        OBD_ALLOC_PTR(oqaq);
        if (!oqaq)
                RETURN(-ENOMEM);
        qdata_to_oqaq(qdata, oqaq);
        /* adjust the qunit size in slaves */
        rc1 = quota_adjust_slave_lqs(oqaq, qctxt);
        OBD_FREE_PTR(oqaq);
        if (rc1 < 0) {
                CERROR("adjusting the slave's qunit size failed! (rc:%d)\n", rc1);
                RETURN(rc1);
        }
        if (err || (rc && rc != -EBUSY && rc1 == 0) ||
            is_master(obd, qctxt, qdata->qd_id, QDATA_IS_GRP(qdata)))
                RETURN(err);

        /* reschedule another dqacq/dqrel if needed */
        qdata->qd_count = 0;
        qdata->qd_flags &= LQUOTA_QUNIT_FLAGS;
        rc1 = check_cur_qunit(obd, qctxt, qdata);
        if (rc1 > 0) {
                int opc;
                opc = rc1 == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                rc1 = split_before_schedule_dqacq(obd, qctxt, qdata, opc, 0);
                QDATA_DEBUG(qdata, "reschedule opc(%d) rc(%d)\n", opc, rc1);
        }
        RETURN(err);
}

struct dqacq_async_args {
        struct lustre_quota_ctxt *aa_ctxt;
        struct lustre_qunit *aa_qunit;
};

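/* Reply callback for an asynchronous DQACQ/DQREL request sent through
 * ptlrpcd: unpack the qunit_data from the reply, sanity-check it against
 * the qunit we sent, then feed the result to dqacq_completion(). */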
static int dqacq_interpret(struct ptlrpc_request *req, void *data, int rc)
{
        struct dqacq_async_args *aa = (struct dqacq_async_args *)data;
        struct lustre_quota_ctxt *qctxt = aa->aa_ctxt;
        struct lustre_qunit *qunit = aa->aa_qunit;
        struct obd_device *obd = req->rq_import->imp_obd;
        struct qunit_data *qdata = NULL;
        int rc1 = 0;
        ENTRY;

        LASSERT(req);
        LASSERT(req->rq_import);

        /* qunit_data comes in several formats (for historical reasons), so
         * convert the qunit in the reply into the common format here */
        OBD_ALLOC(qdata, sizeof(struct qunit_data));
        if (!qdata)
                RETURN(-ENOMEM);
        rc1 = quota_get_qdata(req, qdata, QUOTA_REPLY, QUOTA_IMPORT);
        if (rc1 < 0) {
                DEBUG_REQ(D_ERROR, req, "error unpacking qunit_data\n");
                GOTO(exit, rc = -EPROTO);
        }

        QDATA_DEBUG(qdata, "qdata: interpret rc(%d).\n", rc);
        QDATA_DEBUG((&qunit->lq_data), "lq_data: \n");

        if (qdata->qd_id != qunit->lq_data.qd_id ||
            OBD_FAIL_CHECK_ONCE(OBD_FAIL_QUOTA_RET_QDATA)) {
                CDEBUG(D_ERROR, "the returned qd_id isn't expected!"
                       "(qdata: %u, lq_data: %u)\n", qdata->qd_id,
                       qunit->lq_data.qd_id);
                qdata->qd_id = qunit->lq_data.qd_id;
                rc = -EPROTO;
        }
        if (QDATA_IS_GRP(qdata) != QDATA_IS_GRP(&qunit->lq_data)) {
                CDEBUG(D_ERROR, "the returned grp/usr isn't expected!"
                       "(qdata: %u, lq_data: %u)\n", qdata->qd_flags,
                       qunit->lq_data.qd_flags);
                if (QDATA_IS_GRP(&qunit->lq_data))
                        QDATA_SET_GRP(qdata);
                else
                        QDATA_CLR_GRP(qdata);
                rc = -EPROTO;
        }
        if (qdata->qd_count > qunit->lq_data.qd_count) {
                CDEBUG(D_ERROR, "the returned qd_count isn't expected!"
                       "(qdata: "LPU64", lq_data: "LPU64")\n", qdata->qd_count,
                       qunit->lq_data.qd_count);
                rc = -EPROTO;
        }

        rc = dqacq_completion(obd, qctxt, qdata, rc,
                              lustre_msg_get_opc(req->rq_reqmsg));

exit:
        OBD_FREE(qdata, sizeof(struct qunit_data));

        RETURN(rc);
}

static int got_qunit(struct qunit_waiter *waiter)
{
        int rc = 0;
        ENTRY;
        spin_lock(&qunit_hash_lock);
        rc = list_empty(&waiter->qw_entry);
        spin_unlock(&qunit_hash_lock);
        RETURN(rc);
}

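/* Send a single DQACQ/DQREL request described by qdata.  If an identical
 * request is already in flight, just join its waiter list (when wait is set)
 * instead of sending a new one; otherwise insert a new qunit into the hash
 * and either handle the request locally through the master's handler or send
 * it asynchronously to the master via ptlrpcd.  With wait set, block until
 * the request completes. */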
static int
schedule_dqacq(struct obd_device *obd,
               struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait)
{
        struct lustre_qunit *qunit, *empty;
        struct qunit_waiter qw;
        struct l_wait_info lwi = { 0 };
        struct ptlrpc_request *req;
        struct dqacq_async_args *aa;
        int size[2] = { sizeof(struct ptlrpc_body), 0 };
        struct obd_import *imp = NULL;
        unsigned long factor;
        int rc = 0;
        ENTRY;

        INIT_LIST_HEAD(&qw.qw_entry);
        init_waitqueue_head(&qw.qw_waitq);
        qw.qw_rc = 0;

        if ((empty = alloc_qunit(qctxt, qdata, opc)) == NULL)
                RETURN(-ENOMEM);

        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
        if (qunit) {
                struct lustre_qunit_size *lqs = NULL;

                if (wait)
                        list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
                spin_unlock(&qunit_hash_lock);
                free_qunit(empty);

                quota_search_lqs(qdata, NULL, qctxt, &lqs);
                if (lqs) {
                        spin_lock(&lqs->lqs_lock);
                        quota_compute_lqs(qdata, lqs, 0,
                                          (opc == QUOTA_DQACQ) ? 1 : 0);
                        spin_unlock(&lqs->lqs_lock);
                        /* this is for quota_search_lqs */
                        lqs_putref(lqs);
                        /* this is for check_cur_qunit */
                        lqs_putref(lqs);
                } else {
                        CDEBUG(D_ERROR, "Can't find the lustre qunit size!\n");
                }

                goto wait_completion;
        }
        qunit = empty;
        insert_qunit_nolock(qctxt, qunit);
        if (wait)
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
        spin_unlock(&qunit_hash_lock);

        LASSERT(qunit);

        QDATA_DEBUG(qdata, "obd(%s): send %s quota req\n",
                    obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");
        /* master is going to dqacq/dqrel from itself */
        if (is_master(obd, qctxt, qdata->qd_id, QDATA_IS_GRP(qdata))) {
                int rc2;
                QDATA_DEBUG(qdata, "local %s.\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
                rc = qctxt->lqc_handler(obd, qdata, opc);
                rc2 = dqacq_completion(obd, qctxt, qdata, rc, opc);
                RETURN((rc && rc != -EDQUOT) ? rc : rc2);
        }

        spin_lock(&qctxt->lqc_lock);
        if (!qctxt->lqc_import) {
                spin_unlock(&qctxt->lqc_lock);
                QDATA_DEBUG(qdata, "lqc_import is invalid.\n");
                spin_lock(&qunit_hash_lock);
                if (wait)
                        list_del_init(&qw.qw_entry);
                remove_qunit_nolock(qunit);
                spin_unlock(&qunit_hash_lock);
                /* adjust the lqs counters before dropping the qunit */
                compute_lqs_after_removing_qunit(qunit);
                qunit = NULL;
                free_qunit(empty);
                RETURN(-EAGAIN);
        } else {
                imp = class_import_get(qctxt->lqc_import);
        }
        spin_unlock(&qctxt->lqc_lock);

        /* build dqacq/dqrel request */
        LASSERT(imp);
        size[1] = quota_get_qunit_data_size(imp->
                                            imp_connect_data.ocd_connect_flags);

        req = ptlrpc_prep_req(imp, LUSTRE_MDS_VERSION, opc, 2,
                              size, NULL);
        if (!req) {
                dqacq_completion(obd, qctxt, qdata, -ENOMEM, opc);
                class_import_put(imp);
                RETURN(-ENOMEM);
        }

        if (QDATA_IS_BLK(qdata))
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz *
                        qctxt->lqc_bunit_sz;
        else
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz *
                        qctxt->lqc_iunit_sz;

        LASSERTF(!should_translate_quota(imp) ||
                 qdata->qd_count <= factor,
                 "qd_count: "LPU64"; should_translate_quota: %d.\n",
                 qdata->qd_count, should_translate_quota(imp));
        rc = quota_copy_qdata(req, qdata, QUOTA_REQUEST, QUOTA_IMPORT);
        if (rc < 0) {
                CDEBUG(D_ERROR, "Can't pack qunit_data\n");
                RETURN(-EPROTO);
        }
        ptlrpc_req_set_repsize(req, 2, size);
        class_import_put(imp);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct dqacq_async_args *)&req->rq_async_args;
        aa->aa_ctxt = qctxt;
        aa->aa_qunit = qunit;

        req->rq_interpret_reply = dqacq_interpret;
        ptlrpcd_add_req(req);

        QDATA_DEBUG(qdata, "%s scheduled.\n",
                    opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
wait_completion:
        if (wait && qunit) {
                struct qunit_data *p = &qunit->lq_data;
                QDATA_DEBUG(p, "wait for dqacq.\n");

                l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
                if (qw.qw_rc == 0)
                        rc = -EAGAIN;

                CDEBUG(D_QUOTA, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
        }
        RETURN(rc);
}

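/* Check both the uid and the gid of a completed operation and, for each one
 * that has crossed a qunit boundary, schedule the corresponding acquire or
 * release request towards the master. */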
int
qctxt_adjust_qunit(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                   uid_t uid, gid_t gid, __u32 isblk, int wait)
{
        int ret, rc = 0, i = USRQUOTA;
        __u32 id[MAXQUOTAS] = { uid, gid };
        struct qunit_data qdata[MAXQUOTAS];
        ENTRY;

        CLASSERT(MAXQUOTAS < 4);
        if (!sb_any_quota_enabled(qctxt->lqc_sb))
                RETURN(0);

        for (i = 0; i < MAXQUOTAS; i++) {
                qdata[i].qd_id = id[i];
                qdata[i].qd_flags = i;
                if (isblk)
                        QDATA_SET_BLK(&qdata[i]);
                qdata[i].qd_count = 0;

                ret = check_cur_qunit(obd, qctxt, &qdata[i]);
                if (ret > 0) {
                        int opc;
                        /* need to acquire or release */
                        opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                        ret = split_before_schedule_dqacq(obd, qctxt, &qdata[i],
                                                          opc, wait);
                        if (!rc)
                                rc = ret;
                }
        }

        RETURN(rc);
}

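/* If an acquire/release for (id, type) is currently in flight, wait for it
 * to complete before returning; used to drain pending requests. */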
int
qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
                         unsigned short type, int isblk)
{
        struct lustre_qunit *qunit = NULL;
        struct qunit_waiter qw;
        struct qunit_data qdata;
        struct l_wait_info lwi = { 0 };
        ENTRY;

        INIT_LIST_HEAD(&qw.qw_entry);
        init_waitqueue_head(&qw.qw_waitq);
        qw.qw_rc = 0;

        qdata.qd_id = id;
        qdata.qd_flags = type;
        if (isblk)
                QDATA_SET_BLK(&qdata);
        qdata.qd_count = 0;

        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, &qdata);
        if (qunit)
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);

        spin_unlock(&qunit_hash_lock);

        if (qunit) {
                struct qunit_data *p = &qdata;
                QDATA_DEBUG(p, "wait for dqacq completion.\n");
                l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
                QDATA_DEBUG(p, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
        }
        RETURN(0);
}

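/* Initialize a quota context: take a ptlrpcd reference, set the default
 * qunit/tune sizes and the qunit-resizing parameters, and create the
 * per-context lqs hash.  A non-NULL handler marks this context as the quota
 * master (see is_master()). */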
int
qctxt_init(struct lustre_quota_ctxt *qctxt, struct super_block *sb,
           dqacq_handler_t handler)
{
        int rc = 0;
        ENTRY;

        LASSERT(qctxt);

        rc = ptlrpcd_addref();
        if (rc)
                RETURN(rc);

        spin_lock_init(&qctxt->lqc_lock);
        spin_lock(&qctxt->lqc_lock);
        qctxt->lqc_handler = handler;
        qctxt->lqc_sb = sb;
        qctxt->lqc_import = NULL;
        qctxt->lqc_recovery = 0;
        qctxt->lqc_switch_qs = 1; /* change qunit size by default */
        qctxt->lqc_cqs_boundary_factor = 4;
        qctxt->lqc_cqs_least_bunit = PTLRPC_MAX_BRW_SIZE;
        qctxt->lqc_cqs_least_iunit = 1;
        qctxt->lqc_cqs_qs_factor = 2;
        qctxt->lqc_atype = 0;
        qctxt->lqc_status = 0;
        qctxt->lqc_bunit_sz = default_bunit_sz;
        qctxt->lqc_btune_sz = default_bunit_sz / 100 * default_btune_ratio;
        qctxt->lqc_iunit_sz = default_iunit_sz;
        qctxt->lqc_itune_sz = default_iunit_sz * default_itune_ratio / 100;
        qctxt->lqc_switch_seconds = 300; /* enlarging will wait 5 minutes
                                          * after the last shrinking */
        rc = lustre_hash_init(&LQC_HASH_BODY(qctxt), "LQS_HASH", 128,
                              &lqs_hash_operations);
        if (rc) {
                CDEBUG(D_ERROR, "failed to initialize the lqs hash!\n");
                lustre_hash_exit(&LQC_HASH_BODY(qctxt));
        }
        spin_unlock(&qctxt->lqc_lock);

        RETURN(rc);
}

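/* Tear down a quota context: remove all of its qunits from the global hash,
 * wake up any threads still waiting on them, destroy the lqs hash and drop
 * the ptlrpcd reference taken in qctxt_init(). */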
void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
{
        struct lustre_qunit *qunit, *tmp;
        struct qunit_waiter *qw, *tmp2;
        struct list_head tmp_list;
        int i;
        ENTRY;

        INIT_LIST_HEAD(&tmp_list);

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++) {
                list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
                        if (qunit->lq_ctxt != qctxt)
                                continue;
                        remove_qunit_nolock(qunit);
                        list_add(&qunit->lq_hash, &tmp_list);
                }
        }
        spin_unlock(&qunit_hash_lock);

        list_for_each_entry_safe(qunit, tmp, &tmp_list, lq_hash) {
                list_del_init(&qunit->lq_hash);
                compute_lqs_after_removing_qunit(qunit);
                /* wake up all waiters */
                list_for_each_entry_safe(qw, tmp2, &qunit->lq_waiters,
                                         qw_entry) {
                        list_del_init(&qw->qw_entry);
                        qw->qw_rc = 0;
                        wake_up(&qw->qw_waitq);
                }
                qunit_put(qunit);
        }

        lustre_hash_exit(&LQC_HASH_BODY(qctxt));
        ptlrpcd_decref();

        EXIT;
}

struct qslave_recov_thread_data {
        struct obd_device *obd;
        struct lustre_quota_ctxt *qctxt;
        struct completion comp;
};

/* FIXME: only block quota is recovered for now */
static int qslave_recovery_main(void *arg)
{
        struct qslave_recov_thread_data *data = arg;
        struct obd_device *obd = data->obd;
        struct lustre_quota_ctxt *qctxt = data->qctxt;
        unsigned int type;
        int rc = 0;
        ENTRY;

        ptlrpc_daemonize("qslave_recovd");

        complete(&data->comp);

        if (qctxt->lqc_recovery)
                RETURN(0);
        qctxt->lqc_recovery = 1;

        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                struct qunit_data qdata;
                struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
                struct list_head id_list;
                struct dquot_id *dqid, *tmp;
                int ret;

                LOCK_DQONOFF_MUTEX(dqopt);
                if (!sb_has_quota_enabled(qctxt->lqc_sb, type)) {
                        UNLOCK_DQONOFF_MUTEX(dqopt);
                        break;
                }

                LASSERT(dqopt->files[type] != NULL);
                INIT_LIST_HEAD(&id_list);
#ifndef KERNEL_SUPPORTS_QUOTA_READ
                rc = fsfilt_qids(obd, dqopt->files[type], NULL, type, &id_list);
#else
                rc = fsfilt_qids(obd, NULL, dqopt->files[type], type, &id_list);
#endif
                UNLOCK_DQONOFF_MUTEX(dqopt);
                if (rc)
                        CERROR("Get ids from quota file failed. (rc:%d)\n", rc);

                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
                        list_del_init(&dqid->di_link);
                        /* skip slave recovery on itself */
                        if (is_master(obd, qctxt, dqid->di_id, type))
                                goto free;
                        if (rc && rc != -EBUSY)
                                goto free;

                        qdata.qd_id = dqid->di_id;
                        qdata.qd_flags = type;
                        QDATA_SET_BLK(&qdata);
                        qdata.qd_count = 0;

                        ret = check_cur_qunit(obd, qctxt, &qdata);
                        if (ret > 0) {
                                int opc;
                                opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                                rc = split_before_schedule_dqacq(obd, qctxt,
                                                                 &qdata, opc, 0);
                        } else
                                rc = 0;

                        if (rc)
                                CDEBUG(rc == -EBUSY ? D_QUOTA : D_ERROR,
                                       "qslave recovery failed! (id:%d type:%d "
                                       "rc:%d)\n", dqid->di_id, type, rc);
free:
                        kfree(dqid);
                }
        }

        qctxt->lqc_recovery = 0;
        RETURN(rc);
}

void
qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
{
        struct qslave_recov_thread_data data;
        int rc;
        ENTRY;

        if (!sb_any_quota_enabled(qctxt->lqc_sb))
                goto exit;

        data.obd = obd;
        data.qctxt = qctxt;
        init_completion(&data.comp);

        rc = kernel_thread(qslave_recovery_main, &data, CLONE_VM|CLONE_FILES);
        if (rc < 0) {
                CERROR("Cannot start quota recovery thread: rc %d\n", rc);
                goto exit;
        }
        wait_for_completion(&data.comp);
exit:
        EXIT;
}