/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  lustre/quota/quota_context.c
 *  Lustre Quota Context
 *
 *  Copyright (c) 2001-2005 Cluster File Systems, Inc.
 *   Author: Niu YaWei <niu@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   No redistribution or use is permitted outside of Cluster File Systems, Inc.
 *
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#define DEBUG_SUBSYSTEM S_MDS

#include <linux/version.h>
#include <linux/fs.h>
#include <asm/unistd.h>
#include <linux/slab.h>
#include <linux/quotaops.h>
#include <linux/module.h>
#include <linux/init.h>

#include <obd_class.h>
#include <lustre_quota.h>
#include <lustre_fsfilt.h>
#include <class_hash.h>
#include "quota_internal.h"

extern struct lustre_hash_operations lqs_hash_operations;

unsigned long default_bunit_sz = 128 * 1024 * 1024; /* 128M bytes */
unsigned long default_btune_ratio = 50;             /* 50 percent */
unsigned long default_iunit_sz = 5120;              /* 5120 inodes */
unsigned long default_itune_ratio = 50;             /* 50 percent */

cfs_mem_cache_t *qunit_cachep = NULL;
struct list_head qunit_hash[NR_DQHASH];
spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;

struct lustre_qunit {
        struct list_head lq_hash;          /* Hash list in memory */
        atomic_t lq_refcnt;                /* Use count */
        struct lustre_quota_ctxt *lq_ctxt; /* Quota context this applies to */
        struct qunit_data lq_data;         /* See qunit_data */
        unsigned int lq_opc;               /* QUOTA_DQACQ, QUOTA_DQREL */
        struct list_head lq_waiters;       /* Threads waiting for this qunit */
};

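/* should_translate_quota - decide whether quota counts must be translated
 * for this import.  Returns 0 when the peer advertises OBD_CONNECT_QUOTA64
 * (it understands 64-bit qd_count values), and 1 when the count has to be
 * kept within 32 bits for an older peer. */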
int should_translate_quota(struct obd_import *imp)
{
        ENTRY;

        LASSERT(imp);
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(1, 7, 0, 0)
        if (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64 &&
            !OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT))
#else
        if (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64)
#endif
                RETURN(0);
        else
                RETURN(1);
}

void qunit_cache_cleanup(void)
{
        int i;
        ENTRY;

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                LASSERT(list_empty(qunit_hash + i));
        spin_unlock(&qunit_hash_lock);

        if (qunit_cachep) {
                int rc;
                rc = cfs_mem_cache_destroy(qunit_cachep);
                LASSERTF(rc == 0, "couldn't destroy qunit_cache slab\n");
                qunit_cachep = NULL;
        }
        EXIT;
}

int qunit_cache_init(void)
{
        int i;
        ENTRY;

        LASSERT(qunit_cachep == NULL);
        qunit_cachep = cfs_mem_cache_create("ll_qunit_cache",
                                            sizeof(struct lustre_qunit),
                                            0, 0);
        if (!qunit_cachep)
                RETURN(-ENOMEM);

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                INIT_LIST_HEAD(qunit_hash + i);
        spin_unlock(&qunit_hash_lock);
        RETURN(0);
}

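/* qunit_hashfn - hash a (quota context, id, type) triple into qunit_hash.
 * The context pointer (shifted past cache-line alignment) is mixed with the
 * quota id, and the user/group type is folded in through the
 * (MAXQUOTAS - type) multiplier before reducing modulo NR_DQHASH. */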
static inline int
qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
             __attribute__((__const__));

static inline int
qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
{
        unsigned int id = qdata->qd_id;
        unsigned int type = QDATA_IS_GRP(qdata);

        unsigned long tmp = ((unsigned long)qctxt >> L1_CACHE_SHIFT) ^ id;
        tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
        return tmp;
}

/* caller must hold qunit_hash_lock */
static inline struct lustre_qunit *find_qunit(unsigned int hashent,
                                              struct lustre_quota_ctxt *qctxt,
                                              struct qunit_data *qdata)
{
        struct lustre_qunit *qunit = NULL;
        struct qunit_data *tmp;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
                tmp = &qunit->lq_data;
                if (qunit->lq_ctxt == qctxt &&
                    qdata->qd_id == tmp->qd_id &&
                    (qdata->qd_flags & LQUOTA_QUNIT_FLAGS) ==
                    (tmp->qd_flags & LQUOTA_QUNIT_FLAGS))
                        return qunit;
        }
        return NULL;
}

/* check_cur_qunit - check the current usage of a qunit.
 * @qctxt: quota context
 * @qdata: identifies the quota unit (id, type, block/inode) to be checked
 *
 * return: 1 - need to acquire a qunit;
 *         2 - need to release a qunit;
 *         0 - nothing to do.
 *       < 0 - error.
 */
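/* Illustrative example (with the default 128MB block qunit and 64MB btune
 * size, and no pending writes or pending records): a local limit of 256MB
 * against a usage of 200MB satisfies limit <= usage + tune, so one 128MB
 * qunit is acquired (return 1).  The same usage with a 640MB local limit
 * satisfies limit > usage + qunit + tune, so 256MB worth of qunits are
 * released (return 2). */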
static int
check_cur_qunit(struct obd_device *obd,
                struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
{
        struct super_block *sb = qctxt->lqc_sb;
        unsigned long qunit_sz, tune_sz;
        __u64 usage, limit, limit_org, pending_write = 0;
        long long record = 0;
        struct obd_quotactl *qctl;
        struct lustre_qunit_size *lqs = NULL;
        int ret = 0;
        ENTRY;

        if (!sb_any_quota_enabled(sb))
                RETURN(0);

        OBD_ALLOC_PTR(qctl);
        if (qctl == NULL)
                RETURN(-ENOMEM);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = QDATA_IS_GRP(qdata);
        ret = fsfilt_quotactl(obd, sb, qctl);
        if (ret) {
                if (ret == -ESRCH)      /* no limit */
                        ret = 0;
                else
                        CERROR("can't get fs quota usage! (rc:%d)\n", ret);
                GOTO(out, ret);
        }

        if (QDATA_IS_BLK(qdata)) {
                usage = qctl->qc_dqblk.dqb_curspace;
                limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
        } else {
                usage = qctl->qc_dqblk.dqb_curinodes;
                limit = qctl->qc_dqblk.dqb_ihardlimit;
        }

        /* ignore the case of no quota limit; this also avoids creating
         * unnecessary lqs entries for the uid/gid */
        if (!limit)
                GOTO(out, ret = 0);

 search_lqs:
        quota_search_lqs(qdata, NULL, qctxt, &lqs);
        if (!lqs) {
                CDEBUG(D_QUOTA, "Can't find the lustre qunit size!\n");
                ret = quota_create_lqs(qdata, NULL, qctxt, &lqs);
                if (ret == -EALREADY) {
                        ret = 0;
                        goto search_lqs;
                }
                if (ret < 0)
                        GOTO(out, ret);
        }
        spin_lock(&lqs->lqs_lock);

        if (QDATA_IS_BLK(qdata)) {
                qunit_sz = lqs->lqs_bunit_sz;
                tune_sz  = lqs->lqs_btune_sz;
                pending_write = lqs->lqs_bwrite_pending * CFS_PAGE_SIZE;
                record   = lqs->lqs_blk_rec;
                LASSERT(!(qunit_sz % QUOTABLOCK_SIZE));
        } else {
                /* we don't need to change the inode qunit size for now */
                qunit_sz = lqs->lqs_iunit_sz;
                tune_sz  = lqs->lqs_itune_sz;
                pending_write = lqs->lqs_iwrite_pending;
                record   = lqs->lqs_ino_rec;
        }

        /* we don't count the MIN_QLIMIT */
        if ((limit == MIN_QLIMIT && !QDATA_IS_BLK(qdata)) ||
            (toqb(limit) == MIN_QLIMIT && QDATA_IS_BLK(qdata)))
                limit = 0;

        usage += pending_write;
        limit_org = limit;
        /* while a release request is in flight the limit has already been
         * lowered; if limit + record would go negative, adjust the usage
         * instead so the limit doesn't underflow */
        if (limit + record < 0)
                usage -= record;
        else
                limit += record;

        LASSERT(qdata->qd_count == 0);
        if (limit <= usage + tune_sz) {
                while (qdata->qd_count + limit <=
                       usage + tune_sz)
                        qdata->qd_count += qunit_sz;
                ret = 1;
        } else if (limit > usage + qunit_sz + tune_sz &&
                   limit_org > qdata->qd_count + qunit_sz) {
                while (limit - qdata->qd_count > usage + qunit_sz + tune_sz &&
                       limit_org > qdata->qd_count + qunit_sz)
                        qdata->qd_count += qunit_sz;
                ret = 2;
        }
        CDEBUG(D_QUOTA, "type: %c, limit: "LPU64", usage: "LPU64
               ", pending_write: "LPU64", record: "LPD64
               ", qunit_sz: %lu, tune_sz: %lu, ret: %d.\n",
               QDATA_IS_BLK(qdata) ? 'b' : 'i', limit, usage, pending_write,
               record, qunit_sz, tune_sz, ret);
        LASSERT(ret == 0 || qdata->qd_count);

        if (ret > 0) {
                quota_compute_lqs(qdata, lqs, 1, (ret == 1) ? 1 : 0);
                /* when this qdata returns from the mds, it will call lqs_putref */
                lqs_getref(lqs);
        }

        spin_unlock(&lqs->lqs_lock);
        lqs_putref(lqs);
        EXIT;
 out:
        OBD_FREE_PTR(qctl);
        return ret;
}

/* compute the remaining quota for a given gid or uid b=11693 */
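/* Returns QUOTA_RET_OK with qdata->qd_count set to (limit - usage) for the
 * given id, QUOTA_RET_NOLIMIT when no hard limit is set (or for uid 0 user
 * quota), and QUOTA_RET_NOQUOTA when quotas are not enabled on this
 * superblock. */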
int compute_remquota(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                     struct qunit_data *qdata, int isblk)
{
        struct super_block *sb = qctxt->lqc_sb;
        __u64 usage, limit;
        struct obd_quotactl *qctl;
        int ret = QUOTA_RET_OK;
        ENTRY;

        if (!sb_any_quota_enabled(sb))
                RETURN(QUOTA_RET_NOQUOTA);

        /* ignore root user */
        if (qdata->qd_id == 0 && QDATA_IS_GRP(qdata) == USRQUOTA)
                RETURN(QUOTA_RET_NOLIMIT);

        OBD_ALLOC_PTR(qctl);
        if (qctl == NULL)
                RETURN(-ENOMEM);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = QDATA_IS_GRP(qdata);
        ret = fsfilt_quotactl(obd, sb, qctl);
        if (ret) {
                if (ret == -ESRCH)      /* no limit */
                        ret = QUOTA_RET_NOLIMIT;
                else
                        CDEBUG(D_QUOTA, "can't get fs quota usage! (rc:%d)",
                               ret);
                GOTO(out, ret);
        }

        usage = isblk ? qctl->qc_dqblk.dqb_curspace :
                qctl->qc_dqblk.dqb_curinodes;
        limit = isblk ? qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS :
                qctl->qc_dqblk.dqb_ihardlimit;
        if (!limit) {           /* no limit */
                ret = QUOTA_RET_NOLIMIT;
                GOTO(out, ret);
        }

        if (limit >= usage)
                qdata->qd_count = limit - usage;
        else
                qdata->qd_count = 0;
        EXIT;
out:
        OBD_FREE_PTR(qctl);
        return ret;
}

/* caller must hold qunit_hash_lock */
static struct lustre_qunit *dqacq_in_flight(struct lustre_quota_ctxt *qctxt,
                                            struct qunit_data *qdata)
{
        unsigned int hashent = qunit_hashfn(qctxt, qdata);
        struct lustre_qunit *qunit;
        ENTRY;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        qunit = find_qunit(hashent, qctxt, qdata);
        RETURN(qunit);
}

static struct lustre_qunit *alloc_qunit(struct lustre_quota_ctxt *qctxt,
                                        struct qunit_data *qdata, int opc)
{
        struct lustre_qunit *qunit = NULL;
        ENTRY;

        OBD_SLAB_ALLOC(qunit, qunit_cachep, CFS_ALLOC_IO, sizeof(*qunit));
        if (qunit == NULL)
                RETURN(NULL);

        INIT_LIST_HEAD(&qunit->lq_hash);
        INIT_LIST_HEAD(&qunit->lq_waiters);
        atomic_set(&qunit->lq_refcnt, 1);
        qunit->lq_ctxt = qctxt;
        memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
        qunit->lq_opc = opc;

        RETURN(qunit);
}

static inline void free_qunit(struct lustre_qunit *qunit)
{
        OBD_SLAB_FREE(qunit, qunit_cachep, sizeof(*qunit));
}

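/* A qunit is created with a reference count of 1 (see alloc_qunit above);
 * qunit_get/qunit_put take and drop additional references, and the qunit is
 * returned to the slab cache once the last reference is dropped. */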
static inline void qunit_get(struct lustre_qunit *qunit)
{
        atomic_inc(&qunit->lq_refcnt);
}

static void qunit_put(struct lustre_qunit *qunit)
{
        LASSERT(atomic_read(&qunit->lq_refcnt));
        if (atomic_dec_and_test(&qunit->lq_refcnt))
                free_qunit(qunit);
}

static void
insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
{
        struct list_head *head;

        LASSERT(list_empty(&qunit->lq_hash));
        head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
        list_add(&qunit->lq_hash, head);
}

static void compute_lqs_after_removing_qunit(struct lustre_qunit *qunit)
{
        struct lustre_qunit_size *lqs = NULL;

        quota_search_lqs(&qunit->lq_data, NULL, qunit->lq_ctxt, &lqs);
        if (lqs) {
                spin_lock(&lqs->lqs_lock);
                if (qunit->lq_opc == QUOTA_DQACQ)
                        quota_compute_lqs(&qunit->lq_data, lqs, 0, 1);
                if (qunit->lq_opc == QUOTA_DQREL)
                        quota_compute_lqs(&qunit->lq_data, lqs, 0, 0);
                spin_unlock(&lqs->lqs_lock);
                /* this is for quota_search_lqs */
                lqs_putref(lqs);
                /* this is for check_cur_qunit */
                lqs_putref(lqs);
        }
}

static void remove_qunit_nolock(struct lustre_qunit *qunit)
{
        LASSERT(!list_empty(&qunit->lq_hash));
        LASSERT_SPIN_LOCKED(&qunit_hash_lock);

        list_del_init(&qunit->lq_hash);
}

struct qunit_waiter {
        struct list_head qw_entry;
        cfs_waitq_t      qw_waitq;
        int qw_rc;
};

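/* INC_QLIMIT: if the local hard limit is still MIN_QLIMIT (apparently used
 * as a small sentinel limit rather than a real grant), the newly acquired
 * count replaces it instead of being added to it. */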
#define INC_QLIMIT(limit, count) (limit == MIN_QLIMIT) ? \
                                 (limit = count) : (limit += count)


/* FIXME check if this mds is the master of the specified id */
static int
is_master(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
          unsigned int id, int type)
{
        return qctxt->lqc_handler ? 1 : 0;
}

static int
schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait);

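/* split_before_schedule_dqacq: if the peer cannot handle 64-bit quota counts
 * (see should_translate_quota), cap a single DQACQ/DQREL request at the
 * largest multiple of the qunit size that fits in MAX_QUOTA_COUNT32 and send
 * only that much; the remainder is left in qdata. */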
static int split_before_schedule_dqacq(struct obd_device *obd,
                                       struct lustre_quota_ctxt *qctxt,
                                       struct qunit_data *qdata, int opc, int wait)
{
        int rc = 0;
        unsigned long factor;
        struct qunit_data tmp_qdata;
        ENTRY;

        LASSERT(qdata && qdata->qd_count);
        QDATA_DEBUG(qdata, "%s quota split.\n",
                    QDATA_IS_BLK(qdata) ? "block" : "inode");
        if (QDATA_IS_BLK(qdata))
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz *
                        qctxt->lqc_bunit_sz;
        else
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz *
                        qctxt->lqc_iunit_sz;

        if (qctxt->lqc_import && should_translate_quota(qctxt->lqc_import) &&
            qdata->qd_count > factor) {
                tmp_qdata = *qdata;
                tmp_qdata.qd_count = factor;
                qdata->qd_count -= tmp_qdata.qd_count;
                QDATA_DEBUG((&tmp_qdata), "will be split.\n");
                rc = schedule_dqacq(obd, qctxt, &tmp_qdata, opc, wait);
        } else {
                QDATA_DEBUG(qdata, "does not need to be split.\n");
                rc = schedule_dqacq(obd, qctxt, qdata, opc, wait);
        }

        RETURN(rc);
}

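/* dqacq_completion: finish a DQACQ/DQREL request, whether it was handled
 * locally or answered by the master.  On success the local operational quota
 * file is updated (the hard limit is raised or lowered by the granted count),
 * the in-flight qunit is removed from the hash and its waiters are woken, the
 * slave qunit size is adjusted, and a further acquire/release is rescheduled
 * if check_cur_qunit() says one is still needed. */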
static int
dqacq_completion(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                 struct qunit_data *qdata, int rc, int opc)
{
        struct lustre_qunit *qunit = NULL;
        struct super_block *sb = qctxt->lqc_sb;
        struct qunit_waiter *qw, *tmp;
        int err = 0;
        struct quota_adjust_qunit *oqaq = NULL;
        int rc1 = 0;
        ENTRY;

        LASSERT(qdata);
        QDATA_DEBUG(qdata, "obd(%s): complete %s quota req\n",
                    obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");

        /* update local operational quota file */
        if (rc == 0) {
                __u32 count = QUSG(qdata->qd_count, QDATA_IS_BLK(qdata));
                struct obd_quotactl *qctl;
                __u64 *hardlimit;

                OBD_ALLOC_PTR(qctl);
                if (qctl == NULL)
                        GOTO(out, err = -ENOMEM);

                /* acq/rel qunit for specified uid/gid is serialized,
                 * so there is no race between get fs quota limit and
                 * set fs quota limit */
                qctl->qc_cmd = Q_GETQUOTA;
                qctl->qc_id = qdata->qd_id;
                qctl->qc_type = QDATA_IS_GRP(qdata);
                err = fsfilt_quotactl(obd, sb, qctl);
                if (err) {
                        CERROR("failed to get fs quota limit! (rc:%d)\n", err);
                        GOTO(out_mem, err);
                }

                if (QDATA_IS_BLK(qdata)) {
                        qctl->qc_dqblk.dqb_valid = QIF_BLIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_bhardlimit;
                } else {
                        qctl->qc_dqblk.dqb_valid = QIF_ILIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_ihardlimit;
                }

                CDEBUG(D_QUOTA, "hardlimit: "LPU64"\n", *hardlimit);
                switch (opc) {
                case QUOTA_DQACQ:
                        INC_QLIMIT(*hardlimit, count);
                        break;
                case QUOTA_DQREL:
                        LASSERTF(count < *hardlimit,
                                 "count: %u, hardlimit: "LPU64".\n",
                                 count, *hardlimit);
                        *hardlimit -= count;
                        break;
                default:
                        LBUG();
                }

                /* clear quota limit */
                if (count == 0)
                        *hardlimit = 0;

                qctl->qc_cmd = Q_SETQUOTA;
                err = fsfilt_quotactl(obd, sb, qctl);
                if (err)
                        CERROR("failed to set fs quota limit! (rc:%d)\n", err);

                QDATA_DEBUG(qdata, "%s completion\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
out_mem:
                OBD_FREE_PTR(qctl);
        } else if (rc == -EDQUOT) {
                QDATA_DEBUG(qdata, "acquire qunit got EDQUOT.\n");
        } else if (rc == -EBUSY) {
                QDATA_DEBUG(qdata, "it is recovering, got EBUSY.\n");
        } else {
                CERROR("acquire qunit got error! (rc:%d)\n", rc);
        }
out:
        /* remove the qunit from hash */
        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
        /* this qunit has been removed by qctxt_cleanup() */
        if (!qunit) {
                spin_unlock(&qunit_hash_lock);
                RETURN(err);
        }

        LASSERT(opc == qunit->lq_opc);
        remove_qunit_nolock(qunit);
        spin_unlock(&qunit_hash_lock);

        compute_lqs_after_removing_qunit(qunit);

        /* wake up all waiters */
        list_for_each_entry_safe(qw, tmp, &qunit->lq_waiters, qw_entry) {
                list_del_init(&qw->qw_entry);
                qw->qw_rc = rc;
                wake_up(&qw->qw_waitq);
        }

        qunit_put(qunit);

        /* don't reschedule in such cases:
         *   - acq/rel failure and qunit isn't changed,
         *     but not for quota recovery.
         *   - local dqacq/dqrel.
         *   - local disk io failure.
         */
        OBD_ALLOC_PTR(oqaq);
        if (!oqaq)
                RETURN(-ENOMEM);
        qdata_to_oqaq(qdata, oqaq);
        /* adjust the qunit size in slaves */
        rc1 = quota_adjust_slave_lqs(oqaq, qctxt);
        OBD_FREE_PTR(oqaq);
        if (rc1 < 0) {
                CERROR("adjusting the slave's qunit size failed! (rc:%d)\n", rc1);
                RETURN(rc1);
        }
        if (err || (rc && rc != -EBUSY && rc1 == 0) ||
            is_master(obd, qctxt, qdata->qd_id, QDATA_IS_GRP(qdata)))
                RETURN(err);

        /* reschedule another dqacq/dqrel if needed */
        qdata->qd_count = 0;
        qdata->qd_flags &= LQUOTA_QUNIT_FLAGS;
        rc1 = check_cur_qunit(obd, qctxt, qdata);
        if (rc1 > 0) {
                int opc;
                opc = rc1 == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                rc1 = split_before_schedule_dqacq(obd, qctxt, qdata, opc, 0);
                QDATA_DEBUG(qdata, "reschedule opc(%d) rc(%d)\n", opc, rc1);
        }
        RETURN(err);
}

struct dqacq_async_args {
        struct lustre_quota_ctxt *aa_ctxt;
        struct lustre_qunit *aa_qunit;
};

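/* dqacq_interpret: reply callback run by ptlrpcd when a DQACQ/DQREL RPC
 * completes.  It unpacks the qunit_data from the reply, sanity-checks it
 * against the in-flight qunit (id, user/group type, count), and then hands
 * the result to dqacq_completion(). */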
static int dqacq_interpret(struct ptlrpc_request *req, void *data, int rc)
{
        struct dqacq_async_args *aa = (struct dqacq_async_args *)data;
        struct lustre_quota_ctxt *qctxt = aa->aa_ctxt;
        struct lustre_qunit *qunit = aa->aa_qunit;
        struct obd_device *obd = req->rq_import->imp_obd;
        struct qunit_data *qdata = NULL;
        int rc1 = 0;
        ENTRY;

        LASSERT(req);
        LASSERT(req->rq_import);

        /* there are several historical forms of qunit, so adjust the
         * received qunit data into the current form here */
        OBD_ALLOC(qdata, sizeof(struct qunit_data));
        if (!qdata)
                RETURN(-ENOMEM);
        rc1 = quota_get_qdata(req, qdata, QUOTA_REPLY, QUOTA_IMPORT);
        if (rc1 < 0) {
                DEBUG_REQ(D_ERROR, req, "error unpacking qunit_data\n");
                GOTO(exit, rc = -EPROTO);
        }

        QDATA_DEBUG(qdata, "qdata: interpret rc(%d).\n", rc);
        QDATA_DEBUG((&qunit->lq_data), "lq_data:\n");

        if (qdata->qd_id != qunit->lq_data.qd_id ||
            OBD_FAIL_CHECK_ONCE(OBD_FAIL_QUOTA_RET_QDATA)) {
                CDEBUG(D_ERROR, "the returned qd_id isn't expected!"
                       "(qdata: %u, lq_data: %u)\n", qdata->qd_id,
                       qunit->lq_data.qd_id);
                qdata->qd_id = qunit->lq_data.qd_id;
                rc = -EPROTO;
        }
        if (QDATA_IS_GRP(qdata) != QDATA_IS_GRP(&qunit->lq_data)) {
                CDEBUG(D_ERROR, "the returned grp/usr isn't expected!"
                       "(qdata: %u, lq_data: %u)\n", qdata->qd_flags,
                       qunit->lq_data.qd_flags);
                if (QDATA_IS_GRP(&qunit->lq_data))
                        QDATA_SET_GRP(qdata);
                else
                        QDATA_CLR_GRP(qdata);
                rc = -EPROTO;
        }
        if (qdata->qd_count > qunit->lq_data.qd_count) {
                CDEBUG(D_ERROR, "the returned qd_count isn't expected!"
                       "(qdata: "LPU64", lq_data: "LPU64")\n", qdata->qd_count,
                       qunit->lq_data.qd_count);
                rc = -EPROTO;
        }

        rc = dqacq_completion(obd, qctxt, qdata, rc,
                              lustre_msg_get_opc(req->rq_reqmsg));

exit:
        OBD_FREE(qdata, sizeof(struct qunit_data));

        RETURN(rc);
}

static int got_qunit(struct qunit_waiter *waiter)
{
        int rc = 0;
        ENTRY;
        spin_lock(&qunit_hash_lock);
        rc = list_empty(&waiter->qw_entry);
        spin_unlock(&qunit_hash_lock);
        RETURN(rc);
}

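/* schedule_dqacq: send (or piggy-back on) a DQACQ/DQREL request for qdata.
 * If an identical qunit is already in flight, optionally join its waiter
 * list instead of sending a new request.  On the master the request is
 * handled locally through lqc_handler; otherwise a request is built on
 * lqc_import and queued on ptlrpcd, with dqacq_interpret() as the reply
 * callback.  With wait != 0 the caller blocks until the qunit completes. */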
static int
schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait)
{
        struct lustre_qunit *qunit, *empty;
        struct qunit_waiter qw;
        struct l_wait_info lwi = { 0 };
        struct ptlrpc_request *req;
        struct dqacq_async_args *aa;
        int size[2] = { sizeof(struct ptlrpc_body), 0 };
        struct obd_import *imp = NULL;
        unsigned long factor;
        int rc = 0;
        ENTRY;

        INIT_LIST_HEAD(&qw.qw_entry);
        init_waitqueue_head(&qw.qw_waitq);
        qw.qw_rc = 0;

        if ((empty = alloc_qunit(qctxt, qdata, opc)) == NULL)
                RETURN(-ENOMEM);

        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
        if (qunit) {
                struct lustre_qunit_size *lqs = NULL;

                if (wait)
                        list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
                spin_unlock(&qunit_hash_lock);
                free_qunit(empty);

                quota_search_lqs(qdata, NULL, qctxt, &lqs);
                if (lqs) {
                        spin_lock(&lqs->lqs_lock);
                        quota_compute_lqs(qdata, lqs, 0,
                                          (opc == QUOTA_DQACQ) ? 1 : 0);
                        spin_unlock(&lqs->lqs_lock);
                        /* this is for quota_search_lqs */
                        lqs_putref(lqs);
                        /* this is for check_cur_qunit */
                        lqs_putref(lqs);
                } else {
                        CDEBUG(D_ERROR, "Can't find the lustre qunit size!\n");
                }

                goto wait_completion;
        }
        qunit = empty;
        insert_qunit_nolock(qctxt, qunit);
        if (wait)
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
        spin_unlock(&qunit_hash_lock);

        LASSERT(qunit);

        QDATA_DEBUG(qdata, "obd(%s): send %s quota req\n",
                    obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");
        /* master is going to dqacq/dqrel from itself */
        if (is_master(obd, qctxt, qdata->qd_id, QDATA_IS_GRP(qdata))) {
                int rc2;
                QDATA_DEBUG(qdata, "local %s.\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
                rc = qctxt->lqc_handler(obd, qdata, opc);
                rc2 = dqacq_completion(obd, qctxt, qdata, rc, opc);
                RETURN((rc && rc != -EDQUOT) ? rc : rc2);
        }

        spin_lock(&qctxt->lqc_lock);
        if (!qctxt->lqc_import) {
                spin_unlock(&qctxt->lqc_lock);
                QDATA_DEBUG(qdata, "lqc_import is invalid.\n");
                spin_lock(&qunit_hash_lock);
                if (wait)
                        list_del_init(&qw.qw_entry);
                remove_qunit_nolock(qunit);
                spin_unlock(&qunit_hash_lock);
                compute_lqs_after_removing_qunit(qunit);
                qunit = NULL;
                free_qunit(empty);
                RETURN(-EAGAIN);
        } else {
                imp = class_import_get(qctxt->lqc_import);
        }
        spin_unlock(&qctxt->lqc_lock);

        /* build dqacq/dqrel request */
        LASSERT(imp);
        size[1] = quota_get_qunit_data_size(imp->
                                            imp_connect_data.ocd_connect_flags);

        req = ptlrpc_prep_req(imp, LUSTRE_MDS_VERSION, opc, 2,
                              size, NULL);
        if (!req) {
                dqacq_completion(obd, qctxt, qdata, -ENOMEM, opc);
                class_import_put(imp);
                RETURN(-ENOMEM);
        }

        if (QDATA_IS_BLK(qdata))
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz *
                        qctxt->lqc_bunit_sz;
        else
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz *
                        qctxt->lqc_iunit_sz;

        LASSERTF(!should_translate_quota(imp) ||
                 qdata->qd_count <= factor,
                 "qd_count: "LPU64"; should_translate_quota: %d.\n",
                 qdata->qd_count, should_translate_quota(imp));
        rc = quota_copy_qdata(req, qdata, QUOTA_REQUEST, QUOTA_IMPORT);
        if (rc < 0) {
                CDEBUG(D_ERROR, "Can't pack qunit_data\n");
                RETURN(-EPROTO);
        }
        ptlrpc_req_set_repsize(req, 2, size);
        class_import_put(imp);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct dqacq_async_args *)&req->rq_async_args;
        aa->aa_ctxt = qctxt;
        aa->aa_qunit = qunit;

        req->rq_interpret_reply = dqacq_interpret;
        ptlrpcd_add_req(req);

        QDATA_DEBUG(qdata, "%s scheduled.\n",
                    opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
wait_completion:
        if (wait && qunit) {
                struct qunit_data *p = &qunit->lq_data;
                QDATA_DEBUG(p, "wait for dqacq.\n");

                l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
                if (qw.qw_rc == 0)
                        rc = -EAGAIN;

                CDEBUG(D_QUOTA, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
        }
        RETURN(rc);
}

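/* qctxt_adjust_qunit: check both the uid and gid quota for an operation and,
 * when check_cur_qunit() reports that the local limit is too tight or too
 * generous, schedule a DQACQ or DQREL against the master.  With wait != 0
 * the caller blocks until the scheduled request completes. */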
int
qctxt_adjust_qunit(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                   uid_t uid, gid_t gid, __u32 isblk, int wait)
{
        int ret, rc = 0, i = USRQUOTA;
        __u32 id[MAXQUOTAS] = { uid, gid };
        struct qunit_data qdata[MAXQUOTAS];
        ENTRY;

        CLASSERT(MAXQUOTAS < 4);
        if (!sb_any_quota_enabled(qctxt->lqc_sb))
                RETURN(0);

        for (i = 0; i < MAXQUOTAS; i++) {
                qdata[i].qd_id = id[i];
                qdata[i].qd_flags = i;
                if (isblk)
                        QDATA_SET_BLK(&qdata[i]);
                qdata[i].qd_count = 0;

                ret = check_cur_qunit(obd, qctxt, &qdata[i]);
                if (ret > 0) {
                        int opc;
                        /* need to acquire or release */
                        opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                        ret = split_before_schedule_dqacq(obd, qctxt, &qdata[i],
                                                          opc, wait);
                        if (!rc)
                                rc = ret;
                }
        }

        RETURN(rc);
}

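/* qctxt_wait_pending_dqacq: if a DQACQ/DQREL for the given id/type/block is
 * currently in flight, register as a waiter and block until it completes;
 * returns immediately when nothing is pending. */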
int
qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
                         unsigned short type, int isblk)
{
        struct lustre_qunit *qunit = NULL;
        struct qunit_waiter qw;
        struct qunit_data qdata;
        struct l_wait_info lwi = { 0 };
        ENTRY;

        INIT_LIST_HEAD(&qw.qw_entry);
        init_waitqueue_head(&qw.qw_waitq);
        qw.qw_rc = 0;

        qdata.qd_id = id;
        qdata.qd_flags = type;
        if (isblk)
                QDATA_SET_BLK(&qdata);
        qdata.qd_count = 0;

        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, &qdata);
        if (qunit)
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);

        spin_unlock(&qunit_hash_lock);

        if (qunit) {
                struct qunit_data *p = &qdata;
                QDATA_DEBUG(p, "wait for dqacq completion.\n");
                l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
                QDATA_DEBUG(p, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
        }
        RETURN(0);
}

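/* qctxt_init: initialize a per-device quota context with the default qunit
 * and tune sizes (tune = qunit * tune_ratio / 100), enable dynamic qunit
 * switching, and create the lqs hash.  lqc_import starts out NULL and is
 * presumably filled in once the connection to the master is established. */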
int
qctxt_init(struct lustre_quota_ctxt *qctxt, struct super_block *sb,
           dqacq_handler_t handler)
{
        int rc = 0;
        ENTRY;

        LASSERT(qctxt);

        rc = ptlrpcd_addref();
        if (rc)
                RETURN(rc);

        spin_lock_init(&qctxt->lqc_lock);
        spin_lock(&qctxt->lqc_lock);
        qctxt->lqc_handler = handler;
        qctxt->lqc_sb = sb;
        qctxt->lqc_import = NULL;
        qctxt->lqc_recovery = 0;
        qctxt->lqc_switch_qs = 1; /* qunit size switching is enabled by default */
        qctxt->lqc_cqs_boundary_factor = 4;
        qctxt->lqc_cqs_least_bunit = PTLRPC_MAX_BRW_SIZE;
        qctxt->lqc_cqs_least_iunit = 1;
        qctxt->lqc_cqs_qs_factor = 2;
        qctxt->lqc_atype = 0;
        qctxt->lqc_status = 0;
        qctxt->lqc_bunit_sz = default_bunit_sz;
        qctxt->lqc_btune_sz = default_bunit_sz / 100 * default_btune_ratio;
        qctxt->lqc_iunit_sz = default_iunit_sz;
        qctxt->lqc_itune_sz = default_iunit_sz * default_itune_ratio / 100;
        qctxt->lqc_switch_seconds = 300; /* enlarging will wait 5 minutes
                                          * after the last shrinking */
        rc = lustre_hash_init(&LQC_HASH_BODY(qctxt), "LQS_HASH", 128,
                              &lqs_hash_operations);
        if (rc) {
                CDEBUG(D_ERROR, "failed to initialize the lqs hash!\n");
                lustre_hash_exit(&LQC_HASH_BODY(qctxt));
        }
        spin_unlock(&qctxt->lqc_lock);

        RETURN(rc);
}

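/* qctxt_cleanup: tear down a quota context.  Every qunit belonging to this
 * context is pulled off the global hash, its lqs accounting is adjusted via
 * compute_lqs_after_removing_qunit(), its waiters are woken with qw_rc = 0,
 * and its reference is dropped before the lqs hash itself is destroyed. */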
void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
{
        struct lustre_qunit *qunit, *tmp;
        struct qunit_waiter *qw, *tmp2;
        struct list_head tmp_list;
        int i;
        ENTRY;

        INIT_LIST_HEAD(&tmp_list);

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++) {
                list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
                        if (qunit->lq_ctxt != qctxt)
                                continue;
                        remove_qunit_nolock(qunit);
                        list_add(&qunit->lq_hash, &tmp_list);
                }
        }
        spin_unlock(&qunit_hash_lock);

        list_for_each_entry_safe(qunit, tmp, &tmp_list, lq_hash) {
                list_del_init(&qunit->lq_hash);
                compute_lqs_after_removing_qunit(qunit);
                /* wake up all waiters */
                list_for_each_entry_safe(qw, tmp2, &qunit->lq_waiters,
                                         qw_entry) {
                        list_del_init(&qw->qw_entry);
                        qw->qw_rc = 0;
                        wake_up(&qw->qw_waitq);
                }
                qunit_put(qunit);
        }

        lustre_hash_exit(&LQC_HASH_BODY(qctxt));
        ptlrpcd_decref();

        EXIT;
}

struct qslave_recov_thread_data {
        struct obd_device *obd;
        struct lustre_quota_ctxt *qctxt;
        struct completion comp;
};

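/* qslave_recovery_main: recovery thread body.  For each enabled quota type
 * it collects the known quota ids from the quota file via fsfilt_qids(),
 * and for every id that this node is not the master of it re-runs
 * check_cur_qunit()/split_before_schedule_dqacq() so that the local block
 * limits are re-synchronized with the master (only block quota is handled,
 * see the FIXME below). */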
/* FIXME: only recovers block quota for now */
static int qslave_recovery_main(void *arg)
{
        struct qslave_recov_thread_data *data = arg;
        struct obd_device *obd = data->obd;
        struct lustre_quota_ctxt *qctxt = data->qctxt;
        unsigned int type;
        int rc = 0;
        ENTRY;

        ptlrpc_daemonize("qslave_recovd");

        complete(&data->comp);

        if (qctxt->lqc_recovery)
                RETURN(0);
        qctxt->lqc_recovery = 1;

        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                struct qunit_data qdata;
                struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
                struct list_head id_list;
                struct dquot_id *dqid, *tmp;
                int ret;

                LOCK_DQONOFF_MUTEX(dqopt);
                if (!sb_has_quota_enabled(qctxt->lqc_sb, type)) {
                        UNLOCK_DQONOFF_MUTEX(dqopt);
                        break;
                }

                LASSERT(dqopt->files[type] != NULL);
                INIT_LIST_HEAD(&id_list);
#ifndef KERNEL_SUPPORTS_QUOTA_READ
                rc = fsfilt_qids(obd, dqopt->files[type], NULL, type, &id_list);
#else
                rc = fsfilt_qids(obd, NULL, dqopt->files[type], type, &id_list);
#endif
                UNLOCK_DQONOFF_MUTEX(dqopt);
                if (rc)
                        CERROR("Get ids from quota file failed. (rc:%d)\n", rc);

                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
                        list_del_init(&dqid->di_link);
                        /* skip slave recovery on itself */
                        if (is_master(obd, qctxt, dqid->di_id, type))
                                goto free;
                        if (rc && rc != -EBUSY)
                                goto free;

                        qdata.qd_id = dqid->di_id;
                        qdata.qd_flags = type;
                        QDATA_SET_BLK(&qdata);
                        qdata.qd_count = 0;

                        ret = check_cur_qunit(obd, qctxt, &qdata);
                        if (ret > 0) {
                                int opc;
                                opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                                rc = split_before_schedule_dqacq(obd, qctxt,
                                                                 &qdata, opc,
                                                                 0);
                        } else {
                                rc = 0;
                        }

                        if (rc)
                                CDEBUG(rc == -EBUSY ? D_QUOTA : D_ERROR,
                                       "qslave recovery failed! (id:%d type:%d "
                                       "rc:%d)\n", dqid->di_id, type, rc);
free:
                        kfree(dqid);
                }
        }

        qctxt->lqc_recovery = 0;
        RETURN(rc);
}

void
qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
{
        struct qslave_recov_thread_data data;
        int rc;
        ENTRY;

        if (!sb_any_quota_enabled(qctxt->lqc_sb))
                goto exit;

        data.obd = obd;
        data.qctxt = qctxt;
        init_completion(&data.comp);

        rc = kernel_thread(qslave_recovery_main, &data, CLONE_VM|CLONE_FILES);
        if (rc < 0) {
                CERROR("Cannot start quota recovery thread: rc %d\n", rc);
                goto exit;
        }
        wait_for_completion(&data.comp);
exit:
        EXIT;
}