1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  lustre/quota/quota_context.c
5  *  Lustre Quota Context
6  *
7  *  Copyright (c) 2001-2005 Cluster File Systems, Inc.
8  *   Author: Niu YaWei <niu@clusterfs.com>
9  *
10  *   This file is part of Lustre, http://www.lustre.org.
11  *
12  *   No redistribution or use is permitted outside of Cluster File Systems, Inc.
13  *
14  */
15 #ifndef EXPORT_SYMTAB
16 # define EXPORT_SYMTAB
17 #endif
18
19 #define DEBUG_SUBSYSTEM S_MDS
20
21 #include <linux/version.h>
22 #include <linux/fs.h>
23 #include <asm/unistd.h>
24 #include <linux/slab.h>
25 #include <linux/quotaops.h>
26 #include <linux/module.h>
27 #include <linux/init.h>
28
29 #include <obd_class.h>
30 #include <lustre_quota.h>
31 #include <lustre_fsfilt.h>
32 #include <class_hash.h>
33 #include "quota_internal.h"
34
35 extern struct lustre_hash_operations lqs_hash_operations;
36
37 unsigned long default_bunit_sz = 128 * 1024 * 1024; /* 128 MiB */
38 unsigned long default_btune_ratio = 50;             /* 50 percent */
39 unsigned long default_iunit_sz = 5120;              /* 5120 inodes */
40 unsigned long default_itune_ratio = 50;             /* 50 percent */
41
42 cfs_mem_cache_t *qunit_cachep = NULL;
43 struct list_head qunit_hash[NR_DQHASH];
44 spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;
45
46 struct lustre_qunit {
47         struct list_head lq_hash;          /* Hash list in memory */
48         atomic_t lq_refcnt;                /* Use count */
49         struct lustre_quota_ctxt *lq_ctxt; /* Quota context this applies to */
50         struct qunit_data lq_data;         /* See qunit_data */
51         unsigned int lq_opc;               /* QUOTA_DQACQ, QUOTA_DQREL */
52         struct list_head lq_waiters;       /* Threads waiting for this qunit */
53 };
54
55 int should_translate_quota (struct obd_import *imp)
56 {
57         ENTRY;
58
59         LASSERT(imp);
60 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(1, 7, 0, 0)
61         if (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64 &&
62             !OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT))
63 #else
64         if (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64)
65 #endif
66                 RETURN(0);
67         else
68                 RETURN(1);
69 }
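/* Added commentary (not in the original source): a return of 1 generally means
 * the peer did not advertise OBD_CONNECT_QUOTA64, so qd_count has to stay
 * within a 32-bit wire field; callers such as split_before_schedule_dqacq()
 * use this to decide whether a large request must be split into smaller
 * chunks. */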
70
71 void qunit_cache_cleanup(void)
72 {
73         int i;
74         ENTRY;
75
76         spin_lock(&qunit_hash_lock);
77         for (i = 0; i < NR_DQHASH; i++)
78                 LASSERT(list_empty(qunit_hash + i));
79         spin_unlock(&qunit_hash_lock);
80
81         if (qunit_cachep) {
82                 int rc;
83                 rc = cfs_mem_cache_destroy(qunit_cachep);
84                 LASSERTF(rc == 0, "couldn't destroy qunit_cache slab\n");
85                 qunit_cachep = NULL;
86         }
87         EXIT;
88 }
89
90 int qunit_cache_init(void)
91 {
92         int i;
93         ENTRY;
94
95         LASSERT(qunit_cachep == NULL);
96         qunit_cachep = cfs_mem_cache_create("ll_qunit_cache",
97                                             sizeof(struct lustre_qunit),
98                                             0, 0);
99         if (!qunit_cachep)
100                 RETURN(-ENOMEM);
101
102         spin_lock(&qunit_hash_lock);
103         for (i = 0; i < NR_DQHASH; i++)
104                 INIT_LIST_HEAD(qunit_hash + i);
105         spin_unlock(&qunit_hash_lock);
106         RETURN(0);
107 }
108
109 static inline int
110 qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
111              __attribute__((__const__));
112
113 static inline int
114 qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
115 {
116         unsigned int id = qdata->qd_id;
117         unsigned int type = QDATA_IS_GRP(qdata);
118
119         unsigned long tmp = ((unsigned long)qctxt >> L1_CACHE_SHIFT) ^ id;
120         tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
121         return tmp;
122 }
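/* Added commentary: the bucket index mixes the quota context pointer with the
 * id, so the same uid/gid tracked by different contexts usually lands in
 * different chains, and the (MAXQUOTAS - type) multiplier helps keep user and
 * group entries for the same numeric id apart. */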
123
124 /* caller must hold qunit_hash_lock */
125 static inline struct lustre_qunit *find_qunit(unsigned int hashent,
126                                               struct lustre_quota_ctxt *qctxt,
127                                               struct qunit_data *qdata)
128 {
129         struct lustre_qunit *qunit = NULL;
130         struct qunit_data *tmp;
131
132         LASSERT_SPIN_LOCKED(&qunit_hash_lock);
133         list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
134                 tmp = &qunit->lq_data;
135                 if (qunit->lq_ctxt == qctxt &&
136                     qdata->qd_id == tmp->qd_id &&
137                     (qdata->qd_flags & LQUOTA_QUNIT_FLAGS) ==
138                     (tmp->qd_flags & LQUOTA_QUNIT_FLAGS))
139                         return qunit;
140         }
141         return NULL;
142 }
143
144 /* check_cur_qunit - check the current usage against the qunit thresholds.
145  * @qctxt: quota context
146  * @qdata: quota data identifying the id and quota type to be checked
147  *
148  * return: 1 - need to acquire a qunit;
149  *         2 - need to release a qunit;
150  *         0 - nothing to do;
151  *       < 0 - error.
152  */
153 static int
154 check_cur_qunit(struct obd_device *obd,
155                 struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
156 {
157         struct super_block *sb = qctxt->lqc_sb;
158         unsigned long qunit_sz, tune_sz;
159         __u64 usage, limit, limit_org, pending_write = 0;
160         long long record = 0;
161         struct obd_quotactl *qctl;
162         struct lustre_qunit_size *lqs = NULL;
163         int ret = 0;
164         ENTRY;
165
166         if (!sb_any_quota_enabled(sb))
167                 RETURN(0);
168
169         OBD_ALLOC_PTR(qctl);
170         if (qctl == NULL)
171                 RETURN(-ENOMEM);
172
173         /* get fs quota usage & limit */
174         qctl->qc_cmd = Q_GETQUOTA;
175         qctl->qc_id = qdata->qd_id;
176         qctl->qc_type = QDATA_IS_GRP(qdata);
177         ret = fsfilt_quotactl(obd, sb, qctl);
178         if (ret) {
179                 if (ret == -ESRCH)      /* no limit */
180                         ret = 0;
181                 else
182                         CERROR("can't get fs quota usage! (rc:%d)\n", ret);
183                 GOTO(out, ret);
184         }
185
186         if (QDATA_IS_BLK(qdata)) {
187                 usage = qctl->qc_dqblk.dqb_curspace;
188                 limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
189         } else {
190                 usage = qctl->qc_dqblk.dqb_curinodes;
191                 limit = qctl->qc_dqblk.dqb_ihardlimit;
192         }
193
194         /* ignore the case of no quota limit; this also avoids creating
195          * unnecessary lqs entries for the uid/gid */
196         if (!limit)
197                 GOTO(out, ret = 0);
198
199  search_lqs:
200         quota_search_lqs(qdata, NULL, qctxt, &lqs);
201         if (!lqs) {
202                 CDEBUG(D_QUOTA, "Can't find the lustre qunit size!\n");
203                 ret = quota_create_lqs(qdata, NULL, qctxt, &lqs);
204                 if (ret == -EALREADY) {
205                         ret = 0;
206                         goto search_lqs;
207                 }
208                 if (ret < 0)
209                         GOTO(out, ret);
210         }
211         spin_lock(&lqs->lqs_lock);
212
213         if (QDATA_IS_BLK(qdata)) {
214                 qunit_sz = lqs->lqs_bunit_sz;
215                 tune_sz  = lqs->lqs_btune_sz;
216                 pending_write = lqs->lqs_bwrite_pending * CFS_PAGE_SIZE;
217                 record   = lqs->lqs_blk_rec;
218                 LASSERT(!(qunit_sz % QUOTABLOCK_SIZE));
219         } else {
220                 /* we don't need to change the inode qunit size for now */
221                 qunit_sz = lqs->lqs_iunit_sz;
222                 tune_sz  = lqs->lqs_itune_sz;
223                 pending_write = lqs->lqs_iwrite_pending;
224                 record   = lqs->lqs_ino_rec;
225         }
226
227         /* we don't count the MIN_QLIMIT */
228         if ((limit == MIN_QLIMIT && !QDATA_IS_BLK(qdata)) ||
229             (toqb(limit) == MIN_QLIMIT && QDATA_IS_BLK(qdata)))
230                 limit = 0;
231
232         usage += pending_write;
233         limit_org = limit;
234         /* while a release request is in flight the limit has already been
235            lowered, so limit + record could go negative; adjust usage instead */
236         if (limit + record < 0)
237                 usage -= record;
238         else
239                 limit += record;
240
241         LASSERT(qdata->qd_count == 0);
242         if (limit <= usage + tune_sz) {
243                 while (qdata->qd_count + limit <=
244                        usage + tune_sz)
245                         qdata->qd_count += qunit_sz;
246                 ret = 1;
247         } else if (limit > usage + qunit_sz + tune_sz &&
248                    limit_org > qdata->qd_count + qunit_sz) {
249                 while (limit - qdata->qd_count > usage + qunit_sz + tune_sz &&
250                        limit_org > qdata->qd_count + qunit_sz)
251                         qdata->qd_count += qunit_sz;
252                 ret = 2;
253         }
254         CDEBUG(D_QUOTA, "type: %c, limit: "LPU64", usage: "LPU64
255                ", pending_write: "LPU64", record: "LPD64
256                ", qunit_sz: %lu, tune_sz: %lu, ret: %d.\n",
257                QDATA_IS_BLK(qdata) ? 'b' : 'i', limit, usage, pending_write,
258                record, qunit_sz, tune_sz, ret);
259         LASSERT(ret == 0 || qdata->qd_count);
260
261         spin_unlock(&lqs->lqs_lock);
262         lqs_putref(lqs);
263         EXIT;
264  out:
265         OBD_FREE_PTR(qctl);
266         return ret;
267 }
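/* Illustrative example (added, with made-up numbers and the default block
 * sizes declared above): qunit_sz = 128 MiB, tune_sz = 64 MiB, limit =
 * 1024 MiB, usage + pending_write = 1000 MiB.  Since 1024 <= 1000 + 64 an
 * acquire is needed; the loop adds a single qunit (0 + 1024 <= 1064, but
 * 128 + 1024 > 1064), so qd_count ends up at 128 MiB and the function
 * returns 1. */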
268
269 /* compute the remaining quota for a certain uid or gid, b=11693 */
270 int compute_remquota(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
271                      struct qunit_data *qdata, int isblk)
272 {
273         struct super_block *sb = qctxt->lqc_sb;
274         __u64 usage, limit;
275         struct obd_quotactl *qctl;
276         int ret = QUOTA_RET_OK;
277         ENTRY;
278
279         if (!sb_any_quota_enabled(sb))
280                 RETURN(QUOTA_RET_NOQUOTA);
281
282         /* ignore root user */
283         if (qdata->qd_id == 0 && QDATA_IS_GRP(qdata) == USRQUOTA)
284                 RETURN(QUOTA_RET_NOLIMIT);
285
286         OBD_ALLOC_PTR(qctl);
287         if (qctl == NULL)
288                 RETURN(-ENOMEM);
289
290         /* get fs quota usage & limit */
291         qctl->qc_cmd = Q_GETQUOTA;
292         qctl->qc_id = qdata->qd_id;
293         qctl->qc_type = QDATA_IS_GRP(qdata);
294         ret = fsfilt_quotactl(obd, sb, qctl);
295         if (ret) {
296                 if (ret == -ESRCH)      /* no limit */
297                         ret = QUOTA_RET_NOLIMIT;
298                 else
299                         CDEBUG(D_QUOTA, "can't get fs quota usage! (rc:%d)\n",
300                                ret);
301                 GOTO(out, ret);
302         }
303
304         usage = isblk ? qctl->qc_dqblk.dqb_curspace :
305                 qctl->qc_dqblk.dqb_curinodes;
306         limit = isblk ? qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS :
307                 qctl->qc_dqblk.dqb_ihardlimit;
308         if (!limit) {           /* no limit */
309                 ret = QUOTA_RET_NOLIMIT;
310                 GOTO(out, ret);
311         }
312
313         if (limit >= usage)
314                 qdata->qd_count = limit - usage;
315         else
316                 qdata->qd_count = 0;
317         EXIT;
318 out:
319         OBD_FREE_PTR(qctl);
320         return ret;
321 }
322
323 /* caller must hold qunit_hash_lock */
324 static struct lustre_qunit *dqacq_in_flight(struct lustre_quota_ctxt *qctxt,
325                                             struct qunit_data *qdata)
326 {
327         unsigned int hashent = qunit_hashfn(qctxt, qdata);
328         struct lustre_qunit *qunit;
329         ENTRY;
330
331         LASSERT_SPIN_LOCKED(&qunit_hash_lock);
332         qunit = find_qunit(hashent, qctxt, qdata);
333         RETURN(qunit);
334 }
335
336 static struct lustre_qunit *alloc_qunit(struct lustre_quota_ctxt *qctxt,
337                                         struct qunit_data *qdata, int opc)
338 {
339         struct lustre_qunit *qunit = NULL;
340         ENTRY;
341
342         OBD_SLAB_ALLOC(qunit, qunit_cachep, CFS_ALLOC_IO, sizeof(*qunit));
343         if (qunit == NULL)
344                 RETURN(NULL);
345
346         INIT_LIST_HEAD(&qunit->lq_hash);
347         INIT_LIST_HEAD(&qunit->lq_waiters);
348         atomic_set(&qunit->lq_refcnt, 1);
349         qunit->lq_ctxt = qctxt;
350         memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
351         qunit->lq_opc = opc;
352
353         RETURN(qunit);
354 }
355
356 static inline void free_qunit(struct lustre_qunit *qunit)
357 {
358         OBD_SLAB_FREE(qunit, qunit_cachep, sizeof(*qunit));
359 }
360
361 static inline void qunit_get(struct lustre_qunit *qunit)
362 {
363         atomic_inc(&qunit->lq_refcnt);
364 }
365
366 static void qunit_put(struct lustre_qunit *qunit)
367 {
368         LASSERT(atomic_read(&qunit->lq_refcnt));
369         if (atomic_dec_and_test(&qunit->lq_refcnt))
370                 free_qunit(qunit);
371 }
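/* Added note on the qunit lifetime: alloc_qunit() starts the refcount at 1
 * for the creator; dqacq_completion() drops that reference with qunit_put()
 * once the qunit is unhashed and its waiters are woken, and qctxt_cleanup()
 * does the same for qunits it tears down. */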
372
373 static void
374 insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
375 {
376         struct list_head *head;
377
378         LASSERT(list_empty(&qunit->lq_hash));
379         head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
380         list_add(&qunit->lq_hash, head);
381 }
382
383 static void compute_lqs_after_removing_qunit(struct lustre_qunit *qunit)
384 {
385         struct lustre_qunit_size *lqs = NULL;
386
387         quota_search_lqs(&qunit->lq_data, NULL, qunit->lq_ctxt, &lqs);
388         if (lqs) {
389                 spin_lock(&lqs->lqs_lock);
390                 if (qunit->lq_opc == QUOTA_DQACQ)
391                         quota_compute_lqs(&qunit->lq_data, lqs, 0, 1);
392                 if (qunit->lq_opc == QUOTA_DQREL)
393                         quota_compute_lqs(&qunit->lq_data, lqs, 0, 0);
394                 spin_unlock(&lqs->lqs_lock);
395                 /* this is for quota_search_lqs */
396                 lqs_putref(lqs);
397                 /* this is for schedule_dqacq */
398                 lqs_putref(lqs);
399         }
400
401 }
402
403 static void remove_qunit_nolock(struct lustre_qunit *qunit)
404 {
405         LASSERT(!list_empty(&qunit->lq_hash));
406         LASSERT_SPIN_LOCKED(&qunit_hash_lock);
407
408         list_del_init(&qunit->lq_hash);
409 }
410
411 struct qunit_waiter {
412         struct list_head qw_entry;
413         cfs_waitq_t      qw_waitq;
414         int qw_rc;
415 };
416
417 #define INC_QLIMIT(limit, count) (limit == MIN_QLIMIT) ? \
418                                  (limit = count) : (limit += count)
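/* Added note: MIN_QLIMIT acts as a minimal placeholder limit (check_cur_qunit()
 * ignores it as well), so when the stored hard limit still equals MIN_QLIMIT
 * the freshly acquired count replaces it rather than being added to it. */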
419
420
421 /* FIXME check if this mds is the master of specified id */
422 static int
423 is_master(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
424           unsigned int id, int type)
425 {
426         return qctxt->lqc_handler ? 1 : 0;
427 }
428
429 static int
430 schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
431                struct qunit_data *qdata, int opc, int wait);
432
433 static int split_before_schedule_dqacq(struct obd_device *obd,
434                                        struct lustre_quota_ctxt *qctxt,
435                                        struct qunit_data *qdata, int opc, int wait)
436 {
437         int rc = 0;
438         unsigned long factor;
439         struct qunit_data tmp_qdata;
440         ENTRY;
441
442         LASSERT(qdata && qdata->qd_count);
443         QDATA_DEBUG(qdata, "%s quota split.\n",
444                     QDATA_IS_BLK(qdata) ? "block" : "inode");
445         if (QDATA_IS_BLK(qdata))
446                 factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz *
447                         qctxt->lqc_bunit_sz;
448         else
449                 factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz *
450                         qctxt->lqc_iunit_sz;
451
452         if (qctxt->lqc_import && should_translate_quota(qctxt->lqc_import) &&
453             qdata->qd_count > factor) {
454                 tmp_qdata = *qdata;
455                 tmp_qdata.qd_count = factor;
456                 qdata->qd_count -= tmp_qdata.qd_count;
457                 QDATA_DEBUG((&tmp_qdata), "will be split.\n");
458                 rc = schedule_dqacq(obd, qctxt, &tmp_qdata, opc, wait);
459         } else {
460                 QDATA_DEBUG(qdata, "will not be split.\n");
461                 rc = schedule_dqacq(obd, qctxt, qdata, opc, wait);
462         }
463
464         RETURN(rc);
465 }
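/* Added note on the split factor above: factor is MAX_QUOTA_COUNT32 rounded
 * down to a whole multiple of the per-id unit size (lqc_bunit_sz or
 * lqc_iunit_sz), i.e. the largest unit-aligned count that still fits the
 * 32-bit wire format used by peers without OBD_CONNECT_QUOTA64; anything
 * larger is clamped to that size for a single RPC. */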
466
467 static int
468 dqacq_completion(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
469                  struct qunit_data *qdata, int rc, int opc)
470 {
471         struct lustre_qunit *qunit = NULL;
472         struct super_block *sb = qctxt->lqc_sb;
473         struct qunit_waiter *qw, *tmp;
474         int err = 0;
475         struct quota_adjust_qunit *oqaq = NULL;
476         int rc1 = 0;
477         ENTRY;
478
479         LASSERT(qdata);
480         QDATA_DEBUG(qdata, "obd(%s): complete %s quota req\n",
481                     obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");
482
483         /* update local operational quota file */
484         if (rc == 0) {
485                 __u32 count = QUSG(qdata->qd_count, QDATA_IS_BLK(qdata));
486                 struct obd_quotactl *qctl;
487                 __u64 *hardlimit;
488
489                 OBD_ALLOC_PTR(qctl);
490                 if (qctl == NULL)
491                         GOTO(out, err = -ENOMEM);
492
493                 /* acq/rel of a qunit for a given uid/gid is serialized,
494                  * so there is no race between getting and setting the
495                  * fs quota limit */
496                 qctl->qc_cmd = Q_GETQUOTA;
497                 qctl->qc_id = qdata->qd_id;
498                 qctl->qc_type = QDATA_IS_GRP(qdata);
499                 err = fsfilt_quotactl(obd, sb, qctl);
500                 if (err) {
501                         CERROR("error get quota fs limit! (rc:%d)\n", err);
502                         GOTO(out_mem, err);
503                 }
504
505                 if (QDATA_IS_BLK(qdata)) {
506                         qctl->qc_dqblk.dqb_valid = QIF_BLIMITS;
507                         hardlimit = &qctl->qc_dqblk.dqb_bhardlimit;
508                 } else {
509                         qctl->qc_dqblk.dqb_valid = QIF_ILIMITS;
510                         hardlimit = &qctl->qc_dqblk.dqb_ihardlimit;
511                 }
512
513                 CDEBUG(D_QUOTA, "hardlimit: "LPU64"\n", *hardlimit);
514                 switch (opc) {
515                 case QUOTA_DQACQ:
516                         INC_QLIMIT(*hardlimit, count);
517                         break;
518                 case QUOTA_DQREL:
519                         LASSERTF(count < *hardlimit,
520                                  "count: %d, hardlimit: "LPU64".\n",
521                                  count, *hardlimit);
522                         *hardlimit -= count;
523                         break;
524                 default:
525                         LBUG();
526                 }
527
528                 /* clear quota limit */
529                 if (count == 0)
530                         *hardlimit = 0;
531
532                 qctl->qc_cmd = Q_SETQUOTA;
533                 err = fsfilt_quotactl(obd, sb, qctl);
534                 if (err)
535                         CERROR("error set quota fs limit! (rc:%d)\n", err);
536
537                 QDATA_DEBUG(qdata, "%s completion\n",
538                             opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
539 out_mem:
540                 OBD_FREE_PTR(qctl);
541         } else if (rc == -EDQUOT) {
542                 QDATA_DEBUG(qdata, "acquire qunit got EDQUOT.\n");
543         } else if (rc == -EBUSY) {
544                 QDATA_DEBUG(qdata, "it is recovering, got EBUSY.\n");
545         } else {
546                 CERROR("acquire qunit got error! (rc:%d)\n", rc);
547         }
548 out:
549         /* remove the qunit from hash */
550         spin_lock(&qunit_hash_lock);
551
552         qunit = dqacq_in_flight(qctxt, qdata);
553         /* this qunit has been removed by qctxt_cleanup() */
554         if (!qunit) {
555                 spin_unlock(&qunit_hash_lock);
556                 RETURN(err);
557         }
558
559         LASSERT(opc == qunit->lq_opc);
560         remove_qunit_nolock(qunit);
561         spin_unlock(&qunit_hash_lock);
562
563         compute_lqs_after_removing_qunit(qunit);
564
565         /* wake up all waiters */
566         list_for_each_entry_safe(qw, tmp, &qunit->lq_waiters, qw_entry) {
567                 list_del_init(&qw->qw_entry);
568                 qw->qw_rc = rc;
569                 wake_up(&qw->qw_waitq);
570         }
571
572         qunit_put(qunit);
573
574         /* don't reschedule another dqacq/dqrel in these cases:
575          *   - the acq/rel failed and the qunit size was not changed
576          *     (except during quota recovery);
577          *   - the dqacq/dqrel was handled locally (this node is the master);
578          *   - a local disk I/O failure occurred.
579          */
580          OBD_ALLOC_PTR(oqaq);
581          if (!oqaq)
582                  RETURN(-ENOMEM);
583          qdata_to_oqaq(qdata, oqaq);
584          /* adjust the qunit size in slaves */
585          rc1 = quota_adjust_slave_lqs(oqaq, qctxt);
586          OBD_FREE_PTR(oqaq);
587          if (rc1 < 0) {
588                  CERROR("adjust slave's qunit size failed!(rc:%d)\n", rc1);
589                  RETURN(rc1);
590          }
591          if (err || (rc && rc != -EBUSY && rc1 == 0) ||
592              is_master(obd, qctxt, qdata->qd_id, QDATA_IS_GRP(qdata)))
593                 RETURN(err);
594
595         /* reschedule another dqacq/dqrel if needed */
596         qdata->qd_count = 0;
597         qdata->qd_flags &= LQUOTA_QUNIT_FLAGS;
598         rc1 = check_cur_qunit(obd, qctxt, qdata);
599         if (rc1 > 0) {
600                 int opc;
601                 opc = rc1 == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
602                 rc1 = split_before_schedule_dqacq(obd, qctxt, qdata, opc, 0);
603                 QDATA_DEBUG(qdata, "reschedule opc(%d) rc(%d)\n", opc, rc1);
604         }
605         RETURN(err);
606 }
607
608 struct dqacq_async_args {
609         struct lustre_quota_ctxt *aa_ctxt;
610         struct lustre_qunit *aa_qunit;
611 };
612
613 static int dqacq_interpret(struct ptlrpc_request *req, void *data, int rc)
614 {
615         struct dqacq_async_args *aa = (struct dqacq_async_args *)data;
616         struct lustre_quota_ctxt *qctxt = aa->aa_ctxt;
617         struct lustre_qunit *qunit = aa->aa_qunit;
618         struct obd_device *obd = req->rq_import->imp_obd;
619         struct qunit_data *qdata = NULL;
620         int rc1 = 0;
621         ENTRY;
622
623         LASSERT(req);
624         LASSERT(req->rq_import);
625
626         /* there are several qunit wire formats (for historical reasons), so
627          * convert the received qunit data to a common form here */
628         OBD_ALLOC(qdata, sizeof(struct qunit_data));
629         if (!qdata)
630                 RETURN(-ENOMEM);
631         rc1 = quota_get_qdata(req, qdata, QUOTA_REPLY, QUOTA_IMPORT);
632         if (rc1 < 0) {
633                 DEBUG_REQ(D_ERROR, req, "error unpacking qunit_data\n");
634                 GOTO(exit, rc = -EPROTO);
635         }
636
637         QDATA_DEBUG(qdata, "qdata: interpret rc(%d).\n", rc);
638         QDATA_DEBUG((&qunit->lq_data), "lq_data: \n");
639
640         if (qdata->qd_id != qunit->lq_data.qd_id ||
641             OBD_FAIL_CHECK_ONCE(OBD_FAIL_QUOTA_RET_QDATA)) {
642                 CDEBUG(D_ERROR, "the returned qd_id isn't expected!"
643                        "(qdata: %u, lq_data: %u)\n", qdata->qd_id,
644                        qunit->lq_data.qd_id);
645                 qdata->qd_id = qunit->lq_data.qd_id;
646                 rc = -EPROTO;
647         }
648         if (QDATA_IS_GRP(qdata) != QDATA_IS_GRP(&qunit->lq_data)) {
649                 CDEBUG(D_ERROR, "the returned grp/usr isn't expected!"
650                        "(qdata: %u, lq_data: %u)\n", qdata->qd_flags,
651                        qunit->lq_data.qd_flags);
652                 if (QDATA_IS_GRP(&qunit->lq_data))
653                         QDATA_SET_GRP(qdata);
654                 else
655                         QDATA_CLR_GRP(qdata);
656                 rc = -EPROTO;
657         }
658         if (qdata->qd_count > qunit->lq_data.qd_count) {
659                 CDEBUG(D_ERROR, "the returned qd_count isn't expected!"
660                        "(qdata: "LPU64", lq_data: "LPU64")\n", qdata->qd_count,
661                        qunit->lq_data.qd_count);
662                 rc = -EPROTO;
663         }
664
665         rc = dqacq_completion(obd, qctxt, qdata, rc,
666                               lustre_msg_get_opc(req->rq_reqmsg));
667
668 exit:
669         OBD_FREE(qdata, sizeof(struct qunit_data));
670
671         RETURN(rc);
672 }
673
674 static int got_qunit(struct qunit_waiter *waiter)
675 {
676         int rc = 0;
677         ENTRY;
678         spin_lock(&qunit_hash_lock);
679         rc = list_empty(&waiter->qw_entry);
680         spin_unlock(&qunit_hash_lock);
681         RETURN(rc);
682 }
683
684 static int
685 schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
686                struct qunit_data *qdata, int opc, int wait)
687 {
688         struct lustre_qunit *qunit, *empty;
689         struct qunit_waiter qw;
690         struct l_wait_info lwi = { 0 };
691         struct ptlrpc_request *req;
692         struct dqacq_async_args *aa;
693         int size[2] = { sizeof(struct ptlrpc_body), 0 };
694         struct obd_import *imp = NULL;
695         unsigned long factor;
696         struct lustre_qunit_size *lqs = NULL;
697         int rc = 0;
698         ENTRY;
699
700         INIT_LIST_HEAD(&qw.qw_entry);
701         init_waitqueue_head(&qw.qw_waitq);
702         qw.qw_rc = 0;
703
704         if ((empty = alloc_qunit(qctxt, qdata, opc)) == NULL)
705                 RETURN(-ENOMEM);
706
707         spin_lock(&qunit_hash_lock);
708
709         qunit = dqacq_in_flight(qctxt, qdata);
710         if (qunit) {
711                 if (wait)
712                         list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
713                 spin_unlock(&qunit_hash_lock);
714                 free_qunit(empty);
715
716                 goto wait_completion;
717         }
718         qunit = empty;
719         insert_qunit_nolock(qctxt, qunit);
720         if (wait)
721                 list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
722         spin_unlock(&qunit_hash_lock);
723
724         LASSERT(qunit);
725
726         quota_search_lqs(qdata, NULL, qctxt, &lqs);
727         if (lqs) {
728                 spin_lock(&lqs->lqs_lock);
729                 quota_compute_lqs(qdata, lqs, 1, (opc == QUOTA_DQACQ) ? 1 : 0);
730                 /* lqs_putref is called when the reply for this qdata returns from the MDS */
731                 lqs_getref(lqs);
732                 spin_unlock(&lqs->lqs_lock);
733                 /* this is for quota_search_lqs */
734                 lqs_putref(lqs);
735         } else {
736                 CDEBUG(D_ERROR, "Can't find the lustre qunit size!\n");
737         }
738
739         QDATA_DEBUG(qdata, "obd(%s): send %s quota req\n",
740                     obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");
741         /* master is going to dqacq/dqrel from itself */
742         if (is_master(obd, qctxt, qdata->qd_id, QDATA_IS_GRP(qdata))) {
743                 int rc2;
744                 QDATA_DEBUG(qdata, "local %s.\n",
745                             opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
746                 rc = qctxt->lqc_handler(obd, qdata, opc);
747                 rc2 = dqacq_completion(obd, qctxt, qdata, rc, opc);
748                 RETURN(rc ? rc : rc2);
749         }
750
751         spin_lock(&qctxt->lqc_lock);
752         if (!qctxt->lqc_import) {
753                 spin_unlock(&qctxt->lqc_lock);
754                 QDATA_DEBUG(qdata, "lqc_import is invalid.\n");
755                 spin_lock(&qunit_hash_lock);
756                 if (wait)
757                         list_del_init(&qw.qw_entry);
758                 remove_qunit_nolock(qunit);
759                 spin_unlock(&qunit_hash_lock);
760                 compute_lqs_after_removing_qunit(qunit); /* before clearing qunit */
761                 qunit = NULL;
762                 free_qunit(empty);
763                 RETURN(-EAGAIN);
764         } else {
765                 imp = class_import_get(qctxt->lqc_import);
766         }
767         spin_unlock(&qctxt->lqc_lock);
768
769         /* build dqacq/dqrel request */
770         LASSERT(imp);
771         size[1] = quota_get_qunit_data_size(imp->
772                                             imp_connect_data.ocd_connect_flags);
773
774         req = ptlrpc_prep_req(imp, LUSTRE_MDS_VERSION, opc, 2,
775                               size, NULL);
776         if (!req) {
777                 dqacq_completion(obd, qctxt, qdata, -ENOMEM, opc);
778                 class_import_put(imp);
779                 RETURN(-ENOMEM);
780         }
781
782         if (QDATA_IS_BLK(qdata))
783                 factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz *
784                         qctxt->lqc_bunit_sz;
785         else
786                 factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz *
787                         qctxt->lqc_iunit_sz;
788
789         LASSERTF(!should_translate_quota(imp) ||
790                  qdata->qd_count <= factor,
791                  "qd_count: "LPU64"; should_translate_quota: %d.\n",
792                  qdata->qd_count, should_translate_quota(imp));
793         rc = quota_copy_qdata(req, qdata, QUOTA_REQUEST, QUOTA_IMPORT);
794         if (rc < 0) {
795                 CDEBUG(D_ERROR, "Can't pack qunit_data\n");
796                 RETURN(-EPROTO);
797         }
798         ptlrpc_req_set_repsize(req, 2, size);
799         class_import_put(imp);
800
801         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
802         aa = (struct dqacq_async_args *)&req->rq_async_args;
803         aa->aa_ctxt = qctxt;
804         aa->aa_qunit = qunit;
805
806         req->rq_interpret_reply = dqacq_interpret;
807         ptlrpcd_add_req(req);
808
809         QDATA_DEBUG(qdata, "%s scheduled.\n",
810                     opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
811 wait_completion:
812         if (wait && qunit) {
813                 struct qunit_data *p = &qunit->lq_data;
814                 QDATA_DEBUG(p, "wait for dqacq.\n");
815
816                 l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
817                 /* rc = -EAGAIN, it means a quota req is finished;
818                  * rc = -EDQUOT, it means out of quota
819                  * rc = -EBUSY, it means recovery is happening
820                  * other rc < 0, it means real errors, functions who call
821                  * schedule_dqacq should take care of this */
822                 if (qw.qw_rc == 0)
823                         rc = -EAGAIN;
824                 else
825                         rc = qw.qw_rc;
826
827                 CDEBUG(D_QUOTA, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
828         }
829         RETURN(rc);
830 }
831
832 int
833 qctxt_adjust_qunit(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
834                    uid_t uid, gid_t gid, __u32 isblk, int wait)
835 {
836         int ret, rc = 0, i = USRQUOTA;
837         __u32 id[MAXQUOTAS] = { uid, gid };
838         struct qunit_data qdata[MAXQUOTAS];
839         ENTRY;
840
841         CLASSERT(MAXQUOTAS < 4);
842         if (!sb_any_quota_enabled(qctxt->lqc_sb))
843                 RETURN(0);
844
845         for (i = 0; i < MAXQUOTAS; i++) {
846                 qdata[i].qd_id = id[i];
847                 qdata[i].qd_flags = i;
848                 if (isblk)
849                         QDATA_SET_BLK(&qdata[i]);
850                 qdata[i].qd_count = 0;
851
852                 ret = check_cur_qunit(obd, qctxt, &qdata[i]);
853                 if (ret > 0) {
854                         int opc;
855                         /* need acquire or release */
856                         opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
857                         ret = split_before_schedule_dqacq(obd, qctxt, &qdata[i], 
858                                                           opc, wait);
859                         if (!rc)
860                                 rc = ret;
861                 } else if (wait == 1) {
862                         /* when wait equals 1, the caller is mds_quota_acquire
863                          * or filter_quota_acquire. */
864                         qctxt_wait_pending_dqacq(qctxt, id[i], i, isblk);
865                 }
866         }
867
868         RETURN(rc);
869 }
870
871 int
872 qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
873                          unsigned short type, int isblk)
874 {
875         struct lustre_qunit *qunit = NULL;
876         struct qunit_waiter qw;
877         struct qunit_data qdata;
878         struct l_wait_info lwi = { 0 };
879         ENTRY;
880
881         INIT_LIST_HEAD(&qw.qw_entry);
882         init_waitqueue_head(&qw.qw_waitq);
883         qw.qw_rc = 0;
884
885         qdata.qd_id = id;
886         qdata.qd_flags = type;
887         if (isblk)
888                 QDATA_SET_BLK(&qdata);
889         qdata.qd_count = 0;
890
891         spin_lock(&qunit_hash_lock);
892
893         qunit = dqacq_in_flight(qctxt, &qdata);
894         if (qunit)
895                 list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
896
897         spin_unlock(&qunit_hash_lock);
898
899         if (qunit) {
900                 struct qunit_data *p = &qdata;
901                 QDATA_DEBUG(p, "wait for dqacq completion.\n");
902                 l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
903                 QDATA_DEBUG(p, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
904         }
905         RETURN(0);
906 }
907
908 int
909 qctxt_init(struct lustre_quota_ctxt *qctxt, struct super_block *sb,
910            dqacq_handler_t handler)
911 {
912         int rc = 0;
913         ENTRY;
914
915         LASSERT(qctxt);
916
917         rc = ptlrpcd_addref();
918         if (rc)
919                 RETURN(rc);
920
921         spin_lock_init(&qctxt->lqc_lock);
922         spin_lock(&qctxt->lqc_lock);
923         qctxt->lqc_handler = handler;
924         qctxt->lqc_sb = sb;
925         qctxt->lqc_import = NULL;
926         qctxt->lqc_recovery = 0;
927         qctxt->lqc_switch_qs = 1; /* Change qunit size in default setting */
928         qctxt->lqc_cqs_boundary_factor = 4;
929         qctxt->lqc_cqs_least_bunit = PTLRPC_MAX_BRW_SIZE;
930         qctxt->lqc_cqs_least_iunit = 1;
931         qctxt->lqc_cqs_qs_factor = 2;
932         qctxt->lqc_atype = 0;
933         qctxt->lqc_status = 0;
934         qctxt->lqc_bunit_sz = default_bunit_sz;
935         qctxt->lqc_btune_sz = default_bunit_sz / 100 * default_btune_ratio;
936         qctxt->lqc_iunit_sz = default_iunit_sz;
937         qctxt->lqc_itune_sz = default_iunit_sz * default_itune_ratio / 100;
938         qctxt->lqc_switch_seconds = 300; /* enlarging will wait 5 minutes
939                                           * after the last shrinking */
940         rc = lustre_hash_init(&LQC_HASH_BODY(qctxt), "LQS_HASH", 128,
941                               &lqs_hash_operations);
942         if (rc) {
943                 CDEBUG(D_ERROR, "failed to initialize the lqs hash!\n");
944                 lustre_hash_exit(&LQC_HASH_BODY(qctxt));
945         }
946         spin_unlock(&qctxt->lqc_lock);
947
948         RETURN(rc);
949 }
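/* Worked example (added) using the defaults declared at the top of this file:
 * lqc_btune_sz = 128 MiB * 50 / 100, roughly 64 MiB, and lqc_itune_sz =
 * 5120 * 50 / 100 = 2560 inodes, i.e. each tune size is about half of its
 * unit size, which is the slack check_cur_qunit() keeps between the hard
 * limit and the current usage. */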
950
951 void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
952 {
953         struct lustre_qunit *qunit, *tmp;
954         struct qunit_waiter *qw, *tmp2;
955         struct list_head tmp_list;
956         int i;
957         ENTRY;
958
959         INIT_LIST_HEAD(&tmp_list);
960
961         spin_lock(&qunit_hash_lock);
962         for (i = 0; i < NR_DQHASH; i++) {
963                 list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
964                         if (qunit->lq_ctxt != qctxt)
965                                 continue;
966                         remove_qunit_nolock(qunit);
967                         list_add(&qunit->lq_hash, &tmp_list);
968                 }
969         }
970         spin_unlock(&qunit_hash_lock);
971
972         list_for_each_entry_safe(qunit, tmp, &tmp_list, lq_hash) {
973                 list_del_init(&qunit->lq_hash);
974                 compute_lqs_after_removing_qunit(qunit);
975                 /* wake up all waiters */
976                 list_for_each_entry_safe(qw, tmp2, &qunit->lq_waiters,
977                                          qw_entry) {
978                         list_del_init(&qw->qw_entry);
979                         qw->qw_rc = 0;
980                         wake_up(&qw->qw_waitq);
981                 }
982                 qunit_put(qunit);
983         }
984
985         lustre_hash_exit(&LQC_HASH_BODY(qctxt));
986         ptlrpcd_decref();
987
988         EXIT;
989 }
990
991 struct qslave_recov_thread_data {
992         struct obd_device *obd;
993         struct lustre_quota_ctxt *qctxt;
994         struct completion comp;
995 };
996
997 /* FIXME: only block quota is recovered for now */
998 static int qslave_recovery_main(void *arg)
999 {
1000         struct qslave_recov_thread_data *data = arg;
1001         struct obd_device *obd = data->obd;
1002         struct lustre_quota_ctxt *qctxt = data->qctxt;
1003         unsigned int type;
1004         int rc = 0;
1005         ENTRY;
1006
1007         ptlrpc_daemonize("qslave_recovd");
1008
1009         complete(&data->comp);
1010
1011         if (qctxt->lqc_recovery)
1012                 RETURN(0);
1013         qctxt->lqc_recovery = 1;
1014
1015         for (type = USRQUOTA; type < MAXQUOTAS; type++) {
1016                 struct qunit_data qdata;
1017                 struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
1018                 struct list_head id_list;
1019                 struct dquot_id *dqid, *tmp;
1020                 int ret;
1021
1022                 LOCK_DQONOFF_MUTEX(dqopt);
1023                 if (!sb_has_quota_enabled(qctxt->lqc_sb, type)) {
1024                         UNLOCK_DQONOFF_MUTEX(dqopt);
1025                         break;
1026                 }
1027
1028                 LASSERT(dqopt->files[type] != NULL);
1029                 INIT_LIST_HEAD(&id_list);
1030 #ifndef KERNEL_SUPPORTS_QUOTA_READ
1031                 rc = fsfilt_qids(obd, dqopt->files[type], NULL, type, &id_list);
1032 #else
1033                 rc = fsfilt_qids(obd, NULL, dqopt->files[type], type, &id_list);
1034 #endif
1035                 UNLOCK_DQONOFF_MUTEX(dqopt);
1036                 if (rc)
1037                         CERROR("Get ids from quota file failed. (rc:%d)\n", rc);
1038
1039                 list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
1040                         list_del_init(&dqid->di_link);
1041                         /* skip slave recovery on itself */
1042                         if (is_master(obd, qctxt, dqid->di_id, type))
1043                                 goto free;
1044                         if (rc && rc != -EBUSY)
1045                                 goto free;
1046
1047                         qdata.qd_id = dqid->di_id;
1048                         qdata.qd_flags = type;
1049                         QDATA_SET_BLK(&qdata);
1050                         qdata.qd_count = 0;
1051
1052                         ret = check_cur_qunit(obd, qctxt, &qdata);
1053                         if (ret > 0) {
1054                                 int opc;
1055                                 opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
1056                                 rc = split_before_schedule_dqacq(obd, qctxt,
1057                                                                  &qdata, opc,
1058                                                                  0);
1059                                 if (rc == -EDQUOT)
1060                                         rc = 0;
1061                         } else {
1062                                 rc = 0;
1063                         }
1064
1065                         if (rc)
1066                                 CDEBUG(rc == -EBUSY ? D_QUOTA : D_ERROR,
1067                                        "qslave recovery failed! (id:%d type:%d "
1068                                        " rc:%d)\n", dqid->di_id, type, rc);
1069 free:
1070                         kfree(dqid);
1071                 }
1072         }
1073
1074         qctxt->lqc_recovery = 0;
1075         RETURN(rc);
1076 }
1077
1078 void
1079 qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
1080 {
1081         struct qslave_recov_thread_data data;
1082         int rc;
1083         ENTRY;
1084
1085         if (!sb_any_quota_enabled(qctxt->lqc_sb))
1086                 goto exit;
1087
1088         data.obd = obd;
1089         data.qctxt = qctxt;
1090         init_completion(&data.comp);
1091
1092         rc = kernel_thread(qslave_recovery_main, &data, CLONE_VM|CLONE_FILES);
1093         if (rc < 0) {
1094                 CERROR("Cannot start quota recovery thread: rc %d\n", rc);
1095                 goto exit;
1096         }
1097         wait_for_completion(&data.comp);
1098 exit:
1099         EXIT;
1100 }
1101