/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_LQUOTA

#ifdef __KERNEL__
# include <linux/version.h>
# include <linux/module.h>
# include <linux/init.h>
# include <linux/fs.h>
# include <linux/jbd.h>
# include <linux/buffer_head.h>
# include <linux/workqueue.h>
# include <linux/mount.h>
#else /* __KERNEL__ */
# include <liblustre.h>
#endif

#include <obd_class.h>
#include <lustre_mds.h>
#include <lustre_dlm.h>
#include <lustre_cfg.h>
#include <obd_ost.h>
#include <lustre_fsfilt.h>
#include <lustre_quota.h>
#include <lprocfs_status.h>
#include "quota_internal.h"

#ifdef __KERNEL__

static cfs_time_t last_print = 0;
static DEFINE_SPINLOCK(last_print_lock);

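/* Set up the quota slave context on an OST/filter device: initialize the
 * target rwsem and quotacheck semaphore, select the v2 quota format and
 * create the quota context (no dqacq handler on the slave side). */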
static int filter_quota_setup(struct obd_device *obd)
{
        int rc = 0;
        struct obd_device_target *obt = &obd->u.obt;
        ENTRY;

        cfs_init_rwsem(&obt->obt_rwsem);
        obt->obt_qfmt = LUSTRE_QUOTA_V2;
        cfs_sema_init(&obt->obt_quotachecking, 1);
        rc = qctxt_init(obd, NULL);
        if (rc)
                CERROR("initialize quota context failed! (rc:%d)\n", rc);

        RETURN(rc);
}

static int filter_quota_cleanup(struct obd_device *obd)
{
        ENTRY;
        qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
        RETURN(0);
}

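/* Bind the quota context to the quota master: record the export's reverse
 * import as lqc_import, wake up any thread waiting for the master and kick
 * off slave recovery to release over-acquired limits. */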
static int filter_quota_setinfo(struct obd_device *obd, void *data)
{
        struct obd_export *exp = data;
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        struct obd_import *imp = exp->exp_imp_reverse;
        ENTRY;

        LASSERT(imp != NULL);

        /* setup the quota context import */
        cfs_spin_lock(&qctxt->lqc_lock);
        if (qctxt->lqc_import != NULL) {
                cfs_spin_unlock(&qctxt->lqc_lock);
                if (qctxt->lqc_import == imp)
                        CDEBUG(D_WARNING, "%s: lqc_import(%p) of obd(%p) was "
                               "activated already.\n", obd->obd_name, imp, obd);
                else
                        CERROR("%s: lqc_import(%p:%p) of obd(%p) was "
                               "activated by others.\n", obd->obd_name,
                               qctxt->lqc_import, imp, obd);
        } else {
                qctxt->lqc_import = imp;
                /* copy the quota-related connect flags from the export to the
                 * reverse import so later checks need not scan the export list */
                imp->imp_connect_data.ocd_connect_flags |=
                                (exp->exp_connect_flags &
                                 (OBD_CONNECT_QUOTA64 | OBD_CONNECT_CHANGE_QS));
                cfs_spin_unlock(&qctxt->lqc_lock);
                CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is reactivated "
                       "now.\n", obd->obd_name, imp, obd);

                cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
                /* start quota slave recovery thread. (release high limits) */
                qslave_start_recovery(obd, qctxt);
        }
        RETURN(0);
}

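/* Invalidate the quota master import when its export is destroyed, clean up
 * the import and interrupt any in-flight dqacq requests. */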
static int filter_quota_clearinfo(struct obd_export *exp, struct obd_device *obd)
{
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        struct obd_import *imp = exp->exp_imp_reverse;
        ENTRY;

        /* lquota may not be set up before destroying export, b=14896 */
        if (!obd->obd_set_up)
                RETURN(0);

        if (unlikely(imp == NULL))
                RETURN(0);

        /* when exp->exp_imp_reverse is destroyed, the corresponding lqc_import
         * should be invalidated, b=12374 */
        cfs_spin_lock(&qctxt->lqc_lock);
        if (qctxt->lqc_import == imp) {
                qctxt->lqc_import = NULL;
                cfs_spin_unlock(&qctxt->lqc_lock);
                CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is invalid now.\n",
                       obd->obd_name, imp, obd);
                ptlrpc_cleanup_imp(imp);
                dqacq_interrupt(qctxt);
        } else {
                cfs_spin_unlock(&qctxt->lqc_lock);
        }
        RETURN(0);
}

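/* Enable or disable quota enforcement for the current thread: when ignore is
 * set, raise CFS_CAP_SYS_RESOURCE so the following writes bypass the quota
 * check; otherwise lower it so quota is enforced again. */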
static int filter_quota_enforce(struct obd_device *obd, unsigned int ignore)
{
        ENTRY;

        if (!ll_sb_any_quota_active(obd->u.obt.obt_sb))
                RETURN(0);

        if (ignore) {
                CDEBUG(D_QUOTA, "blocks will be written ignoring quota.\n");
                cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
        } else {
                cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
        }

        RETURN(0);
}

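/* GET_OA_ID() picks the uid or gid out of the obdo according to the quota
 * type. filter_quota_getflag() marks the obdo with OBD_FL_NO_USRQUOTA or
 * OBD_FL_NO_GRPQUOTA when the id's qunit has shrunk to the sync threshold
 * or the id is already over its block hard limit. */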
#define GET_OA_ID(flag, oa) (flag == USRQUOTA ? oa->o_uid : oa->o_gid)
static int filter_quota_getflag(struct obd_device *obd, struct obdo *oa)
{
        struct obd_device_target *obt = &obd->u.obt;
        struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
        int err, cnt, rc = 0;
        struct obd_quotactl *oqctl;
        ENTRY;

        if (!ll_sb_any_quota_active(obt->obt_sb))
                RETURN(0);

        OBD_ALLOC_PTR(oqctl);
        if (!oqctl)
                RETURN(-ENOMEM);

        /* set over quota flags for a uid/gid */
        oa->o_valid |= OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA;
        oa->o_flags &= ~(OBD_FL_NO_USRQUOTA | OBD_FL_NO_GRPQUOTA);

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                struct lustre_qunit_size *lqs = NULL;

                /* check if quota is enabled */
                if (!ll_sb_has_quota_active(obt->obt_sb, cnt))
                        continue;

                lqs = quota_search_lqs(LQS_KEY(cnt, GET_OA_ID(cnt, oa)),
                                       qctxt, 0);
                if (IS_ERR(lqs)) {
                        rc = PTR_ERR(lqs);
                        CDEBUG(D_QUOTA, "search lqs for %s %d failed, "
                               "(rc = %d)\n",
                               cnt == USRQUOTA ? "user" : "group",
                               GET_OA_ID(cnt, oa), rc);
                        break;
                } else if (lqs == NULL) {
                        /* continue to check group quota if the file's owner
                         * doesn't have quota limit. LU-530 */
                        continue;
                } else {
                        cfs_spin_lock(&lqs->lqs_lock);
                        if (lqs->lqs_bunit_sz <= qctxt->lqc_sync_blk) {
                                oa->o_flags |= (cnt == USRQUOTA) ?
                                        OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
                                cfs_spin_unlock(&lqs->lqs_lock);
                                CDEBUG(D_QUOTA, "set sync flag: bunit(%lu), "
                                       "sync_blk(%d)\n", lqs->lqs_bunit_sz,
                                       qctxt->lqc_sync_blk);
                                /* this is for quota_search_lqs */
                                lqs_putref(lqs);
                                continue;
                        }
                        cfs_spin_unlock(&lqs->lqs_lock);
                        /* this is for quota_search_lqs */
                        lqs_putref(lqs);
                }

                memset(oqctl, 0, sizeof(*oqctl));

                oqctl->qc_cmd = Q_GETQUOTA;
                oqctl->qc_type = cnt;
                oqctl->qc_id = (cnt == USRQUOTA) ? oa->o_uid : oa->o_gid;
                err = fsfilt_quotactl(obd, obt->obt_sb, oqctl);
                if (err) {
                        if (!rc)
                                rc = err;
                        oa->o_valid &= ~((cnt == USRQUOTA) ? OBD_MD_FLUSRQUOTA :
                                                             OBD_MD_FLGRPQUOTA);
                        CDEBUG(D_QUOTA, "fsfilt getquota for %s %d failed, "
                               "(rc = %d)\n",
                               cnt == USRQUOTA ? "user" : "group",
                               cnt == USRQUOTA ? oa->o_uid : oa->o_gid, err);
                        continue;
                }

                if (oqctl->qc_dqblk.dqb_bhardlimit &&
                   (toqb(oqctl->qc_dqblk.dqb_curspace) >=
                    oqctl->qc_dqblk.dqb_bhardlimit)) {
                        oa->o_flags |= (cnt == USRQUOTA) ?
                                OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
                        CDEBUG(D_QUOTA, "out of quota for %s %d\n",
                               cnt == USRQUOTA ? "user" : "group",
                               cnt == USRQUOTA ? oa->o_uid : oa->o_gid);
                }
        }
        OBD_FREE_PTR(oqctl);
        RETURN(rc);
}

/**
 * Check whether the remaining quota of the given uid and gid can satisfy a
 * block_write or inode_create RPC; return QUOTA_RET_ACQUOTA when more quota
 * needs to be acquired from the master.
 */
static int quota_check_common(struct obd_device *obd, const unsigned int id[],
                              int pending[], int count, int cycle, int isblk,
                              struct inode *inode, int frags)
{
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        int i;
        struct qunit_data qdata[MAXQUOTAS];
        int mb = 0;
        int rc = 0, rc2[2] = { 0, 0 };
        ENTRY;

        cfs_spin_lock(&qctxt->lqc_lock);
        if (!qctxt->lqc_valid) {
                cfs_spin_unlock(&qctxt->lqc_lock);
                RETURN(rc);
        }
        cfs_spin_unlock(&qctxt->lqc_lock);

        for (i = 0; i < MAXQUOTAS; i++) {
                struct lustre_qunit_size *lqs = NULL;

                qdata[i].qd_id = id[i];
                qdata[i].qd_flags = i;
                if (isblk)
                        QDATA_SET_BLK(&qdata[i]);
                qdata[i].qd_count = 0;

                /* check if quota is enabled */
                if (!ll_sb_has_quota_active(qctxt->lqc_sb, i))
                        continue;

                /* ignore root user */
                if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
                        continue;

                lqs = quota_search_lqs(LQS_KEY(i, id[i]), qctxt, 0);
                if (lqs == NULL)
                        continue;

                if (IS_ERR(lqs)) {
                        CERROR("can not find lqs for check_common: "
                               "[id %u] [%c] [isblk %d] [count %d] [rc %ld]\n",
                               id[i], i % 2 ? 'g': 'u', isblk, count,
                               PTR_ERR(lqs));
                        RETURN(PTR_ERR(lqs));
                }

                rc2[i] = compute_remquota(obd, qctxt, &qdata[i], isblk);
                cfs_spin_lock(&lqs->lqs_lock);
                if (!cycle) {
                        if (isblk) {
                                pending[i] = count * CFS_PAGE_SIZE;
                                /* completing this write may need extra
                                 * metadata blocks; fsfilt_get_mblk() estimates
                                 * them from the data to be written, b=16542 */
                                if (inode) {
                                        mb = pending[i];
                                        rc = fsfilt_get_mblk(obd, qctxt->lqc_sb,
                                                             &mb, inode,
                                                             frags);
                                        if (rc)
                                                CERROR("%s: can't get extra "
                                                       "meta blocks\n",
                                                       obd->obd_name);
                                        else
                                                pending[i] += mb;
                                }
                                LASSERTF(pending[i] >= 0, "pending is not valid"
                                         ", count=%d, mb=%d\n", count, mb);
                                lqs->lqs_bwrite_pending += pending[i];
                        } else {
                                pending[i] = count;
                                lqs->lqs_iwrite_pending += pending[i];
                        }
                }

                /* if xx_rec < 0, quota is being released and may be returned
                 * to the master before we get to use it. If we hit this
                 * situation, assume it has already been returned, b=18491 */
                if (isblk && lqs->lqs_blk_rec < 0) {
                        if (qdata[i].qd_count < -lqs->lqs_blk_rec)
                                qdata[i].qd_count = 0;
                        else
                                qdata[i].qd_count += lqs->lqs_blk_rec;
                }
                if (!isblk && lqs->lqs_ino_rec < 0) {
                        if (qdata[i].qd_count < -lqs->lqs_ino_rec)
                                qdata[i].qd_count = 0;
                        else
                                qdata[i].qd_count += lqs->lqs_ino_rec;
                }

                CDEBUG(D_QUOTA, "[id %u] [%c] [isblk %d] [count %d]"
                       " [lqs pending: %lu] [qd_count: "LPU64"] [metablocks: %d]"
                       " [pending: %d]\n", id[i], i % 2 ? 'g': 'u', isblk, count,
                       isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
                       qdata[i].qd_count, mb, pending[i]);
                if (rc2[i] == QUOTA_RET_OK) {
                        if (isblk && qdata[i].qd_count < lqs->lqs_bwrite_pending)
                                rc2[i] = QUOTA_RET_ACQUOTA;
                        if (!isblk && qdata[i].qd_count <
                            lqs->lqs_iwrite_pending)
                                rc2[i] = QUOTA_RET_ACQUOTA;
                }

                cfs_spin_unlock(&lqs->lqs_lock);

                if (lqs->lqs_blk_rec < 0 &&
                    qdata[i].qd_count <
                    lqs->lqs_bwrite_pending - lqs->lqs_blk_rec - mb)
                        OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_REL, 5);

                /* When cycle is zero, lqs_*_pending has been changed above;
                 * take an extra reference on the lqs here and drop it in
                 * quota_pending_commit, b=14784 */
                if (!cycle)
                        lqs_getref(lqs);

                /* this is for quota_search_lqs */
                lqs_putref(lqs);
        }

        if (rc2[0] == QUOTA_RET_ACQUOTA || rc2[1] == QUOTA_RET_ACQUOTA)
                RETURN(QUOTA_RET_ACQUOTA);
        else
                RETURN(rc);
}

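/* Return 1 if a block or inode limit (the given QB_SET/QI_SET flag) is set on
 * this target for either the user or the group id, 0 otherwise. */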
int quota_is_set(struct obd_device *obd, const unsigned int id[], int flag)
{
        struct lustre_qunit_size *lqs;
        int i, q_set = 0;

        if (!ll_sb_any_quota_active(obd->u.obt.obt_qctxt.lqc_sb))
                RETURN(0);

        for (i = 0; i < MAXQUOTAS; i++) {
                /* check if quota is enabled */
                if (!ll_sb_has_quota_active(obd->u.obt.obt_qctxt.lqc_sb, i))
                        continue;
                lqs = quota_search_lqs(LQS_KEY(i, id[i]),
                                       &obd->u.obt.obt_qctxt, 0);
                if (lqs && !IS_ERR(lqs)) {
                        if (lqs->lqs_flags & flag)
                                q_set = 1;
                        lqs_putref(lqs);
                }
        }

        return q_set;
}

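/* Check whether enough local quota is available for this write or create and,
 * if not, keep acquiring from the quota master until the operation fits, the
 * id runs out of quota (-EDQUOT), the export dies (-ENOTCONN), or the
 * configuration turns out to be inconsistent (-ESRCH). */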
static int quota_chk_acq_common(struct obd_device *obd, struct obd_export *exp,
                                const unsigned int id[], int pending[],
                                int count, quota_acquire acquire,
                                struct obd_trans_info *oti, int isblk,
                                struct inode *inode, int frags)
{
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        struct timeval work_start;
        struct timeval work_end;
        long timediff;
        struct l_wait_info lwi = { 0 };
        int rc = 0, cycle = 0, count_err = 1;
        ENTRY;

        if (!quota_is_set(obd, id, isblk ? QB_SET : QI_SET))
                RETURN(0);

        if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
                /* If the client has been evicted or if it
                 * timed out and tried to reconnect already,
                 * abort the request immediately */
                RETURN(-ENOTCONN);

        CDEBUG(D_QUOTA, "check quota for %s\n", obd->obd_name);
        pending[USRQUOTA] = pending[GRPQUOTA] = 0;
        /* Unfortunately, if the quota master is too busy to handle the
         * pre-dqacq in time and the quota hash on the OST is used up, we
         * have to wait for the in-flight dqacq/dqrel to complete in order
         * to get enough quota for the write, b=12588 */
        cfs_gettimeofday(&work_start);
        while ((rc = quota_check_common(obd, id, pending, count, cycle, isblk,
                                        inode, frags)) &
               QUOTA_RET_ACQUOTA) {
                struct ptlrpc_thread *thr = oti != NULL ?
                                            oti->oti_thread : NULL;

                cfs_spin_lock(&qctxt->lqc_lock);
                if (!qctxt->lqc_import && oti != NULL) {
                        cfs_spin_unlock(&qctxt->lqc_lock);

                        LASSERT(thr != NULL);
                        /* The recovery thread doesn't have watchdog
                         * attached. LU-369 */
                        if (thr->t_watchdog != NULL)
                                lc_watchdog_disable(thr->t_watchdog);
                        CDEBUG(D_QUOTA, "sleep for quota master\n");
                        l_wait_event(qctxt->lqc_wait_for_qmaster,
                                     check_qm(qctxt), &lwi);

                        CDEBUG(D_QUOTA, "wake up when quota master is back\n");
                        if (thr->t_watchdog != NULL) {
                                lc_watchdog_touch(thr->t_watchdog,
                                   ptlrpc_server_get_timeout(thr->t_svcpt));
                        }
                } else {
                        cfs_spin_unlock(&qctxt->lqc_lock);
                }

                cycle++;
                if (isblk)
                        OBD_FAIL_TIMEOUT(OBD_FAIL_OST_HOLD_WRITE_RPC, 90);
                /* after acquire(), run quota_check_common again so that we
                 * can confirm there is enough quota to finish the write */
                rc = acquire(obd, id, oti, isblk);

                /* please refer to dqacq_completion for the below */
                /* a new request is finished, try again */
                if (rc == QUOTA_REQ_RETURNED) {
                        CDEBUG(D_QUOTA, "finish a quota req, try again\n");
                        continue;
                }

                /* it is out of quota already */
                if (rc == -EDQUOT) {
                        CDEBUG(D_QUOTA, "out of quota, return -EDQUOT\n");
                        break;
                }

                /* Related quota has been disabled by master, but enabled by
                 * slave, do not try again. */
                if (unlikely(rc == -ESRCH)) {
                        CERROR("mismatched quota configuration, stop trying.\n");
                        break;
                }

                if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
                        /* The client has been evicted or has tried to
                         * reconnect already, abort the request */
                        RETURN(-ENOTCONN);

                /* -EBUSY and others, wait a second and try again */
                if (rc < 0) {
                        cfs_waitq_t        waitq;
                        struct l_wait_info lwi;

                        if (thr != NULL && thr->t_watchdog != NULL)
                                lc_watchdog_touch(thr->t_watchdog,
                                   ptlrpc_server_get_timeout(thr->t_svcpt));
                        CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc,
                               count_err++);

                        cfs_waitq_init(&waitq);
                        lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)), NULL,
                                          NULL);
                        l_wait_event(waitq, 0, &lwi);
                }

                if (rc < 0 || cycle % 10 == 0) {
                        cfs_spin_lock(&last_print_lock);
                        if (last_print == 0 ||
                            cfs_time_before((last_print + cfs_time_seconds(30)),
                                            cfs_time_current())) {
                                last_print = cfs_time_current();
                                cfs_spin_unlock(&last_print_lock);
                                CWARN("still haven't managed to acquire quota "
                                      "space from the quota master after %d "
                                      "retries (err=%d, rc=%d)\n",
                                      cycle, count_err - 1, rc);
                        } else {
                                cfs_spin_unlock(&last_print_lock);
                        }
                }

                CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
                       cycle);
        }
        cfs_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        lprocfs_counter_add(qctxt->lqc_stats,
                            isblk ? LQUOTA_WAIT_FOR_CHK_BLK :
                                    LQUOTA_WAIT_FOR_CHK_INO,
                            timediff);

        if (rc > 0)
                rc = 0;
        RETURN(rc);
}

/**
 * When a block_write or inode_create RPC finishes, adjust the records of
 * pending blocks and inodes accordingly.
 */
static int quota_pending_commit(struct obd_device *obd, const unsigned int id[],
                                int pending[], int isblk)
{
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        struct timeval work_start;
        struct timeval work_end;
        long timediff;
        int i;
        struct qunit_data qdata[MAXQUOTAS];
        ENTRY;

        CDEBUG(D_QUOTA, "commit pending quota for %s\n", obd->obd_name);
        CLASSERT(MAXQUOTAS < 4);
        if (!ll_sb_any_quota_active(qctxt->lqc_sb))
                RETURN(0);

        cfs_gettimeofday(&work_start);
        for (i = 0; i < MAXQUOTAS; i++) {
                struct lustre_qunit_size *lqs = NULL;

                LASSERT(pending[i] >= 0);
                if (pending[i] == 0)
                        continue;

                qdata[i].qd_id = id[i];
                qdata[i].qd_flags = i;
                if (isblk)
                        QDATA_SET_BLK(&qdata[i]);
                qdata[i].qd_count = 0;

                if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
                        continue;

                lqs = quota_search_lqs(LQS_KEY(i, qdata[i].qd_id), qctxt, 0);
                if (lqs == NULL || IS_ERR(lqs)) {
                        CERROR("can not find lqs for pending_commit: "
                               "[id %u] [%c] [pending %u] [isblk %d] (rc %ld), "
                               "this may cause an unexpected lqs refcount error!\n",
                               id[i], i ? 'g': 'u', pending[i], isblk,
                               lqs ? PTR_ERR(lqs) : -1);
                        continue;
                }

                cfs_spin_lock(&lqs->lqs_lock);
                if (isblk) {
                        LASSERTF(lqs->lqs_bwrite_pending >= pending[i],
                                 "there are too many blocks! [id %u] [%c] "
                                 "[bwrite_pending %lu] [pending %u]\n",
                                 id[i], i % 2 ? 'g' : 'u',
                                 lqs->lqs_bwrite_pending, pending[i]);

                        lqs->lqs_bwrite_pending -= pending[i];
                } else {
                        LASSERTF(lqs->lqs_iwrite_pending >= pending[i],
                                "there are too many files! [id %u] [%c] "
                                "[iwrite_pending %lu] [pending %u]\n",
                                id[i], i % 2 ? 'g' : 'u',
                                lqs->lqs_iwrite_pending, pending[i]);

                        lqs->lqs_iwrite_pending -= pending[i];
                }
                CDEBUG(D_QUOTA, "%s: lqs_pending=%lu pending[%d]=%d isblk=%d\n",
                       obd->obd_name,
                       isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
                       i, pending[i], isblk);
                cfs_spin_unlock(&lqs->lqs_lock);

                /* for quota_search_lqs in pending_commit */
                lqs_putref(lqs);
                /* for quota_search_lqs in quota_check */
                lqs_putref(lqs);
        }
        cfs_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        lprocfs_counter_add(qctxt->lqc_stats,
                            isblk ? LQUOTA_WAIT_FOR_COMMIT_BLK :
                                    LQUOTA_WAIT_FOR_COMMIT_INO,
                            timediff);

        RETURN(0);
}

static int mds_quota_init(void)
{
        return lustre_dquot_init();
}

static int mds_quota_exit(void)
{
        lustre_dquot_exit();
        return 0;
}

static int mds_quota_setup(struct obd_device *obd)
{
        struct obd_device_target *obt = &obd->u.obt;
        struct mds_obd *mds = &obd->u.mds;
        int rc;
        ENTRY;

        if (unlikely(mds->mds_quota)) {
                CWARN("try to reinitialize quota context!\n");
                RETURN(0);
        }

        cfs_init_rwsem(&obt->obt_rwsem);
        obt->obt_qfmt = LUSTRE_QUOTA_V2;
        mds->mds_quota_info.qi_version = LUSTRE_QUOTA_V2;
        cfs_sema_init(&obt->obt_quotachecking, 1);
        /* initialize quota master and quota context */
        cfs_init_rwsem(&mds->mds_qonoff_sem);
        rc = qctxt_init(obd, dqacq_handler);
        if (rc) {
                CERROR("%s: initialize quota context failed! (rc:%d)\n",
                       obd->obd_name, rc);
                RETURN(rc);
        }
        mds->mds_quota = 1;
        RETURN(rc);
}

static int mds_quota_cleanup(struct obd_device *obd)
{
        ENTRY;
        if (unlikely(!obd->u.mds.mds_quota))
                RETURN(0);

        qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
        RETURN(0);
}

static int mds_quota_setinfo(struct obd_device *obd, void *data)
{
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        ENTRY;

        if (unlikely(!obd->u.mds.mds_quota))
                RETURN(0);

        if (data != NULL)
                QUOTA_MASTER_READY(qctxt);
        else
                QUOTA_MASTER_UNREADY(qctxt);
        RETURN(0);
}

static int mds_quota_fs_cleanup(struct obd_device *obd)
{
        struct mds_obd *mds = &obd->u.mds;
        struct obd_quotactl oqctl;
        ENTRY;

        if (unlikely(!mds->mds_quota))
                RETURN(0);

        mds->mds_quota = 0;
        memset(&oqctl, 0, sizeof(oqctl));
        oqctl.qc_type = UGQUOTA;

        cfs_down_write(&mds->mds_qonoff_sem);
        mds_admin_quota_off(obd, &oqctl);
        cfs_up_write(&mds->mds_qonoff_sem);
        RETURN(0);
}

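/* Common acquire callback shared by the MDS and OST method tables: delegate
 * to qctxt_adjust_qunit() to pre-acquire quota from the master for the
 * given ids. */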
static int quota_acquire_common(struct obd_device *obd, const unsigned int id[],
                                struct obd_trans_info *oti, int isblk)
{
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        int rc;
        ENTRY;

        rc = qctxt_adjust_qunit(obd, qctxt, id, isblk, 1, oti);
        RETURN(rc);
}

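/* Quota method tables: mds_quota_interface is wired into the MDS (quota
 * master), filter_quota_interface into the OST/filter (quota slave). */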
quota_interface_t mds_quota_interface = {
        .quota_init     = mds_quota_init,
        .quota_exit     = mds_quota_exit,
        .quota_setup    = mds_quota_setup,
        .quota_cleanup  = mds_quota_cleanup,
        .quota_check    = target_quota_check,
        .quota_ctl      = mds_quota_ctl,
        .quota_setinfo  = mds_quota_setinfo,
        .quota_fs_cleanup = mds_quota_fs_cleanup,
        .quota_recovery = mds_quota_recovery,
        .quota_adjust   = mds_quota_adjust,
        .quota_chkquota = quota_chk_acq_common,
        .quota_acquire  = quota_acquire_common,
        .quota_pending_commit = quota_pending_commit,
};

quota_interface_t filter_quota_interface = {
        .quota_setup    = filter_quota_setup,
        .quota_cleanup  = filter_quota_cleanup,
        .quota_check    = target_quota_check,
        .quota_ctl      = filter_quota_ctl,
        .quota_setinfo  = filter_quota_setinfo,
        .quota_clearinfo = filter_quota_clearinfo,
        .quota_enforce  = filter_quota_enforce,
        .quota_getflag  = filter_quota_getflag,
        .quota_acquire  = quota_acquire_common,
        .quota_adjust   = filter_quota_adjust,
        .quota_chkquota = quota_chk_acq_common,
        .quota_adjust_qunit   = filter_quota_adjust_qunit,
        .quota_pending_commit = quota_pending_commit,
};

cfs_proc_dir_entry_t *lquota_type_proc_dir = NULL;

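/* Module init/exit: register the lquota procfs directory, set up the qunit
 * cache and export the quota interfaces to the MDS and OST modules. */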
static int __init init_lustre_quota(void)
{
        int rc = 0;

        lquota_type_proc_dir = lprocfs_register(OBD_LQUOTA_DEVICENAME,
                                                proc_lustre_root,
                                                NULL, NULL);
        if (IS_ERR(lquota_type_proc_dir)) {
                CERROR("LProcFS failed in lquota-init\n");
                rc = PTR_ERR(lquota_type_proc_dir);
                return rc;
        }

        rc = qunit_cache_init();
        if (rc)
                return rc;

        PORTAL_SYMBOL_REGISTER(filter_quota_interface);
        PORTAL_SYMBOL_REGISTER(mds_quota_interface);

        return 0;
}

static void /*__exit*/ exit_lustre_quota(void)
{
        PORTAL_SYMBOL_UNREGISTER(filter_quota_interface);
        PORTAL_SYMBOL_UNREGISTER(mds_quota_interface);

        qunit_cache_cleanup();

        if (lquota_type_proc_dir)
                lprocfs_remove(&lquota_type_proc_dir);
}

MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Quota");
MODULE_LICENSE("GPL");

cfs_module(lquota, "1.0.0", init_lustre_quota, exit_lustre_quota);

EXPORT_SYMBOL(mds_quota_interface);
EXPORT_SYMBOL(filter_quota_interface);
#endif /* __KERNEL__ */