1 Remove dqptr_sem (but keep it in struct quota_info to keep the kernel ABI
2 unchanged); the functionality of this lock is now implemented as follows:
4 * i_dquot is protected by i_lock, however only this pointer; the
5 content of this struct is protected by dq_data_lock.
6 * Q_GETFMT is now protected by dqonoff_mutex instead of dqptr_sem.
8 fs/quota/dquot.c | 243 +++++++++++++++++++++++++++++------------------------
12 include/linux/fs.h | 2
13 5 files changed, 154 insertions(+), 114 deletions(-)
15 --- linux-2.6.32-279.el6.x86_64/fs/quota/dquot.c 2012-06-14 05:40:59.000000000 +0800
16 +++ linux-2.6.32-279.el6.x86_64.quota/fs/quota/dquot.c 2013-03-07 16:24:24.602781757 +0800
19 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
20 * and quota formats, dqstats structure containing statistics about the lists
21 - * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
22 - * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
23 - * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
24 - * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
25 - * modifications of quota state (on quotaon and quotaoff) and readers who care
26 - * about latest values take it as well.
27 + * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures.
28 + * dq_state_lock protects modifications of quota state (on quotaon and quotaoff)
29 + * and readers who care about latest values take it as well.
31 - * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
32 + * The spinlock ordering is hence: i_lock > dq_data_lock > dq_list_lock,
33 * dq_list_lock > dq_state_lock
35 * Note that some things (eg. sb pointer, type, id) doesn't change during
36 * the life of the dquot structure and so needn't to be protected by a lock
38 - * Any operation working on dquots via inode pointers must hold dqptr_sem. If
39 - * operation is just reading pointers from inode (or not using them at all) the
40 - * read lock is enough. If pointers are altered function must hold write lock
41 + * Any operation working on dquots via inode pointers must hold i_lock.
42 * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
43 * for altering the flag i_mutex is also needed).
46 * spinlock to internal buffers before writing.
48 * Lock ordering (including related VFS locks) is the following:
49 - * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
51 - * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
52 - * dqptr_sem. But filesystem has to count with the fact that functions such as
53 - * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
54 - * from inside a transaction to keep filesystem consistency after a crash. Also
55 - * filesystems usually want to do some IO on dquot from ->mark_dirty which is
56 - * called with dqptr_sem held.
57 - * i_mutex on quota files is special (it's below dqio_mutex)
58 + * i_mutex > dqonoff_sem > journal_lock > dquot->dq_lock > dqio_mutex
59 + * i_mutex on quota files is special (it's below dqio_mutex)
62 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
65 * Remove references to dquots from inode and add dquot to list for freeing
66 * if we have the last referece to dquot
67 - * We can't race with anybody because we hold dqptr_sem for writing...
69 static int remove_inode_dquot_ref(struct inode *inode, int type,
70 struct list_head *tofree_head)
72 * We have to scan also I_NEW inodes because they can already
73 * have quota pointer initialized. Luckily, we need to touch
74 * only quota pointers and these have separate locking
78 + spin_lock(&inode->i_lock);
79 if (!IS_NOQUOTA(inode))
80 remove_inode_dquot_ref(inode, type, tofree_head);
81 + spin_unlock(&inode->i_lock);
83 spin_unlock(&inode_lock);
86 LIST_HEAD(tofree_head);
89 - down_write(&sb_dqopt(sb)->dqptr_sem);
90 remove_dquot_ref(sb, type, &tofree_head);
91 - up_write(&sb_dqopt(sb)->dqptr_sem);
92 put_dquot_list(&tofree_head);
98 * Initialize quota pointers in inode
99 - * We do things in a bit complicated way but by that we avoid calling
100 - * dqget() and thus filesystem callbacks under dqptr_sem.
102 int dquot_initialize(struct inode *inode, int type)
104 @@ -1270,8 +1255,7 @@
105 got[cnt] = dqget(sb, id, cnt);
108 - down_write(&sb_dqopt(sb)->dqptr_sem);
109 - /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
110 + spin_lock(&inode->i_lock);
111 if (IS_NOQUOTA(inode))
113 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
114 @@ -1288,12 +1272,16 @@
115 * did a write before quota was turned on
117 rsv = inode_get_rsv_space(inode);
119 + if (unlikely(rsv)) {
120 + spin_lock(&dq_data_lock);
121 dquot_resv_space(inode->i_dquot[cnt], rsv);
122 + spin_unlock(&dq_data_lock);
127 - up_write(&sb_dqopt(sb)->dqptr_sem);
128 + spin_unlock(&inode->i_lock);
130 /* Drop unused references */
131 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
133 @@ -1309,12 +1297,12 @@
135 struct dquot *put[MAXQUOTAS];
137 - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
138 + spin_lock(&inode->i_lock);
139 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
140 put[cnt] = inode->i_dquot[cnt];
141 inode->i_dquot[cnt] = NULL;
143 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
144 + spin_unlock(&inode->i_lock);
146 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
148 @@ -1357,27 +1345,42 @@
149 return inode->i_sb->dq_op->get_reserved_space(inode);
152 +static inline void __inode_add_rsv_space(struct inode *inode, qsize_t number)
154 + *inode_reserved_space(inode) += number;
157 void inode_add_rsv_space(struct inode *inode, qsize_t number)
159 spin_lock(&inode->i_lock);
160 - *inode_reserved_space(inode) += number;
161 + __inode_add_rsv_space(inode, number);
162 spin_unlock(&inode->i_lock);
164 EXPORT_SYMBOL(inode_add_rsv_space);
166 -void inode_claim_rsv_space(struct inode *inode, qsize_t number)
167 +static inline void __inode_claim_rsv_space(struct inode *inode, qsize_t number)
169 - spin_lock(&inode->i_lock);
170 *inode_reserved_space(inode) -= number;
171 __inode_add_bytes(inode, number);
174 +void inode_claim_rsv_space(struct inode *inode, qsize_t number)
176 + spin_lock(&inode->i_lock);
177 + __inode_claim_rsv_space(inode, number);
178 spin_unlock(&inode->i_lock);
180 EXPORT_SYMBOL(inode_claim_rsv_space);
182 +static inline void __inode_sub_rsv_space(struct inode *inode, qsize_t number)
184 + *inode_reserved_space(inode) -= number;
187 void inode_sub_rsv_space(struct inode *inode, qsize_t number)
189 spin_lock(&inode->i_lock);
190 - *inode_reserved_space(inode) -= number;
191 + __inode_sub_rsv_space(inode, number);
192 spin_unlock(&inode->i_lock);
194 EXPORT_SYMBOL(inode_sub_rsv_space);
195 @@ -1388,9 +1391,8 @@
197 if (!inode->i_sb->dq_op->get_reserved_space)
199 - spin_lock(&inode->i_lock);
201 ret = *inode_reserved_space(inode);
202 - spin_unlock(&inode->i_lock);
206 @@ -1398,17 +1400,17 @@
210 - inode_add_rsv_space(inode, number);
211 + __inode_add_rsv_space(inode, number);
213 - inode_add_bytes(inode, number);
214 + __inode_add_bytes(inode, number);
217 static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
220 - inode_sub_rsv_space(inode, number);
221 + __inode_sub_rsv_space(inode, number);
223 - inode_sub_bytes(inode, number);
224 + __inode_sub_bytes(inode, number);
228 @@ -1430,6 +1432,7 @@
229 int warn = flags & DQUOT_SPACE_WARN;
230 int reserve = flags & DQUOT_SPACE_RESERVE;
231 int nofail = flags & DQUOT_SPACE_NOFAIL;
232 + struct dquot *dquot[MAXQUOTAS] = { NULL };
235 * First test before acquiring mutex - solves deadlocks when we
236 @@ -1440,20 +1443,24 @@
240 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
241 + spin_lock(&inode->i_lock);
242 if (IS_NOQUOTA(inode)) {
243 inode_incr_space(inode, number, reserve);
245 + spin_unlock(&inode->i_lock);
249 - for (cnt = 0; cnt < MAXQUOTAS; cnt++)
250 + for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
251 warntype[cnt] = QUOTA_NL_NOWARN;
254 spin_lock(&dq_data_lock);
255 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
256 - if (!inode->i_dquot[cnt])
257 + dquot[cnt] = inode->i_dquot[cnt];
260 - if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
261 + atomic_inc(&dquot[cnt]->dq_count);
262 + if (check_bdq(dquot[cnt], number, warn, warntype+cnt)
263 == NO_QUOTA && !nofail) {
265 spin_unlock(&dq_data_lock);
266 @@ -1461,26 +1468,27 @@
269 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
270 - if (!inode->i_dquot[cnt])
274 - dquot_resv_space(inode->i_dquot[cnt], number);
275 + dquot_resv_space(dquot[cnt], number);
277 - dquot_incr_space(inode->i_dquot[cnt], number);
278 + dquot_incr_space(dquot[cnt], number);
280 inode_incr_space(inode, number, reserve);
281 spin_unlock(&dq_data_lock);
282 + spin_unlock(&inode->i_lock);
286 /* Dirtify all the dquots - this can block when journalling */
287 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
288 - if (inode->i_dquot[cnt])
289 - mark_dquot_dirty(inode->i_dquot[cnt]);
291 + mark_dquot_dirty(dquot[cnt]);
293 - flush_warnings(inode->i_dquot, warntype);
295 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
296 + flush_warnings(dquot, warntype);
297 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
302 @@ -1508,6 +1516,7 @@
304 int cnt, ret = NO_QUOTA;
305 char warntype[MAXQUOTAS];
306 + struct dquot *dquot[MAXQUOTAS] = { NULL };
308 /* First test before acquiring mutex - solves deadlocks when we
309 * re-enter the quota code and are already holding the mutex */
310 @@ -1515,35 +1524,41 @@
312 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
313 warntype[cnt] = QUOTA_NL_NOWARN;
314 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
316 + spin_lock(&((struct inode *)inode)->i_lock);
317 if (IS_NOQUOTA(inode)) {
318 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
319 + spin_unlock(&((struct inode *)inode)->i_lock);
322 spin_lock(&dq_data_lock);
323 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
324 - if (!inode->i_dquot[cnt])
325 + dquot[cnt] = inode->i_dquot[cnt];
328 - if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
329 + atomic_inc(&dquot[cnt]->dq_count);
330 + if (check_idq(dquot[cnt], number, warntype+cnt)
335 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
336 - if (!inode->i_dquot[cnt])
339 - dquot_incr_inodes(inode->i_dquot[cnt], number);
340 + dquot_incr_inodes(dquot[cnt], number);
344 spin_unlock(&dq_data_lock);
345 + spin_unlock(&((struct inode *)inode)->i_lock);
348 /* Dirtify all the dquots - this can block when journalling */
349 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
350 - if (inode->i_dquot[cnt])
351 - mark_dquot_dirty(inode->i_dquot[cnt]);
352 - flush_warnings(inode->i_dquot, warntype);
353 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
355 + mark_dquot_dirty(dquot[cnt]);
356 + flush_warnings(dquot, warntype);
357 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
361 EXPORT_SYMBOL(dquot_alloc_inode);
362 @@ -1552,34 +1567,40 @@
366 + struct dquot *dquot[MAXQUOTAS] = { NULL };
368 if (IS_NOQUOTA(inode)) {
369 inode_claim_rsv_space(inode, number);
373 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
374 + spin_lock(&inode->i_lock);
375 if (IS_NOQUOTA(inode)) {
376 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
377 - inode_claim_rsv_space(inode, number);
378 + __inode_claim_rsv_space(inode, number);
379 + spin_unlock(&inode->i_lock);
383 spin_lock(&dq_data_lock);
384 /* Claim reserved quotas to allocated quotas */
385 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
386 - if (inode->i_dquot[cnt])
387 - dquot_claim_reserved_space(inode->i_dquot[cnt],
389 + dquot[cnt] = inode->i_dquot[cnt];
391 + atomic_inc(&dquot[cnt]->dq_count);
392 + dquot_claim_reserved_space(dquot[cnt], number);
395 /* Update inode bytes */
396 - inode_claim_rsv_space(inode, number);
397 + __inode_claim_rsv_space(inode, number);
398 spin_unlock(&dq_data_lock);
399 + spin_unlock(&inode->i_lock);
401 /* Dirtify all the dquots - this can block when journalling */
402 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
403 - if (inode->i_dquot[cnt])
404 - mark_dquot_dirty(inode->i_dquot[cnt]);
405 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
407 + mark_dquot_dirty(dquot[cnt]);
408 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
413 @@ -1593,6 +1614,7 @@
415 char warntype[MAXQUOTAS];
416 int reserve = flags & DQUOT_SPACE_RESERVE;
417 + struct dquot *dquot[MAXQUOTAS] = { NULL };
419 /* First test before acquiring mutex - solves deadlocks when we
420 * re-enter the quota code and are already holding the mutex */
421 @@ -1602,34 +1624,37 @@
425 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
426 - /* Now recheck reliably when holding dqptr_sem */
427 + spin_lock(&inode->i_lock);
428 if (IS_NOQUOTA(inode)) {
429 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
430 + spin_unlock(&inode->i_lock);
433 spin_lock(&dq_data_lock);
434 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
435 - if (!inode->i_dquot[cnt])
436 + dquot[cnt] = inode->i_dquot[cnt];
439 - warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
440 + atomic_inc(&dquot[cnt]->dq_count);
441 + warntype[cnt] = info_bdq_free(dquot[cnt], number);
443 - dquot_free_reserved_space(inode->i_dquot[cnt], number);
444 + dquot_free_reserved_space(dquot[cnt], number);
446 - dquot_decr_space(inode->i_dquot[cnt], number);
447 + dquot_decr_space(dquot[cnt], number);
449 inode_decr_space(inode, number, reserve);
450 spin_unlock(&dq_data_lock);
451 + spin_unlock(&inode->i_lock);
456 /* Dirtify all the dquots - this can block when journalling */
457 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
458 - if (inode->i_dquot[cnt])
459 - mark_dquot_dirty(inode->i_dquot[cnt]);
461 - flush_warnings(inode->i_dquot, warntype);
462 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
464 + mark_dquot_dirty(dquot[cnt]);
466 + flush_warnings(dquot, warntype);
467 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
472 @@ -1656,32 +1681,37 @@
475 char warntype[MAXQUOTAS];
476 + struct dquot *dquot[MAXQUOTAS] = { NULL };
478 /* First test before acquiring mutex - solves deadlocks when we
479 * re-enter the quota code and are already holding the mutex */
480 if (IS_NOQUOTA(inode))
483 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
484 - /* Now recheck reliably when holding dqptr_sem */
485 + spin_lock(&((struct inode *)inode)->i_lock);
486 if (IS_NOQUOTA(inode)) {
487 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
488 + spin_unlock(&((struct inode *)inode)->i_lock);
491 spin_lock(&dq_data_lock);
492 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
493 - if (!inode->i_dquot[cnt])
494 + dquot[cnt] = inode->i_dquot[cnt];
497 - warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
498 - dquot_decr_inodes(inode->i_dquot[cnt], number);
499 + atomic_inc(&dquot[cnt]->dq_count);
500 + warntype[cnt] = info_idq_free(dquot[cnt], number);
501 + dquot_decr_inodes(dquot[cnt], number);
503 spin_unlock(&dq_data_lock);
504 + spin_unlock(&((struct inode *)inode)->i_lock);
506 /* Dirtify all the dquots - this can block when journalling */
507 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
508 - if (inode->i_dquot[cnt])
509 - mark_dquot_dirty(inode->i_dquot[cnt]);
510 - flush_warnings(inode->i_dquot, warntype);
511 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
513 + mark_dquot_dirty(dquot[cnt]);
514 + flush_warnings(dquot, warntype);
515 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
519 EXPORT_SYMBOL(dquot_free_inode);
520 @@ -1721,14 +1751,13 @@
521 transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
524 - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
525 - /* Now recheck reliably when holding dqptr_sem */
526 + spin_lock(&inode->i_lock);
527 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
528 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
529 + spin_unlock(&inode->i_lock);
532 spin_lock(&dq_data_lock);
533 - cur_space = inode_get_bytes(inode);
534 + cur_space = __inode_get_bytes(inode);
535 rsv_space = inode_get_rsv_space(inode);
536 space = cur_space + rsv_space;
537 /* Build the transfer_from list and check the limits */
538 @@ -1771,7 +1800,7 @@
539 inode->i_dquot[cnt] = transfer_to[cnt];
541 spin_unlock(&dq_data_lock);
542 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
543 + spin_unlock(&inode->i_lock);
545 /* Dirtify all the dquots - this can block when journalling */
546 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
547 @@ -1795,7 +1824,7 @@
550 spin_unlock(&dq_data_lock);
551 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
552 + spin_unlock(&inode->i_lock);
553 /* Clear dquot pointers we don't want to dqput() */
554 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
555 transfer_from[cnt] = NULL;
556 @@ -2047,13 +2076,13 @@
557 /* We don't want quota and atime on quota files (deadlocks
558 * possible) Also nobody should write to the file - we use
559 * special IO operations which ignore the immutable bit. */
560 - down_write(&dqopt->dqptr_sem);
561 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
562 + spin_lock(&inode->i_lock);
563 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
565 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
566 + spin_unlock(&inode->i_lock);
567 mutex_unlock(&inode->i_mutex);
568 - up_write(&dqopt->dqptr_sem);
569 sb->dq_op->drop(inode);
572 @@ -2090,14 +2119,14 @@
575 if (oldflags != -1) {
576 - down_write(&dqopt->dqptr_sem);
577 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
578 + spin_lock(&inode->i_lock);
579 /* Set the flags back (in the case of accidental quotaon()
580 * on a wrong file we don't want to mess up the flags) */
581 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
582 inode->i_flags |= oldflags;
583 + spin_unlock(&inode->i_lock);
584 mutex_unlock(&inode->i_mutex);
585 - up_write(&dqopt->dqptr_sem);
587 mutex_unlock(&dqopt->dqonoff_mutex);
589 --- linux-2.6.32-279.el6.x86_64/fs/quota/quota.c 2012-06-14 05:41:12.000000000 +0800
590 +++ linux-2.6.32-279.el6.x86_64.quota/fs/quota/quota.c 2013-02-21 17:19:03.530511632 +0800
591 @@ -257,13 +257,13 @@
595 - down_read(&sb_dqopt(sb)->dqptr_sem);
596 + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
597 if (!sb_has_quota_active(sb, type)) {
598 - up_read(&sb_dqopt(sb)->dqptr_sem);
599 + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
602 fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
603 - up_read(&sb_dqopt(sb)->dqptr_sem);
604 + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
605 if (copy_to_user(addr, &fmt, sizeof(fmt)))
608 --- linux-2.6.32-279.el6.x86_64/fs/stat.c 2012-06-14 05:41:12.000000000 +0800
609 +++ linux-2.6.32-279.el6.x86_64.quota/fs/stat.c 2013-03-04 23:14:19.471077448 +0800
612 EXPORT_SYMBOL(inode_add_bytes);
614 -void inode_sub_bytes(struct inode *inode, loff_t bytes)
615 +void __inode_sub_bytes(struct inode *inode, loff_t bytes)
617 - spin_lock(&inode->i_lock);
618 inode->i_blocks -= bytes >> 9;
620 if (inode->i_bytes < bytes) {
621 @@ -432,17 +431,28 @@
622 inode->i_bytes += 512;
624 inode->i_bytes -= bytes;
627 +void inode_sub_bytes(struct inode *inode, loff_t bytes)
629 + spin_lock(&inode->i_lock);
630 + __inode_sub_bytes(inode, bytes);
631 spin_unlock(&inode->i_lock);
634 EXPORT_SYMBOL(inode_sub_bytes);
636 +loff_t __inode_get_bytes(struct inode *inode)
638 + return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
641 loff_t inode_get_bytes(struct inode *inode)
645 spin_lock(&inode->i_lock);
646 - ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
647 + ret = __inode_get_bytes(inode);
648 spin_unlock(&inode->i_lock);
651 --- linux-2.6.32-279.el6.x86_64/fs/super.c 2012-06-14 05:41:12.000000000 +0800
652 +++ linux-2.6.32-279.el6.x86_64.quota/fs/super.c 2013-02-21 17:19:03.530511632 +0800
654 mutex_init(&s->s_vfs_rename_mutex);
655 mutex_init(&s->s_dquot.dqio_mutex);
656 mutex_init(&s->s_dquot.dqonoff_mutex);
657 - init_rwsem(&s->s_dquot.dqptr_sem);
658 init_waitqueue_head(&s->s_wait_unfrozen);
659 s->s_maxbytes = MAX_NON_LFS;
660 s->dq_op = sb_dquot_ops;
661 --- linux-2.6.32-279.el6.x86_64/include/linux/fs.h 2012-06-14 05:41:19.000000000 +0800
662 +++ linux-2.6.32-279.el6.x86_64.quota/include/linux/fs.h 2013-02-21 17:19:03.531522089 +0800
663 @@ -2396,7 +2400,9 @@
664 extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
665 void __inode_add_bytes(struct inode *inode, loff_t bytes);
666 void inode_add_bytes(struct inode *inode, loff_t bytes);
667 +void __inode_sub_bytes(struct inode *inode, loff_t bytes);
668 void inode_sub_bytes(struct inode *inode, loff_t bytes);
669 +loff_t __inode_get_bytes(struct inode *inode);
670 loff_t inode_get_bytes(struct inode *inode);
671 void inode_set_bytes(struct inode *inode, loff_t bytes);