1 Remove dqptr_sem (but keep it in struct quota_info so the kernel ABI stays
2 unchanged); the functionality of this lock is now implemented by other locks:
4 * i_dquot is protected by i_lock — however, only the pointer itself is; the
5 content of the struct it points to is protected by dq_data_lock.
6 * Q_GETFMT is now protected with dqonoff_mutex instead of dqptr_sem.
8 fs/quota/dquot.c | 243 +++++++++++++++++++++++++++++------------------------
12 include/linux/fs.h | 2
13 5 files changed, 154 insertions(+), 114 deletions(-)
15 Index: linux-2.6.32-358.0.1.el6/fs/quota/dquot.c
16 ===================================================================
17 --- linux-2.6.32-358.0.1.el6.orig/fs/quota/dquot.c
18 +++ linux-2.6.32-358.0.1.el6/fs/quota/dquot.c
21 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
22 * and quota formats, dqstats structure containing statistics about the lists
23 - * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
24 - * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
25 - * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
26 - * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
27 - * modifications of quota state (on quotaon and quotaoff) and readers who care
28 - * about latest values take it as well.
29 + * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures.
30 + * dq_state_lock protects modifications of quota state (on quotaon and quotaoff)
31 + * and readers who care about latest values take it as well.
33 - * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
34 + * The spinlock ordering is hence: i_lock > dq_data_lock > dq_list_lock,
35 * dq_list_lock > dq_state_lock
37 * Note that some things (eg. sb pointer, type, id) doesn't change during
38 * the life of the dquot structure and so needn't to be protected by a lock
40 - * Any operation working on dquots via inode pointers must hold dqptr_sem. If
41 - * operation is just reading pointers from inode (or not using them at all) the
42 - * read lock is enough. If pointers are altered function must hold write lock
43 + * Any operation working on dquots via inode pointers must hold i_lock.
44 * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
45 * for altering the flag i_mutex is also needed).
48 * spinlock to internal buffers before writing.
50 * Lock ordering (including related VFS locks) is the following:
51 - * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
53 - * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
54 - * dqptr_sem. But filesystem has to count with the fact that functions such as
55 - * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
56 - * from inside a transaction to keep filesystem consistency after a crash. Also
57 - * filesystems usually want to do some IO on dquot from ->mark_dirty which is
58 - * called with dqptr_sem held.
59 - * i_mutex on quota files is special (it's below dqio_mutex)
60 + * i_mutex > dqonoff_sem > journal_lock > dquot->dq_lock > dqio_mutex
61 + * i_mutex on quota files is special (it's below dqio_mutex)
64 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
65 @@ -868,7 +856,6 @@ static inline int dqput_blocks(struct dq
67 * Remove references to dquots from inode and add dquot to list for freeing
68 * if we have the last referece to dquot
69 - * We can't race with anybody because we hold dqptr_sem for writing...
71 static int remove_inode_dquot_ref(struct inode *inode, int type,
72 struct list_head *tofree_head)
73 @@ -926,10 +913,12 @@ static void remove_dquot_ref(struct supe
74 * We have to scan also I_NEW inodes because they can already
75 * have quota pointer initialized. Luckily, we need to touch
76 * only quota pointers and these have separate locking
80 + spin_lock(&inode->i_lock);
81 if (!IS_NOQUOTA(inode))
82 remove_inode_dquot_ref(inode, type, tofree_head);
83 + spin_unlock(&inode->i_lock);
85 spin_unlock(&inode_lock);
87 @@ -940,9 +929,7 @@ static void drop_dquot_ref(struct super_
88 LIST_HEAD(tofree_head);
91 - down_write(&sb_dqopt(sb)->dqptr_sem);
92 remove_dquot_ref(sb, type, &tofree_head);
93 - up_write(&sb_dqopt(sb)->dqptr_sem);
94 put_dquot_list(&tofree_head);
97 @@ -1239,8 +1226,6 @@ static int info_bdq_free(struct dquot *d
100 * Initialize quota pointers in inode
101 - * We do things in a bit complicated way but by that we avoid calling
102 - * dqget() and thus filesystem callbacks under dqptr_sem.
104 int dquot_initialize(struct inode *inode, int type)
106 @@ -1270,8 +1255,7 @@ int dquot_initialize(struct inode *inode
107 got[cnt] = dqget(sb, id, cnt);
110 - down_write(&sb_dqopt(sb)->dqptr_sem);
111 - /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
112 + spin_lock(&inode->i_lock);
113 if (IS_NOQUOTA(inode))
115 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
116 @@ -1288,12 +1272,16 @@ int dquot_initialize(struct inode *inode
117 * did a write before quota was turned on
119 rsv = inode_get_rsv_space(inode);
121 + if (unlikely(rsv)) {
122 + spin_lock(&dq_data_lock);
123 dquot_resv_space(inode->i_dquot[cnt], rsv);
124 + spin_unlock(&dq_data_lock);
129 - up_write(&sb_dqopt(sb)->dqptr_sem);
130 + spin_unlock(&inode->i_lock);
132 /* Drop unused references */
133 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
135 @@ -1309,12 +1297,12 @@ int dquot_drop(struct inode *inode)
137 struct dquot *put[MAXQUOTAS];
139 - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
140 + spin_lock(&inode->i_lock);
141 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
142 put[cnt] = inode->i_dquot[cnt];
143 inode->i_dquot[cnt] = NULL;
145 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
146 + spin_unlock(&inode->i_lock);
148 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
150 @@ -1357,27 +1345,42 @@ static qsize_t *inode_reserved_space(str
151 return inode->i_sb->dq_op->get_reserved_space(inode);
154 +static inline void __inode_add_rsv_space(struct inode *inode, qsize_t number)
156 + *inode_reserved_space(inode) += number;
159 void inode_add_rsv_space(struct inode *inode, qsize_t number)
161 spin_lock(&inode->i_lock);
162 - *inode_reserved_space(inode) += number;
163 + __inode_add_rsv_space(inode, number);
164 spin_unlock(&inode->i_lock);
166 EXPORT_SYMBOL(inode_add_rsv_space);
168 -void inode_claim_rsv_space(struct inode *inode, qsize_t number)
169 +static inline void __inode_claim_rsv_space(struct inode *inode, qsize_t number)
171 - spin_lock(&inode->i_lock);
172 *inode_reserved_space(inode) -= number;
173 __inode_add_bytes(inode, number);
176 +void inode_claim_rsv_space(struct inode *inode, qsize_t number)
178 + spin_lock(&inode->i_lock);
179 + __inode_claim_rsv_space(inode, number);
180 spin_unlock(&inode->i_lock);
182 EXPORT_SYMBOL(inode_claim_rsv_space);
184 +static inline void __inode_sub_rsv_space(struct inode *inode, qsize_t number)
186 + *inode_reserved_space(inode) -= number;
189 void inode_sub_rsv_space(struct inode *inode, qsize_t number)
191 spin_lock(&inode->i_lock);
192 - *inode_reserved_space(inode) -= number;
193 + __inode_sub_rsv_space(inode, number);
194 spin_unlock(&inode->i_lock);
196 EXPORT_SYMBOL(inode_sub_rsv_space);
197 @@ -1388,9 +1391,8 @@ static qsize_t inode_get_rsv_space(struc
199 if (!inode->i_sb->dq_op->get_reserved_space)
201 - spin_lock(&inode->i_lock);
203 ret = *inode_reserved_space(inode);
204 - spin_unlock(&inode->i_lock);
208 @@ -1398,17 +1400,17 @@ static void inode_incr_space(struct inod
212 - inode_add_rsv_space(inode, number);
213 + __inode_add_rsv_space(inode, number);
215 - inode_add_bytes(inode, number);
216 + __inode_add_bytes(inode, number);
219 static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
222 - inode_sub_rsv_space(inode, number);
223 + __inode_sub_rsv_space(inode, number);
225 - inode_sub_bytes(inode, number);
226 + __inode_sub_bytes(inode, number);
230 @@ -1430,6 +1432,7 @@ int __dquot_alloc_space(struct inode *in
231 struct dquot_warn warn[MAXQUOTAS];
232 struct dquot **dquots = inode->i_dquot;
233 + struct dquot *dquot[MAXQUOTAS] = { NULL };
234 int reserve = flags & DQUOT_SPACE_RESERVE;
237 * First test before acquiring mutex - solves deadlocks when we
238 @@ -1440,48 +1443,53 @@ int __dquot_alloc_space(struct inode *in
242 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
243 + spin_lock(&inode->i_lock);
244 if (IS_NOQUOTA(inode)) {
245 inode_incr_space(inode, number, reserve);
246 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
247 + spin_unlock(&inode->i_lock);
251 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
252 warn[cnt].w_type = QUOTA_NL_NOWARN;
254 spin_lock(&dq_data_lock);
255 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
257 + dquot[cnt] = dquots[cnt];
260 - if (check_bdq(dquots[cnt], number,
261 + atomic_inc(&dquot[cnt]->dq_count);
262 + if (check_bdq(dquot[cnt], number,
263 (flags & DQUOT_SPACE_WARN), &warn[cnt])
264 == NO_QUOTA && !(flags & DQUOT_SPACE_NOFAIL)) {
266 spin_unlock(&dq_data_lock);
267 + spin_unlock(&inode->i_lock);
271 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
276 - dquot_resv_space(dquots[cnt], number);
277 + dquot_resv_space(dquot[cnt], number);
279 - dquot_incr_space(dquots[cnt], number);
280 + dquot_incr_space(dquot[cnt], number);
282 inode_incr_space(inode, number, reserve);
283 spin_unlock(&dq_data_lock);
284 + spin_unlock(&inode->i_lock);
288 /* Dirtify all the dquots - this can block when journalling */
289 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
291 - mark_dquot_dirty(dquots[cnt]);
293 + mark_dquot_dirty(dquot[cnt]);
295 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
296 - flush_warnings(warn);
297 + flush_warnings(warn);
298 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
303 @@ -1508,7 +1517,8 @@ int dquot_alloc_inode(const struct inode
305 int cnt, ret = NO_QUOTA;
306 struct dquot_warn warn[MAXQUOTAS];
307 struct dquot * const *dquots = inode->i_dquot;
308 + struct dquot *dquot[MAXQUOTAS] = { NULL };
310 /* First test before acquiring mutex - solves deadlocks when we
311 * re-enter the quota code and are already holding the mutex */
312 @@ -1515,35 +1525,41 @@ int dquot_alloc_inode(const struct inode
314 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
315 warn[cnt].w_type = QUOTA_NL_NOWARN;
316 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
318 + spin_lock(&((struct inode *)inode)->i_lock);
319 if (IS_NOQUOTA(inode)) {
320 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
321 + spin_unlock(&((struct inode *)inode)->i_lock);
324 spin_lock(&dq_data_lock);
325 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
327 + dquot[cnt] = dquots[cnt];
330 - if (check_idq(dquots[cnt], number, &warn[cnt])
331 + atomic_inc(&dquot[cnt]->dq_count);
332 + if (check_idq(dquot[cnt], number, &warn[cnt])
337 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
341 - dquot_incr_inodes(dquots[cnt], number);
342 + dquot_incr_inodes(dquot[cnt], number);
346 spin_unlock(&dq_data_lock);
347 + spin_unlock(&((struct inode *)inode)->i_lock);
350 /* Dirtify all the dquots - this can block when journalling */
351 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
353 - mark_dquot_dirty(dquots[cnt]);
354 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
355 - flush_warnings(warn);
357 + mark_dquot_dirty(dquot[cnt]);
358 + flush_warnings(warn);
359 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
363 EXPORT_SYMBOL(dquot_alloc_inode);
364 @@ -1552,34 +1568,40 @@ int dquot_claim_space(struct inode *inod
368 + struct dquot *dquot[MAXQUOTAS] = { NULL };
370 if (IS_NOQUOTA(inode)) {
371 inode_claim_rsv_space(inode, number);
375 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
376 + spin_lock(&inode->i_lock);
377 if (IS_NOQUOTA(inode)) {
378 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
379 - inode_claim_rsv_space(inode, number);
380 + __inode_claim_rsv_space(inode, number);
381 + spin_unlock(&inode->i_lock);
385 spin_lock(&dq_data_lock);
386 /* Claim reserved quotas to allocated quotas */
387 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
388 - if (inode->i_dquot[cnt])
389 - dquot_claim_reserved_space(inode->i_dquot[cnt],
391 + dquot[cnt] = inode->i_dquot[cnt];
393 + atomic_inc(&dquot[cnt]->dq_count);
394 + dquot_claim_reserved_space(dquot[cnt], number);
397 /* Update inode bytes */
398 - inode_claim_rsv_space(inode, number);
399 + __inode_claim_rsv_space(inode, number);
400 spin_unlock(&dq_data_lock);
401 + spin_unlock(&inode->i_lock);
403 /* Dirtify all the dquots - this can block when journalling */
404 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
405 - if (inode->i_dquot[cnt])
406 - mark_dquot_dirty(inode->i_dquot[cnt]);
407 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
409 + mark_dquot_dirty(dquot[cnt]);
410 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
415 @@ -1593,7 +1615,8 @@ int __dquot_free_space(struct inode *ino
417 struct dquot_warn warn[MAXQUOTAS];
418 struct dquot **dquots = inode->i_dquot;
419 int reserve = flags & DQUOT_SPACE_RESERVE;
420 + struct dquot *dquot[MAXQUOTAS] = { NULL };
422 /* First test before acquiring mutex - solves deadlocks when we
423 * re-enter the quota code and are already holding the mutex */
424 @@ -1602,39 +1625,42 @@ out_sub:
428 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
429 - /* Now recheck reliably when holding dqptr_sem */
430 + spin_lock(&inode->i_lock);
431 if (IS_NOQUOTA(inode)) {
432 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
433 + spin_unlock(&inode->i_lock);
436 spin_lock(&dq_data_lock);
437 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
440 warn[cnt].w_type = QUOTA_NL_NOWARN;
442 + dquot[cnt] = dquots[cnt];
445 - wtype = info_bdq_free(dquots[cnt], number);
446 + atomic_inc(&dquot[cnt]->dq_count);
447 + wtype = info_bdq_free(dquot[cnt], number);
448 if (wtype != QUOTA_NL_NOWARN)
449 - prepare_warning(&warn[cnt], dquots[cnt], wtype);
450 + prepare_warning(&warn[cnt], dquot[cnt], wtype);
452 - dquot_free_reserved_space(dquots[cnt], number);
453 + dquot_free_reserved_space(dquot[cnt], number);
455 - dquot_decr_space(dquots[cnt], number);
456 + dquot_decr_space(dquot[cnt], number);
458 inode_decr_space(inode, number, reserve);
459 spin_unlock(&dq_data_lock);
460 + spin_unlock(&inode->i_lock);
465 /* Dirtify all the dquots - this can block when journalling */
466 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
468 - mark_dquot_dirty(dquots[cnt]);
470 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
471 - flush_warnings(warn);
473 + mark_dquot_dirty(dquot[cnt]);
475 + flush_warnings(warn);
476 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
481 @@ -1656,39 +1682,44 @@ int dquot_free_inode(const struct inode
484 struct dquot_warn warn[MAXQUOTAS];
485 struct dquot * const *dquots = inode->i_dquot;
486 + struct dquot *dquot[MAXQUOTAS] = { NULL };
488 /* First test before acquiring mutex - solves deadlocks when we
489 * re-enter the quota code and are already holding the mutex */
490 if (IS_NOQUOTA(inode))
493 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
494 - /* Now recheck reliably when holding dqptr_sem */
495 + spin_lock(&((struct inode *)inode)->i_lock);
496 if (IS_NOQUOTA(inode)) {
497 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
498 + spin_unlock(&((struct inode *)inode)->i_lock);
501 spin_lock(&dq_data_lock);
502 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
505 warn[cnt].w_type = QUOTA_NL_NOWARN;
507 + dquot[cnt] = dquots[cnt];
510 - wtype = info_idq_free(dquots[cnt], number);
511 - if (wtype != QUOTA_NL_NOWARN)
512 - prepare_warning(&warn[cnt], dquots[cnt], wtype);
513 - dquot_decr_inodes(dquots[cnt], number);
514 + atomic_inc(&dquot[cnt]->dq_count);
515 + wtype = info_idq_free(dquot[cnt], number);
516 + if (wtype != QUOTA_NL_NOWARN)
517 + prepare_warning(&warn[cnt], dquot[cnt], wtype);
518 + dquot_decr_inodes(dquot[cnt], number);
521 spin_unlock(&dq_data_lock);
522 + spin_unlock(&((struct inode *)inode)->i_lock);
524 /* Dirtify all the dquots - this can block when journalling */
525 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
527 - mark_dquot_dirty(dquots[cnt]);
528 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
529 - flush_warnings(warn);
531 + mark_dquot_dirty(dquot[cnt]);
532 + flush_warnings(warn);
533 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
537 EXPORT_SYMBOL(dquot_free_inode);
538 @@ -1721,14 +1752,13 @@ int dquot_transfer(struct inode *inode,
539 transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
542 - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
543 - /* Now recheck reliably when holding dqptr_sem */
544 + spin_lock(&inode->i_lock);
545 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
546 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
547 + spin_unlock(&inode->i_lock);
550 spin_lock(&dq_data_lock);
551 - cur_space = inode_get_bytes(inode);
552 + cur_space = __inode_get_bytes(inode);
553 rsv_space = inode_get_rsv_space(inode);
554 space = cur_space + rsv_space;
555 /* Build the transfer_from list and check the limits */
556 @@ -1771,7 +1801,7 @@ int dquot_transfer(struct inode *inode,
557 inode->i_dquot[cnt] = transfer_to[cnt];
559 spin_unlock(&dq_data_lock);
560 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
561 + spin_unlock(&inode->i_lock);
563 /* Dirtify all the dquots - this can block when journalling */
564 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
565 @@ -1795,7 +1825,7 @@ put_all:
568 spin_unlock(&dq_data_lock);
569 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
570 + spin_unlock(&inode->i_lock);
571 /* Clear dquot pointers we don't want to dqput() */
572 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
573 transfer_from[cnt] = NULL;
574 @@ -2047,13 +2077,13 @@ static int vfs_load_quota_inode(struct i
575 /* We don't want quota and atime on quota files (deadlocks
576 * possible) Also nobody should write to the file - we use
577 * special IO operations which ignore the immutable bit. */
578 - down_write(&dqopt->dqptr_sem);
579 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
580 + spin_lock(&inode->i_lock);
581 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
583 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
584 + spin_unlock(&inode->i_lock);
585 mutex_unlock(&inode->i_mutex);
586 - up_write(&dqopt->dqptr_sem);
587 sb->dq_op->drop(inode);
590 @@ -2090,14 +2120,14 @@ out_file_init:
593 if (oldflags != -1) {
594 - down_write(&dqopt->dqptr_sem);
595 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
596 + spin_lock(&inode->i_lock);
597 /* Set the flags back (in the case of accidental quotaon()
598 * on a wrong file we don't want to mess up the flags) */
599 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
600 inode->i_flags |= oldflags;
601 + spin_unlock(&inode->i_lock);
602 mutex_unlock(&inode->i_mutex);
603 - up_write(&dqopt->dqptr_sem);
605 mutex_unlock(&dqopt->dqonoff_mutex);
607 Index: linux-2.6.32-358.0.1.el6/fs/quota/quota.c
608 ===================================================================
609 --- linux-2.6.32-358.0.1.el6.orig/fs/quota/quota.c
610 +++ linux-2.6.32-358.0.1.el6/fs/quota/quota.c
611 @@ -257,13 +257,13 @@ static int do_quotactl(struct super_bloc
615 - down_read(&sb_dqopt(sb)->dqptr_sem);
616 + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
617 if (!sb_has_quota_active(sb, type)) {
618 - up_read(&sb_dqopt(sb)->dqptr_sem);
619 + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
622 fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
623 - up_read(&sb_dqopt(sb)->dqptr_sem);
624 + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
625 if (copy_to_user(addr, &fmt, sizeof(fmt)))
628 Index: linux-2.6.32-358.0.1.el6/fs/stat.c
629 ===================================================================
630 --- linux-2.6.32-358.0.1.el6.orig/fs/stat.c
631 +++ linux-2.6.32-358.0.1.el6/fs/stat.c
632 @@ -422,9 +422,8 @@ void inode_add_bytes(struct inode *inode
634 EXPORT_SYMBOL(inode_add_bytes);
636 -void inode_sub_bytes(struct inode *inode, loff_t bytes)
637 +void __inode_sub_bytes(struct inode *inode, loff_t bytes)
639 - spin_lock(&inode->i_lock);
640 inode->i_blocks -= bytes >> 9;
642 if (inode->i_bytes < bytes) {
643 @@ -432,17 +431,28 @@ void inode_sub_bytes(struct inode *inode
644 inode->i_bytes += 512;
646 inode->i_bytes -= bytes;
649 +void inode_sub_bytes(struct inode *inode, loff_t bytes)
651 + spin_lock(&inode->i_lock);
652 + __inode_sub_bytes(inode, bytes);
653 spin_unlock(&inode->i_lock);
656 EXPORT_SYMBOL(inode_sub_bytes);
658 +loff_t __inode_get_bytes(struct inode *inode)
660 + return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
663 loff_t inode_get_bytes(struct inode *inode)
667 spin_lock(&inode->i_lock);
668 - ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
669 + ret = __inode_get_bytes(inode);
670 spin_unlock(&inode->i_lock);
673 Index: linux-2.6.32-358.0.1.el6/fs/super.c
674 ===================================================================
675 --- linux-2.6.32-358.0.1.el6.orig/fs/super.c
676 +++ linux-2.6.32-358.0.1.el6/fs/super.c
677 @@ -146,7 +146,6 @@ static struct super_block *alloc_super(s
678 mutex_init(&s->s_vfs_rename_mutex);
679 mutex_init(&s->s_dquot.dqio_mutex);
680 mutex_init(&s->s_dquot.dqonoff_mutex);
681 - init_rwsem(&s->s_dquot.dqptr_sem);
682 init_waitqueue_head(&s->s_wait_unfrozen);
683 s->s_maxbytes = MAX_NON_LFS;
684 s->dq_op = sb_dquot_ops;
685 Index: linux-2.6.32-358.0.1.el6/include/linux/fs.h
686 ===================================================================
687 --- linux-2.6.32-358.0.1.el6.orig/include/linux/fs.h
688 +++ linux-2.6.32-358.0.1.el6/include/linux/fs.h
689 @@ -2567,7 +2567,9 @@ extern void generic_fillattr(struct inod
690 extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
691 void __inode_add_bytes(struct inode *inode, loff_t bytes);
692 void inode_add_bytes(struct inode *inode, loff_t bytes);
693 +void __inode_sub_bytes(struct inode *inode, loff_t bytes);
694 void inode_sub_bytes(struct inode *inode, loff_t bytes);
695 +loff_t __inode_get_bytes(struct inode *inode);
696 loff_t inode_get_bytes(struct inode *inode);
697 void inode_set_bytes(struct inode *inode, loff_t bytes);