1 Remove dqptr_sem (but keep it in struct quota_info to keep the kernel ABI
2 unchanged); the functionality of this lock is implemented by
4 * i_dquot is protected by i_lock; however, only the pointer itself is — the
5 content of the struct is protected by dq_data_lock.
6 * Q_GETFMT is now protected by dqonoff_mutex instead of dqptr_sem.
8 fs/quota/dquot.c | 243 +++++++++++++++++++++++++++++------------------------
12 include/linux/fs.h | 2
13 5 files changed, 154 insertions(+), 114 deletions(-)
15 Index: linux-2.6.32-358.0.1.el6/fs/quota/dquot.c
16 ===================================================================
17 --- linux-2.6.32-358.0.1.el6.orig/fs/quota/dquot.c
18 +++ linux-2.6.32-358.0.1.el6/fs/quota/dquot.c
21 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
22 * and quota formats, dqstats structure containing statistics about the lists
23 - * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
24 - * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
25 - * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
26 - * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
27 - * modifications of quota state (on quotaon and quotaoff) and readers who care
28 - * about latest values take it as well.
29 + * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures.
30 + * dq_state_lock protects modifications of quota state (on quotaon and quotaoff)
31 + * and readers who care about latest values take it as well.
33 - * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
34 + * The spinlock ordering is hence: i_lock > dq_data_lock > dq_list_lock,
35 * dq_list_lock > dq_state_lock
37 * Note that some things (eg. sb pointer, type, id) doesn't change during
38 * the life of the dquot structure and so needn't to be protected by a lock
40 - * Any operation working on dquots via inode pointers must hold dqptr_sem. If
41 - * operation is just reading pointers from inode (or not using them at all) the
42 - * read lock is enough. If pointers are altered function must hold write lock
43 + * Any operation working on dquots via inode pointers must hold i_lock.
44 * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
45 * for altering the flag i_mutex is also needed).
48 * spinlock to internal buffers before writing.
50 * Lock ordering (including related VFS locks) is the following:
51 - * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
53 - * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
54 - * dqptr_sem. But filesystem has to count with the fact that functions such as
55 - * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
56 - * from inside a transaction to keep filesystem consistency after a crash. Also
57 - * filesystems usually want to do some IO on dquot from ->mark_dirty which is
58 - * called with dqptr_sem held.
59 - * i_mutex on quota files is special (it's below dqio_mutex)
60 + * i_mutex > dqonoff_sem > journal_lock > dquot->dq_lock > dqio_mutex
61 + * i_mutex on quota files is special (it's below dqio_mutex)
64 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
65 @@ -868,7 +856,6 @@ static inline int dqput_blocks(struct dq
67 * Remove references to dquots from inode and add dquot to list for freeing
68 * if we have the last referece to dquot
69 - * We can't race with anybody because we hold dqptr_sem for writing...
71 static int remove_inode_dquot_ref(struct inode *inode, int type,
72 struct list_head *tofree_head)
73 @@ -926,10 +913,12 @@ static void remove_dquot_ref(struct supe
74 * We have to scan also I_NEW inodes because they can already
75 * have quota pointer initialized. Luckily, we need to touch
76 * only quota pointers and these have separate locking
80 + spin_lock(&inode->i_lock);
81 if (!IS_NOQUOTA(inode))
82 remove_inode_dquot_ref(inode, type, tofree_head);
83 + spin_unlock(&inode->i_lock);
85 spin_unlock(&inode_lock);
87 @@ -940,9 +929,7 @@ static void drop_dquot_ref(struct super_
88 LIST_HEAD(tofree_head);
91 - down_write(&sb_dqopt(sb)->dqptr_sem);
92 remove_dquot_ref(sb, type, &tofree_head);
93 - up_write(&sb_dqopt(sb)->dqptr_sem);
94 put_dquot_list(&tofree_head);
97 @@ -1239,8 +1226,6 @@ static int info_bdq_free(struct dquot *d
100 * Initialize quota pointers in inode
101 - * We do things in a bit complicated way but by that we avoid calling
102 - * dqget() and thus filesystem callbacks under dqptr_sem.
104 int dquot_initialize(struct inode *inode, int type)
106 @@ -1270,8 +1255,7 @@ int dquot_initialize(struct inode *inode
107 got[cnt] = dqget(sb, id, cnt);
110 - down_write(&sb_dqopt(sb)->dqptr_sem);
111 - /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
112 + spin_lock(&inode->i_lock);
113 if (IS_NOQUOTA(inode))
115 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
116 @@ -1288,12 +1272,16 @@ int dquot_initialize(struct inode *inode
117 * did a write before quota was turned on
119 rsv = inode_get_rsv_space(inode);
121 + if (unlikely(rsv)) {
122 + spin_lock(&dq_data_lock);
123 dquot_resv_space(inode->i_dquot[cnt], rsv);
124 + spin_unlock(&dq_data_lock);
129 - up_write(&sb_dqopt(sb)->dqptr_sem);
130 + spin_unlock(&inode->i_lock);
132 /* Drop unused references */
133 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
135 @@ -1309,12 +1297,12 @@ int dquot_drop(struct inode *inode)
137 struct dquot *put[MAXQUOTAS];
139 - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
140 + spin_lock(&inode->i_lock);
141 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
142 put[cnt] = inode->i_dquot[cnt];
143 inode->i_dquot[cnt] = NULL;
145 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
146 + spin_unlock(&inode->i_lock);
148 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
150 @@ -1357,27 +1345,42 @@ static qsize_t *inode_reserved_space(str
151 return inode->i_sb->dq_op->get_reserved_space(inode);
154 +static inline void __inode_add_rsv_space(struct inode *inode, qsize_t number)
156 + *inode_reserved_space(inode) += number;
159 void inode_add_rsv_space(struct inode *inode, qsize_t number)
161 spin_lock(&inode->i_lock);
162 - *inode_reserved_space(inode) += number;
163 + __inode_add_rsv_space(inode, number);
164 spin_unlock(&inode->i_lock);
166 EXPORT_SYMBOL(inode_add_rsv_space);
168 -void inode_claim_rsv_space(struct inode *inode, qsize_t number)
169 +static inline void __inode_claim_rsv_space(struct inode *inode, qsize_t number)
171 - spin_lock(&inode->i_lock);
172 *inode_reserved_space(inode) -= number;
173 __inode_add_bytes(inode, number);
176 +void inode_claim_rsv_space(struct inode *inode, qsize_t number)
178 + spin_lock(&inode->i_lock);
179 + __inode_claim_rsv_space(inode, number);
180 spin_unlock(&inode->i_lock);
182 EXPORT_SYMBOL(inode_claim_rsv_space);
184 +static inline void __inode_sub_rsv_space(struct inode *inode, qsize_t number)
186 + *inode_reserved_space(inode) -= number;
189 void inode_sub_rsv_space(struct inode *inode, qsize_t number)
191 spin_lock(&inode->i_lock);
192 - *inode_reserved_space(inode) -= number;
193 + __inode_sub_rsv_space(inode, number);
194 spin_unlock(&inode->i_lock);
196 EXPORT_SYMBOL(inode_sub_rsv_space);
197 @@ -1388,9 +1391,8 @@ static qsize_t inode_get_rsv_space(struc
199 if (!inode->i_sb->dq_op->get_reserved_space)
201 - spin_lock(&inode->i_lock);
203 ret = *inode_reserved_space(inode);
204 - spin_unlock(&inode->i_lock);
208 @@ -1398,17 +1400,17 @@ static void inode_incr_space(struct inod
212 - inode_add_rsv_space(inode, number);
213 + __inode_add_rsv_space(inode, number);
215 - inode_add_bytes(inode, number);
216 + __inode_add_bytes(inode, number);
219 static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
222 - inode_sub_rsv_space(inode, number);
223 + __inode_sub_rsv_space(inode, number);
225 - inode_sub_bytes(inode, number);
226 + __inode_sub_bytes(inode, number);
230 @@ -1430,6 +1432,7 @@ int __dquot_alloc_space(struct inode *in
231 int warn = flags & DQUOT_SPACE_WARN;
232 int reserve = flags & DQUOT_SPACE_RESERVE;
233 int nofail = flags & DQUOT_SPACE_NOFAIL;
234 + struct dquot *dquot[MAXQUOTAS] = { NULL };
237 * First test before acquiring mutex - solves deadlocks when we
238 @@ -1440,47 +1443,53 @@ int __dquot_alloc_space(struct inode *in
242 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
243 + spin_lock(&inode->i_lock);
244 if (IS_NOQUOTA(inode)) {
245 inode_incr_space(inode, number, reserve);
247 + spin_unlock(&inode->i_lock);
251 - for (cnt = 0; cnt < MAXQUOTAS; cnt++)
252 + for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
253 warntype[cnt] = QUOTA_NL_NOWARN;
256 spin_lock(&dq_data_lock);
257 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
258 - if (!inode->i_dquot[cnt])
259 + dquot[cnt] = inode->i_dquot[cnt];
262 - if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
263 + atomic_inc(&dquot[cnt]->dq_count);
264 + if (check_bdq(dquot[cnt], number, warn, warntype+cnt)
265 == NO_QUOTA && !nofail) {
267 spin_unlock(&dq_data_lock);
268 + spin_unlock(&inode->i_lock);
272 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
273 - if (!inode->i_dquot[cnt])
277 - dquot_resv_space(inode->i_dquot[cnt], number);
278 + dquot_resv_space(dquot[cnt], number);
280 - dquot_incr_space(inode->i_dquot[cnt], number);
281 + dquot_incr_space(dquot[cnt], number);
283 inode_incr_space(inode, number, reserve);
284 spin_unlock(&dq_data_lock);
285 + spin_unlock(&inode->i_lock);
289 /* Dirtify all the dquots - this can block when journalling */
290 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
291 - if (inode->i_dquot[cnt])
292 - mark_dquot_dirty(inode->i_dquot[cnt]);
294 + mark_dquot_dirty(dquot[cnt]);
296 - flush_warnings(inode->i_dquot, warntype);
298 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
299 + flush_warnings(dquot, warntype);
300 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
305 @@ -1508,6 +1517,7 @@ int dquot_alloc_inode(const struct inode
307 int cnt, ret = NO_QUOTA;
308 char warntype[MAXQUOTAS];
309 + struct dquot *dquot[MAXQUOTAS] = { NULL };
311 /* First test before acquiring mutex - solves deadlocks when we
312 * re-enter the quota code and are already holding the mutex */
313 @@ -1515,35 +1525,41 @@ int dquot_alloc_inode(const struct inode
315 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
316 warntype[cnt] = QUOTA_NL_NOWARN;
317 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
319 + spin_lock(&((struct inode *)inode)->i_lock);
320 if (IS_NOQUOTA(inode)) {
321 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
322 + spin_unlock(&((struct inode *)inode)->i_lock);
325 spin_lock(&dq_data_lock);
326 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
327 - if (!inode->i_dquot[cnt])
328 + dquot[cnt] = inode->i_dquot[cnt];
331 - if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
332 + atomic_inc(&dquot[cnt]->dq_count);
333 + if (check_idq(dquot[cnt], number, warntype+cnt)
338 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
339 - if (!inode->i_dquot[cnt])
342 - dquot_incr_inodes(inode->i_dquot[cnt], number);
343 + dquot_incr_inodes(dquot[cnt], number);
347 spin_unlock(&dq_data_lock);
348 + spin_unlock(&((struct inode *)inode)->i_lock);
351 /* Dirtify all the dquots - this can block when journalling */
352 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
353 - if (inode->i_dquot[cnt])
354 - mark_dquot_dirty(inode->i_dquot[cnt]);
355 - flush_warnings(inode->i_dquot, warntype);
356 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
358 + mark_dquot_dirty(dquot[cnt]);
359 + flush_warnings(dquot, warntype);
360 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
364 EXPORT_SYMBOL(dquot_alloc_inode);
365 @@ -1552,34 +1568,40 @@ int dquot_claim_space(struct inode *inod
369 + struct dquot *dquot[MAXQUOTAS] = { NULL };
371 if (IS_NOQUOTA(inode)) {
372 inode_claim_rsv_space(inode, number);
376 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
377 + spin_lock(&inode->i_lock);
378 if (IS_NOQUOTA(inode)) {
379 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
380 - inode_claim_rsv_space(inode, number);
381 + __inode_claim_rsv_space(inode, number);
382 + spin_unlock(&inode->i_lock);
386 spin_lock(&dq_data_lock);
387 /* Claim reserved quotas to allocated quotas */
388 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
389 - if (inode->i_dquot[cnt])
390 - dquot_claim_reserved_space(inode->i_dquot[cnt],
392 + dquot[cnt] = inode->i_dquot[cnt];
394 + atomic_inc(&dquot[cnt]->dq_count);
395 + dquot_claim_reserved_space(dquot[cnt], number);
398 /* Update inode bytes */
399 - inode_claim_rsv_space(inode, number);
400 + __inode_claim_rsv_space(inode, number);
401 spin_unlock(&dq_data_lock);
402 + spin_unlock(&inode->i_lock);
404 /* Dirtify all the dquots - this can block when journalling */
405 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
406 - if (inode->i_dquot[cnt])
407 - mark_dquot_dirty(inode->i_dquot[cnt]);
408 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
410 + mark_dquot_dirty(dquot[cnt]);
411 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
416 @@ -1593,6 +1615,7 @@ int __dquot_free_space(struct inode *ino
418 char warntype[MAXQUOTAS];
419 int reserve = flags & DQUOT_SPACE_RESERVE;
420 + struct dquot *dquot[MAXQUOTAS] = { NULL };
422 /* First test before acquiring mutex - solves deadlocks when we
423 * re-enter the quota code and are already holding the mutex */
424 @@ -1602,34 +1625,37 @@ out_sub:
428 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
429 - /* Now recheck reliably when holding dqptr_sem */
430 + spin_lock(&inode->i_lock);
431 if (IS_NOQUOTA(inode)) {
432 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
433 + spin_unlock(&inode->i_lock);
436 spin_lock(&dq_data_lock);
437 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
438 - if (!inode->i_dquot[cnt])
439 + dquot[cnt] = inode->i_dquot[cnt];
442 - warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
443 + atomic_inc(&dquot[cnt]->dq_count);
444 + warntype[cnt] = info_bdq_free(dquot[cnt], number);
446 - dquot_free_reserved_space(inode->i_dquot[cnt], number);
447 + dquot_free_reserved_space(dquot[cnt], number);
449 - dquot_decr_space(inode->i_dquot[cnt], number);
450 + dquot_decr_space(dquot[cnt], number);
452 inode_decr_space(inode, number, reserve);
453 spin_unlock(&dq_data_lock);
454 + spin_unlock(&inode->i_lock);
459 /* Dirtify all the dquots - this can block when journalling */
460 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
461 - if (inode->i_dquot[cnt])
462 - mark_dquot_dirty(inode->i_dquot[cnt]);
464 - flush_warnings(inode->i_dquot, warntype);
465 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
467 + mark_dquot_dirty(dquot[cnt]);
469 + flush_warnings(dquot, warntype);
470 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
475 @@ -1656,32 +1682,37 @@ int dquot_free_inode(const struct inode
478 char warntype[MAXQUOTAS];
479 + struct dquot *dquot[MAXQUOTAS] = { NULL };
481 /* First test before acquiring mutex - solves deadlocks when we
482 * re-enter the quota code and are already holding the mutex */
483 if (IS_NOQUOTA(inode))
486 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
487 - /* Now recheck reliably when holding dqptr_sem */
488 + spin_lock(&((struct inode *)inode)->i_lock);
489 if (IS_NOQUOTA(inode)) {
490 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
491 + spin_unlock(&((struct inode *)inode)->i_lock);
494 spin_lock(&dq_data_lock);
495 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
496 - if (!inode->i_dquot[cnt])
497 + dquot[cnt] = inode->i_dquot[cnt];
500 - warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
501 - dquot_decr_inodes(inode->i_dquot[cnt], number);
502 + atomic_inc(&dquot[cnt]->dq_count);
503 + warntype[cnt] = info_idq_free(dquot[cnt], number);
504 + dquot_decr_inodes(dquot[cnt], number);
506 spin_unlock(&dq_data_lock);
507 + spin_unlock(&((struct inode *)inode)->i_lock);
509 /* Dirtify all the dquots - this can block when journalling */
510 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
511 - if (inode->i_dquot[cnt])
512 - mark_dquot_dirty(inode->i_dquot[cnt]);
513 - flush_warnings(inode->i_dquot, warntype);
514 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
516 + mark_dquot_dirty(dquot[cnt]);
517 + flush_warnings(dquot, warntype);
518 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
522 EXPORT_SYMBOL(dquot_free_inode);
523 @@ -1721,14 +1752,13 @@ int dquot_transfer(struct inode *inode,
524 transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
527 - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
528 - /* Now recheck reliably when holding dqptr_sem */
529 + spin_lock(&inode->i_lock);
530 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
531 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
532 + spin_unlock(&inode->i_lock);
535 spin_lock(&dq_data_lock);
536 - cur_space = inode_get_bytes(inode);
537 + cur_space = __inode_get_bytes(inode);
538 rsv_space = inode_get_rsv_space(inode);
539 space = cur_space + rsv_space;
540 /* Build the transfer_from list and check the limits */
541 @@ -1771,7 +1801,7 @@ int dquot_transfer(struct inode *inode,
542 inode->i_dquot[cnt] = transfer_to[cnt];
544 spin_unlock(&dq_data_lock);
545 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
546 + spin_unlock(&inode->i_lock);
548 /* Dirtify all the dquots - this can block when journalling */
549 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
550 @@ -1795,7 +1825,7 @@ put_all:
553 spin_unlock(&dq_data_lock);
554 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
555 + spin_unlock(&inode->i_lock);
556 /* Clear dquot pointers we don't want to dqput() */
557 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
558 transfer_from[cnt] = NULL;
559 @@ -2047,13 +2077,13 @@ static int vfs_load_quota_inode(struct i
560 /* We don't want quota and atime on quota files (deadlocks
561 * possible) Also nobody should write to the file - we use
562 * special IO operations which ignore the immutable bit. */
563 - down_write(&dqopt->dqptr_sem);
564 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
565 + spin_lock(&inode->i_lock);
566 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
568 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
569 + spin_unlock(&inode->i_lock);
570 mutex_unlock(&inode->i_mutex);
571 - up_write(&dqopt->dqptr_sem);
572 sb->dq_op->drop(inode);
575 @@ -2090,14 +2120,14 @@ out_file_init:
578 if (oldflags != -1) {
579 - down_write(&dqopt->dqptr_sem);
580 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
581 + spin_lock(&inode->i_lock);
582 /* Set the flags back (in the case of accidental quotaon()
583 * on a wrong file we don't want to mess up the flags) */
584 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
585 inode->i_flags |= oldflags;
586 + spin_unlock(&inode->i_lock);
587 mutex_unlock(&inode->i_mutex);
588 - up_write(&dqopt->dqptr_sem);
590 mutex_unlock(&dqopt->dqonoff_mutex);
592 Index: linux-2.6.32-358.0.1.el6/fs/quota/quota.c
593 ===================================================================
594 --- linux-2.6.32-358.0.1.el6.orig/fs/quota/quota.c
595 +++ linux-2.6.32-358.0.1.el6/fs/quota/quota.c
596 @@ -257,13 +257,13 @@ static int do_quotactl(struct super_bloc
600 - down_read(&sb_dqopt(sb)->dqptr_sem);
601 + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
602 if (!sb_has_quota_active(sb, type)) {
603 - up_read(&sb_dqopt(sb)->dqptr_sem);
604 + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
607 fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
608 - up_read(&sb_dqopt(sb)->dqptr_sem);
609 + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
610 if (copy_to_user(addr, &fmt, sizeof(fmt)))
613 Index: linux-2.6.32-358.0.1.el6/fs/stat.c
614 ===================================================================
615 --- linux-2.6.32-358.0.1.el6.orig/fs/stat.c
616 +++ linux-2.6.32-358.0.1.el6/fs/stat.c
617 @@ -422,9 +422,8 @@ void inode_add_bytes(struct inode *inode
619 EXPORT_SYMBOL(inode_add_bytes);
621 -void inode_sub_bytes(struct inode *inode, loff_t bytes)
622 +void __inode_sub_bytes(struct inode *inode, loff_t bytes)
624 - spin_lock(&inode->i_lock);
625 inode->i_blocks -= bytes >> 9;
627 if (inode->i_bytes < bytes) {
628 @@ -432,17 +431,28 @@ void inode_sub_bytes(struct inode *inode
629 inode->i_bytes += 512;
631 inode->i_bytes -= bytes;
634 +void inode_sub_bytes(struct inode *inode, loff_t bytes)
636 + spin_lock(&inode->i_lock);
637 + __inode_sub_bytes(inode, bytes);
638 spin_unlock(&inode->i_lock);
641 EXPORT_SYMBOL(inode_sub_bytes);
643 +loff_t __inode_get_bytes(struct inode *inode)
645 + return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
648 loff_t inode_get_bytes(struct inode *inode)
652 spin_lock(&inode->i_lock);
653 - ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
654 + ret = __inode_get_bytes(inode);
655 spin_unlock(&inode->i_lock);
658 Index: linux-2.6.32-358.0.1.el6/fs/super.c
659 ===================================================================
660 --- linux-2.6.32-358.0.1.el6.orig/fs/super.c
661 +++ linux-2.6.32-358.0.1.el6/fs/super.c
662 @@ -146,7 +146,6 @@ static struct super_block *alloc_super(s
663 mutex_init(&s->s_vfs_rename_mutex);
664 mutex_init(&s->s_dquot.dqio_mutex);
665 mutex_init(&s->s_dquot.dqonoff_mutex);
666 - init_rwsem(&s->s_dquot.dqptr_sem);
667 init_waitqueue_head(&s->s_wait_unfrozen);
668 s->s_maxbytes = MAX_NON_LFS;
669 s->dq_op = sb_dquot_ops;
670 Index: linux-2.6.32-358.0.1.el6/include/linux/fs.h
671 ===================================================================
672 --- linux-2.6.32-358.0.1.el6.orig/include/linux/fs.h
673 +++ linux-2.6.32-358.0.1.el6/include/linux/fs.h
674 @@ -2567,7 +2567,9 @@ extern void generic_fillattr(struct inod
675 extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
676 void __inode_add_bytes(struct inode *inode, loff_t bytes);
677 void inode_add_bytes(struct inode *inode, loff_t bytes);
678 +void __inode_sub_bytes(struct inode *inode, loff_t bytes);
679 void inode_sub_bytes(struct inode *inode, loff_t bytes);
680 +loff_t __inode_get_bytes(struct inode *inode);
681 loff_t inode_get_bytes(struct inode *inode);
682 void inode_set_bytes(struct inode *inode, loff_t bytes);