lustre/kernel_patches/patches/quota-replace-dqptr-sem.patch
1 Remove dqptr_sem (it is kept in struct quota_info only to preserve the
2 kernel ABI) and provide its functionality with other locks:
3 * i_dquot is now protected by i_lock, but only the pointer itself; the
4   contents of the dquot structure remain protected by dq_data_lock
5   (see the sketch below).
6 * Q_GETFMT is now protected by dqonoff_mutex instead of dqptr_sem.
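
For readers of the diff below, the per-inode charging paths now all follow
the same shape. Here is a minimal sketch modelled on the patched
__dquot_alloc_space(), with limit checking, reservations and warnings left
out. The function name example_charge_blocks() is invented for illustration,
and dquot_incr_space()/mark_dquot_dirty() are private to fs/quota/dquot.c,
so this is a sketch of the in-file pattern rather than code that builds on
its own:

static void example_charge_blocks(struct inode *inode, qsize_t nr)
{
        struct dquot *dquot[MAXQUOTAS] = { NULL };
        int cnt;

        spin_lock(&inode->i_lock);        /* pins i_dquot[] and S_NOQUOTA */
        if (IS_NOQUOTA(inode)) {
                spin_unlock(&inode->i_lock);
                return;
        }
        spin_lock(&dq_data_lock);         /* guards dquot->dq_dqb contents */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                dquot[cnt] = inode->i_dquot[cnt];
                if (!dquot[cnt])
                        continue;
                /* extra reference so the dquot survives past the unlock */
                atomic_inc(&dquot[cnt]->dq_count);
                dquot_incr_space(dquot[cnt], nr);
        }
        spin_unlock(&dq_data_lock);
        spin_unlock(&inode->i_lock);

        /* dirtying may do journal IO, so it runs outside both spinlocks */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if (dquot[cnt])
                        mark_dquot_dirty(dquot[cnt]);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                dqput(dquot[cnt]);        /* dqput(NULL) is a no-op */
}

The i_lock section pins the S_NOQUOTA flag and the i_dquot[] pointers,
dq_data_lock covers the usage counters, and the extra dq_count reference
lets the slow dirtying step run after both spinlocks are dropped; this is
why the spinlock ordering becomes i_lock > dq_data_lock > dq_list_lock.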
7
8  fs/quota/dquot.c   |  243 +++++++++++++++++++++++++++++------------------------
9  fs/quota/quota.c   |    6 -
10  fs/stat.c          |   16 ++-
11  fs/super.c         |    1
12  include/linux/fs.h |    2
13  5 files changed, 154 insertions(+), 114 deletions(-)
14
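The byte and reserved-space accounting in fs/stat.c and fs/quota/dquot.c is
likewise split into lockless __ variants for callers that already hold
i_lock, with the existing locked wrappers kept on top. A hypothetical
built-in caller (the function name and the 'freed' argument are invented
for illustration) would use the newly declared helpers like this:

#include <linux/kernel.h>
#include <linux/fs.h>

static void example_release_blocks(struct inode *inode, loff_t freed)
{
        spin_lock(&inode->i_lock);
        /* update i_blocks/i_bytes without re-taking i_lock */
        __inode_sub_bytes(inode, freed);
        pr_debug("inode %lu now accounts %lld bytes\n",
                 inode->i_ino, (long long)__inode_get_bytes(inode));
        spin_unlock(&inode->i_lock);
}

The locked inode_sub_bytes()/inode_get_bytes() entry points keep their old
behaviour for callers that do not already hold i_lock.
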
15 Index: linux-2.6.32-358.0.1.el6/fs/quota/dquot.c
16 ===================================================================
17 --- linux-2.6.32-358.0.1.el6.orig/fs/quota/dquot.c
18 +++ linux-2.6.32-358.0.1.el6/fs/quota/dquot.c
19 @@ -83,22 +83,17 @@
20  /*
21   * There are three quota SMP locks. dq_list_lock protects all lists with quotas
22   * and quota formats, dqstats structure containing statistics about the lists
23 - * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
24 - * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
25 - * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
26 - * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
27 - * modifications of quota state (on quotaon and quotaoff) and readers who care
28 - * about latest values take it as well.
29 + * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures.
30 + * dq_state_lock protects modifications of quota state (on quotaon and quotaoff)
31 + * and readers who care about latest values take it as well.
32   *
33 - * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
34 + * The spinlock ordering is hence: i_lock > dq_data_lock > dq_list_lock,
35   *   dq_list_lock > dq_state_lock
36   *
37   * Note that some things (eg. sb pointer, type, id) doesn't change during
38   * the life of the dquot structure and so needn't to be protected by a lock
39   *
40 - * Any operation working on dquots via inode pointers must hold dqptr_sem.  If
41 - * operation is just reading pointers from inode (or not using them at all) the
42 - * read lock is enough. If pointers are altered function must hold write lock
43 + * Any operation working on dquots via inode pointers must hold i_lock.
44   * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
45   * for altering the flag i_mutex is also needed).
46   *
47 @@ -112,15 +107,8 @@
48   * spinlock to internal buffers before writing.
49   *
50   * Lock ordering (including related VFS locks) is the following:
51 - *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
52 - *   dqio_mutex
53 - * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
54 - * dqptr_sem. But filesystem has to count with the fact that functions such as
55 - * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
56 - * from inside a transaction to keep filesystem consistency after a crash. Also
57 - * filesystems usually want to do some IO on dquot from ->mark_dirty which is
58 - * called with dqptr_sem held.
59 - * i_mutex on quota files is special (it's below dqio_mutex)
60 + *  i_mutex > dqonoff_sem > journal_lock > dquot->dq_lock > dqio_mutex
61 + *  i_mutex on quota files is special (it's below dqio_mutex)
62   */
63  
64  static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
65 @@ -868,7 +856,6 @@ static inline int dqput_blocks(struct dq
66  /*
67   * Remove references to dquots from inode and add dquot to list for freeing
68   * if we have the last referece to dquot
69 - * We can't race with anybody because we hold dqptr_sem for writing...
70   */
71  static int remove_inode_dquot_ref(struct inode *inode, int type,
72                                   struct list_head *tofree_head)
73 @@ -926,10 +913,12 @@ static void remove_dquot_ref(struct supe
74                  *  We have to scan also I_NEW inodes because they can already
75                  *  have quota pointer initialized. Luckily, we need to touch
76                  *  only quota pointers and these have separate locking
77 -                *  (dqptr_sem).
78 +                *  (i_lock).
79                  */
80 +               spin_lock(&inode->i_lock);
81                 if (!IS_NOQUOTA(inode))
82                         remove_inode_dquot_ref(inode, type, tofree_head);
83 +               spin_unlock(&inode->i_lock);
84         }
85         spin_unlock(&inode_lock);
86  }
87 @@ -940,9 +929,7 @@ static void drop_dquot_ref(struct super_
88         LIST_HEAD(tofree_head);
89  
90         if (sb->dq_op) {
91 -               down_write(&sb_dqopt(sb)->dqptr_sem);
92                 remove_dquot_ref(sb, type, &tofree_head);
93 -               up_write(&sb_dqopt(sb)->dqptr_sem);
94                 put_dquot_list(&tofree_head);
95         }
96  }
97 @@ -1239,8 +1226,6 @@ static int info_bdq_free(struct dquot *d
98  
99  /*
100   *     Initialize quota pointers in inode
101 - *     We do things in a bit complicated way but by that we avoid calling
102 - *     dqget() and thus filesystem callbacks under dqptr_sem.
103   */
104  int dquot_initialize(struct inode *inode, int type)
105  {
106 @@ -1270,8 +1255,7 @@ int dquot_initialize(struct inode *inode
107                 got[cnt] = dqget(sb, id, cnt);
108         }
109  
110 -       down_write(&sb_dqopt(sb)->dqptr_sem);
111 -       /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
112 +       spin_lock(&inode->i_lock);
113         if (IS_NOQUOTA(inode))
114                 goto out_err;
115         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
116 @@ -1288,12 +1272,16 @@ int dquot_initialize(struct inode *inode
117                          * did a write before quota was turned on
118                          */
119                         rsv = inode_get_rsv_space(inode);
120 -                       if (unlikely(rsv))
121 +                       if (unlikely(rsv)) {
122 +                               spin_lock(&dq_data_lock);
123                                 dquot_resv_space(inode->i_dquot[cnt], rsv);
124 +                               spin_unlock(&dq_data_lock);
125 +                       }
126                 }
127         }
128  out_err:
129 -       up_write(&sb_dqopt(sb)->dqptr_sem);
130 +       spin_unlock(&inode->i_lock);
131 +
132         /* Drop unused references */
133         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
134                 dqput(got[cnt]);
135 @@ -1309,12 +1297,12 @@ int dquot_drop(struct inode *inode)
136         int cnt;
137         struct dquot *put[MAXQUOTAS];
138  
139 -       down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
140 +       spin_lock(&inode->i_lock);
141         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
142                 put[cnt] = inode->i_dquot[cnt];
143                 inode->i_dquot[cnt] = NULL;
144         }
145 -       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
146 +       spin_unlock(&inode->i_lock);
147  
148         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
149                 dqput(put[cnt]);
150 @@ -1357,27 +1345,42 @@ static qsize_t *inode_reserved_space(str
151         return inode->i_sb->dq_op->get_reserved_space(inode);
152  }
153  
154 +static inline void __inode_add_rsv_space(struct inode *inode, qsize_t number)
155 +{
156 +       *inode_reserved_space(inode) += number;
157 +}
158 +
159  void inode_add_rsv_space(struct inode *inode, qsize_t number)
160  {
161         spin_lock(&inode->i_lock);
162 -       *inode_reserved_space(inode) += number;
163 +       __inode_add_rsv_space(inode, number);
164         spin_unlock(&inode->i_lock);
165  }
166  EXPORT_SYMBOL(inode_add_rsv_space);
167  
168 -void inode_claim_rsv_space(struct inode *inode, qsize_t number)
169 +static inline void __inode_claim_rsv_space(struct inode *inode, qsize_t number)
170  {
171 -       spin_lock(&inode->i_lock);
172         *inode_reserved_space(inode) -= number;
173         __inode_add_bytes(inode, number);
174 +}
175 +
176 +void inode_claim_rsv_space(struct inode *inode, qsize_t number)
177 +{
178 +       spin_lock(&inode->i_lock);
179 +       __inode_claim_rsv_space(inode, number);
180         spin_unlock(&inode->i_lock);
181  }
182  EXPORT_SYMBOL(inode_claim_rsv_space);
183  
184 +static inline void __inode_sub_rsv_space(struct inode *inode, qsize_t number)
185 +{
186 +       *inode_reserved_space(inode) -= number;
187 +}
188 +
189  void inode_sub_rsv_space(struct inode *inode, qsize_t number)
190  {
191         spin_lock(&inode->i_lock);
192 -       *inode_reserved_space(inode) -= number;
193 +       __inode_sub_rsv_space(inode, number);
194         spin_unlock(&inode->i_lock);
195  }
196  EXPORT_SYMBOL(inode_sub_rsv_space);
197 @@ -1388,9 +1391,8 @@ static qsize_t inode_get_rsv_space(struc
198  
199         if (!inode->i_sb->dq_op->get_reserved_space)
200                 return 0;
201 -       spin_lock(&inode->i_lock);
202 +
203         ret = *inode_reserved_space(inode);
204 -       spin_unlock(&inode->i_lock);
205         return ret;
206  }
207  
208 @@ -1398,17 +1400,17 @@ static void inode_incr_space(struct inod
209                                 int reserve)
210  {
211         if (reserve)
212 -               inode_add_rsv_space(inode, number);
213 +               __inode_add_rsv_space(inode, number);
214         else
215 -               inode_add_bytes(inode, number);
216 +               __inode_add_bytes(inode, number);
217  }
218  
219  static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
220  {
221         if (reserve)
222 -               inode_sub_rsv_space(inode, number);
223 +               __inode_sub_rsv_space(inode, number);
224         else
225 -               inode_sub_bytes(inode, number);
226 +               __inode_sub_bytes(inode, number);
227  }
228  
229  /*
230 @@ -1430,6 +1432,7 @@ int __dquot_alloc_space(struct inode *in
231         int warn = flags & DQUOT_SPACE_WARN;
232         int reserve = flags & DQUOT_SPACE_RESERVE;
233         int nofail = flags & DQUOT_SPACE_NOFAIL;
234 +       struct dquot *dquot[MAXQUOTAS] = { NULL };
235  
236         /*
237          * First test before acquiring mutex - solves deadlocks when we
238 @@ -1440,47 +1443,53 @@ int __dquot_alloc_space(struct inode *in
239                 goto out;
240         }
241  
242 -       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
243 +       spin_lock(&inode->i_lock);
244         if (IS_NOQUOTA(inode)) {
245                 inode_incr_space(inode, number, reserve);
246 -               goto out_unlock;
247 +               spin_unlock(&inode->i_lock);
248 +               goto out;
249         }
250  
251 -       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
252 +       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
253                 warntype[cnt] = QUOTA_NL_NOWARN;
254 +       }
255  
256         spin_lock(&dq_data_lock);
257         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
258 -               if (!inode->i_dquot[cnt])
259 +               dquot[cnt] = inode->i_dquot[cnt];
260 +               if (!dquot[cnt])
261                         continue;
262 -               if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
263 +               atomic_inc(&dquot[cnt]->dq_count);
264 +               if (check_bdq(dquot[cnt], number, warn, warntype+cnt)
265                     == NO_QUOTA && !nofail) {
266                         ret = NO_QUOTA;
267                         spin_unlock(&dq_data_lock);
268 +                       spin_unlock(&inode->i_lock);
269                         goto out_flush_warn;
270                 }
271         }
272         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
273 -               if (!inode->i_dquot[cnt])
274 +               if (!dquot[cnt])
275                         continue;
276                 if (reserve)
277 -                       dquot_resv_space(inode->i_dquot[cnt], number);
278 +                       dquot_resv_space(dquot[cnt], number);
279                 else
280 -                       dquot_incr_space(inode->i_dquot[cnt], number);
281 +                       dquot_incr_space(dquot[cnt], number);
282         }
283         inode_incr_space(inode, number, reserve);
284         spin_unlock(&dq_data_lock);
285 +       spin_unlock(&inode->i_lock);
286  
287         if (reserve)
288                 goto out_flush_warn;
289         /* Dirtify all the dquots - this can block when journalling */
290         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
291 -               if (inode->i_dquot[cnt])
292 -                       mark_dquot_dirty(inode->i_dquot[cnt]);
293 +               if (dquot[cnt])
294 +                       mark_dquot_dirty(dquot[cnt]);
295  out_flush_warn:
296 -       flush_warnings(inode->i_dquot, warntype);
297 -out_unlock:
298 -       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
299 +       flush_warnings(dquot, warntype);
300 +       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
301 +               dqput(dquot[cnt]);
302  out:
303         return ret;
304  }
305 @@ -1508,6 +1517,7 @@ int dquot_alloc_inode(const struct inode
306  {
307         int cnt, ret = NO_QUOTA;
308         char warntype[MAXQUOTAS];
309 +       struct dquot *dquot[MAXQUOTAS] = { NULL };
310  
311         /* First test before acquiring mutex - solves deadlocks when we
312           * re-enter the quota code and are already holding the mutex */
313 @@ -1515,35 +1525,41 @@ int dquot_alloc_inode(const struct inode
314                 return QUOTA_OK;
315         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
316                 warntype[cnt] = QUOTA_NL_NOWARN;
317 -       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
318 +
319 +       spin_lock(&((struct inode *)inode)->i_lock);
320         if (IS_NOQUOTA(inode)) {
321 -               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
322 +               spin_unlock(&((struct inode *)inode)->i_lock);
323                 return QUOTA_OK;
324         }
325         spin_lock(&dq_data_lock);
326         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
327 -               if (!inode->i_dquot[cnt])
328 +               dquot[cnt] = inode->i_dquot[cnt];
329 +               if (!dquot[cnt])
330                         continue;
331 -               if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
332 +               atomic_inc(&dquot[cnt]->dq_count);
333 +               if (check_idq(dquot[cnt], number, warntype+cnt)
334                     == NO_QUOTA)
335                         goto warn_put_all;
336         }
337  
338         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
339 -               if (!inode->i_dquot[cnt])
340 +               if (!dquot[cnt])
341                         continue;
342 -               dquot_incr_inodes(inode->i_dquot[cnt], number);
343 +               dquot_incr_inodes(dquot[cnt], number);
344         }
345         ret = QUOTA_OK;
346  warn_put_all:
347         spin_unlock(&dq_data_lock);
348 +       spin_unlock(&((struct inode *)inode)->i_lock);
349 +
350         if (ret == QUOTA_OK)
351                 /* Dirtify all the dquots - this can block when journalling */
352                 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
353 -                       if (inode->i_dquot[cnt])
354 -                               mark_dquot_dirty(inode->i_dquot[cnt]);
355 -       flush_warnings(inode->i_dquot, warntype);
356 -       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
357 +                       if (dquot[cnt])
358 +                               mark_dquot_dirty(dquot[cnt]);
359 +       flush_warnings(dquot, warntype);
360 +       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
361 +               dqput(dquot[cnt]);
362         return ret;
363  }
364  EXPORT_SYMBOL(dquot_alloc_inode);
365 @@ -1552,34 +1568,40 @@ int dquot_claim_space(struct inode *inod
366  {
367         int cnt;
368         int ret = QUOTA_OK;
369 +       struct dquot *dquot[MAXQUOTAS] = { NULL };
370  
371         if (IS_NOQUOTA(inode)) {
372                 inode_claim_rsv_space(inode, number);
373                 goto out;
374         }
375  
376 -       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
377 +       spin_lock(&inode->i_lock);
378         if (IS_NOQUOTA(inode))  {
379 -               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
380 -               inode_claim_rsv_space(inode, number);
381 +               __inode_claim_rsv_space(inode, number);
382 +               spin_unlock(&inode->i_lock);
383                 goto out;
384         }
385  
386         spin_lock(&dq_data_lock);
387         /* Claim reserved quotas to allocated quotas */
388         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
389 -               if (inode->i_dquot[cnt])
390 -                       dquot_claim_reserved_space(inode->i_dquot[cnt],
391 -                                                       number);
392 +               dquot[cnt] = inode->i_dquot[cnt];
393 +               if (dquot[cnt]) {
394 +                       atomic_inc(&dquot[cnt]->dq_count);
395 +                       dquot_claim_reserved_space(dquot[cnt], number);
396 +               }
397         }
398         /* Update inode bytes */
399 -       inode_claim_rsv_space(inode, number);
400 +       __inode_claim_rsv_space(inode, number);
401         spin_unlock(&dq_data_lock);
402 +       spin_unlock(&inode->i_lock);
403 +
404         /* Dirtify all the dquots - this can block when journalling */
405         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
406 -               if (inode->i_dquot[cnt])
407 -                       mark_dquot_dirty(inode->i_dquot[cnt]);
408 -       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
409 +               if (dquot[cnt])
410 +                       mark_dquot_dirty(dquot[cnt]);
411 +       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
412 +               dqput(dquot[cnt]);
413  out:
414         return ret;
415  }
416 @@ -1593,6 +1615,7 @@ int __dquot_free_space(struct inode *ino
417         unsigned int cnt;
418         char warntype[MAXQUOTAS];
419         int reserve = flags & DQUOT_SPACE_RESERVE;
420 +       struct dquot *dquot[MAXQUOTAS] = { NULL };
421  
422         /* First test before acquiring mutex - solves deadlocks when we
423           * re-enter the quota code and are already holding the mutex */
424 @@ -1602,34 +1625,37 @@ out_sub:
425                 return QUOTA_OK;
426         }
427  
428 -       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
429 -       /* Now recheck reliably when holding dqptr_sem */
430 +       spin_lock(&inode->i_lock);
431         if (IS_NOQUOTA(inode)) {
432 -               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
433 +               spin_unlock(&inode->i_lock);
434                 goto out_sub;
435         }
436         spin_lock(&dq_data_lock);
437         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
438 -               if (!inode->i_dquot[cnt])
439 +               dquot[cnt] = inode->i_dquot[cnt];
440 +               if (!dquot[cnt])
441                         continue;
442 -               warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
443 +               atomic_inc(&dquot[cnt]->dq_count);
444 +               warntype[cnt] = info_bdq_free(dquot[cnt], number);
445                 if (reserve)
446 -                       dquot_free_reserved_space(inode->i_dquot[cnt], number);
447 +                       dquot_free_reserved_space(dquot[cnt], number);
448                 else
449 -                       dquot_decr_space(inode->i_dquot[cnt], number);
450 +                       dquot_decr_space(dquot[cnt], number);
451         }
452         inode_decr_space(inode, number, reserve);
453         spin_unlock(&dq_data_lock);
454 +       spin_unlock(&inode->i_lock);
455  
456         if (reserve)
457 -               goto out_unlock;
458 +               goto out;
459         /* Dirtify all the dquots - this can block when journalling */
460         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
461 -               if (inode->i_dquot[cnt])
462 -                       mark_dquot_dirty(inode->i_dquot[cnt]);
463 -out_unlock:
464 -       flush_warnings(inode->i_dquot, warntype);
465 -       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
466 +               if (dquot[cnt])
467 +                       mark_dquot_dirty(dquot[cnt]);
468 +out:
469 +       flush_warnings(dquot, warntype);
470 +       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
471 +               dqput(dquot[cnt]);
472         return QUOTA_OK;
473  }
474  
475 @@ -1656,32 +1682,37 @@ int dquot_free_inode(const struct inode 
476  {
477         unsigned int cnt;
478         char warntype[MAXQUOTAS];
479 +       struct dquot *dquot[MAXQUOTAS] = { NULL };
480  
481         /* First test before acquiring mutex - solves deadlocks when we
482           * re-enter the quota code and are already holding the mutex */
483         if (IS_NOQUOTA(inode))
484                 return QUOTA_OK;
485  
486 -       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
487 -       /* Now recheck reliably when holding dqptr_sem */
488 +       spin_lock(&((struct inode *)inode)->i_lock);
489         if (IS_NOQUOTA(inode)) {
490 -               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
491 +               spin_unlock(&((struct inode *)inode)->i_lock);
492                 return QUOTA_OK;
493         }
494         spin_lock(&dq_data_lock);
495         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
496 -               if (!inode->i_dquot[cnt])
497 +               dquot[cnt] = inode->i_dquot[cnt];
498 +               if (!dquot[cnt])
499                         continue;
500 -               warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
501 -               dquot_decr_inodes(inode->i_dquot[cnt], number);
502 +               atomic_inc(&dquot[cnt]->dq_count);
503 +               warntype[cnt] = info_idq_free(dquot[cnt], number);
504 +               dquot_decr_inodes(dquot[cnt], number);
505         }
506         spin_unlock(&dq_data_lock);
507 +       spin_unlock(&((struct inode *)inode)->i_lock);
508 +
509         /* Dirtify all the dquots - this can block when journalling */
510         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
511 -               if (inode->i_dquot[cnt])
512 -                       mark_dquot_dirty(inode->i_dquot[cnt]);
513 -       flush_warnings(inode->i_dquot, warntype);
514 -       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
515 +               if (dquot[cnt])
516 +                       mark_dquot_dirty(dquot[cnt]);
517 +       flush_warnings(dquot, warntype);
518 +       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
519 +               dqput(dquot[cnt]);
520         return QUOTA_OK;
521  }
522  EXPORT_SYMBOL(dquot_free_inode);
523 @@ -1721,14 +1752,13 @@ int dquot_transfer(struct inode *inode, 
524                 transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
525                                               GRPQUOTA);
526  
527 -       down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
528 -       /* Now recheck reliably when holding dqptr_sem */
529 +       spin_lock(&inode->i_lock);
530         if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
531 -               up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
532 +               spin_unlock(&inode->i_lock);
533                 goto put_all;
534         }
535         spin_lock(&dq_data_lock);
536 -       cur_space = inode_get_bytes(inode);
537 +       cur_space = __inode_get_bytes(inode);
538         rsv_space = inode_get_rsv_space(inode);
539         space = cur_space + rsv_space;
540         /* Build the transfer_from list and check the limits */
541 @@ -1771,7 +1801,7 @@ int dquot_transfer(struct inode *inode, 
542                 inode->i_dquot[cnt] = transfer_to[cnt];
543         }
544         spin_unlock(&dq_data_lock);
545 -       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
546 +       spin_unlock(&inode->i_lock);
547  
548         /* Dirtify all the dquots - this can block when journalling */
549         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
550 @@ -1795,7 +1825,7 @@ put_all:
551         return ret;
552  over_quota:
553         spin_unlock(&dq_data_lock);
554 -       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
555 +       spin_unlock(&inode->i_lock);
556         /* Clear dquot pointers we don't want to dqput() */
557         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
558                 transfer_from[cnt] = NULL;
559 @@ -2047,13 +2077,13 @@ static int vfs_load_quota_inode(struct i
560                 /* We don't want quota and atime on quota files (deadlocks
561                  * possible) Also nobody should write to the file - we use
562                  * special IO operations which ignore the immutable bit. */
563 -               down_write(&dqopt->dqptr_sem);
564                 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
565 +               spin_lock(&inode->i_lock);
566                 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
567                                              S_NOQUOTA);
568                 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
569 +               spin_unlock(&inode->i_lock);
570                 mutex_unlock(&inode->i_mutex);
571 -               up_write(&dqopt->dqptr_sem);
572                 sb->dq_op->drop(inode);
573         }
574  
575 @@ -2090,14 +2120,14 @@ out_file_init:
576         iput(inode);
577  out_lock:
578         if (oldflags != -1) {
579 -               down_write(&dqopt->dqptr_sem);
580                 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
581 +               spin_lock(&inode->i_lock);
582                 /* Set the flags back (in the case of accidental quotaon()
583                  * on a wrong file we don't want to mess up the flags) */
584                 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
585                 inode->i_flags |= oldflags;
586 +               spin_unlock(&inode->i_lock);
587                 mutex_unlock(&inode->i_mutex);
588 -               up_write(&dqopt->dqptr_sem);
589         }
590         mutex_unlock(&dqopt->dqonoff_mutex);
591  out_fmt:
592 Index: linux-2.6.32-358.0.1.el6/fs/quota/quota.c
593 ===================================================================
594 --- linux-2.6.32-358.0.1.el6.orig/fs/quota/quota.c
595 +++ linux-2.6.32-358.0.1.el6/fs/quota/quota.c
596 @@ -257,13 +257,13 @@ static int do_quotactl(struct super_bloc
597                 case Q_GETFMT: {
598                         __u32 fmt;
599  
600 -                       down_read(&sb_dqopt(sb)->dqptr_sem);
601 +                       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
602                         if (!sb_has_quota_active(sb, type)) {
603 -                               up_read(&sb_dqopt(sb)->dqptr_sem);
604 +                               mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
605                                 return -ESRCH;
606                         }
607                         fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
608 -                       up_read(&sb_dqopt(sb)->dqptr_sem);
609 +                       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
610                         if (copy_to_user(addr, &fmt, sizeof(fmt)))
611                                 return -EFAULT;
612                         return 0;
613 Index: linux-2.6.32-358.0.1.el6/fs/stat.c
614 ===================================================================
615 --- linux-2.6.32-358.0.1.el6.orig/fs/stat.c
616 +++ linux-2.6.32-358.0.1.el6/fs/stat.c
617 @@ -422,9 +422,8 @@ void inode_add_bytes(struct inode *inode
618  
619  EXPORT_SYMBOL(inode_add_bytes);
620  
621 -void inode_sub_bytes(struct inode *inode, loff_t bytes)
622 +void __inode_sub_bytes(struct inode *inode, loff_t bytes)
623  {
624 -       spin_lock(&inode->i_lock);
625         inode->i_blocks -= bytes >> 9;
626         bytes &= 511;
627         if (inode->i_bytes < bytes) {
628 @@ -432,17 +431,28 @@ void inode_sub_bytes(struct inode *inode
629                 inode->i_bytes += 512;
630         }
631         inode->i_bytes -= bytes;
632 +}
633 +
634 +void inode_sub_bytes(struct inode *inode, loff_t bytes)
635 +{
636 +       spin_lock(&inode->i_lock);
637 +       __inode_sub_bytes(inode, bytes);
638         spin_unlock(&inode->i_lock);
639  }
640  
641  EXPORT_SYMBOL(inode_sub_bytes);
642  
643 +loff_t __inode_get_bytes(struct inode *inode)
644 +{
645 +       return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
646 +}
647 +
648  loff_t inode_get_bytes(struct inode *inode)
649  {
650         loff_t ret;
651  
652         spin_lock(&inode->i_lock);
653 -       ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
654 +       ret = __inode_get_bytes(inode);
655         spin_unlock(&inode->i_lock);
656         return ret;
657  }
658 Index: linux-2.6.32-358.0.1.el6/fs/super.c
659 ===================================================================
660 --- linux-2.6.32-358.0.1.el6.orig/fs/super.c
661 +++ linux-2.6.32-358.0.1.el6/fs/super.c
662 @@ -146,7 +146,6 @@ static struct super_block *alloc_super(s
663                 mutex_init(&s->s_vfs_rename_mutex);
664                 mutex_init(&s->s_dquot.dqio_mutex);
665                 mutex_init(&s->s_dquot.dqonoff_mutex);
666 -               init_rwsem(&s->s_dquot.dqptr_sem);
667                 init_waitqueue_head(&s->s_wait_unfrozen);
668                 s->s_maxbytes = MAX_NON_LFS;
669                 s->dq_op = sb_dquot_ops;
670 Index: linux-2.6.32-358.0.1.el6/include/linux/fs.h
671 ===================================================================
672 --- linux-2.6.32-358.0.1.el6.orig/include/linux/fs.h
673 +++ linux-2.6.32-358.0.1.el6/include/linux/fs.h
674 @@ -2567,7 +2567,9 @@ extern void generic_fillattr(struct inod
675  extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
676  void __inode_add_bytes(struct inode *inode, loff_t bytes);
677  void inode_add_bytes(struct inode *inode, loff_t bytes);
678 +void __inode_sub_bytes(struct inode *inode, loff_t bytes);
679  void inode_sub_bytes(struct inode *inode, loff_t bytes);
680 +loff_t __inode_get_bytes(struct inode *inode);
681  loff_t inode_get_bytes(struct inode *inode);
682  void inode_set_bytes(struct inode *inode, loff_t bytes);
683