lustre/kernel_patches/patches/replace_dqptr_sem.patch (fs/lustre-release.git)
1 Remove dqptr_sem (but keep it in struct quota_info so the kernel ABI
2 stays unchanged); the functionality of this lock is now provided by
3 other locks (a sketch of the resulting rules follows below):
4 * i_dquot is protected by i_lock, but only the pointers themselves;
5   the contents of each dquot structure are protected by dq_data_lock.
6 * Q_GETFMT is now protected by dqonoff_mutex instead of dqptr_sem.
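
A minimal sketch of those rules follows; it is illustrative only and not part of
the patch. It assumes kernel context in fs/quota/dquot.c on the patched
2.6.32-279.el6 tree (so dq_data_lock, MAXQUOTAS and IS_NOQUOTA() are in scope),
and the helper name is hypothetical.

/* Illustrative sketch, not part of the patch: the post-patch locking rules.
 * Lock order: i_lock > dq_data_lock > dq_list_lock, dq_list_lock > dq_state_lock. */
static void example_inspect_dquots(struct inode *inode)
{
	struct dquot *dquot;
	int cnt;

	spin_lock(&inode->i_lock);		/* i_dquot[] and S_NOQUOTA are stable now */
	if (IS_NOQUOTA(inode)) {
		spin_unlock(&inode->i_lock);
		return;
	}
	spin_lock(&dq_data_lock);		/* nests inside i_lock */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = inode->i_dquot[cnt];	/* the pointer read needs only i_lock */
		if (!dquot)
			continue;
		/* dquot->dq_dqb may be read or updated here, under dq_data_lock */
	}
	spin_unlock(&dq_data_lock);
	spin_unlock(&inode->i_lock);
}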
7
8  fs/quota/dquot.c   |  243 +++++++++++++++++++++++++++++------------------------
9  fs/quota/quota.c   |    6 -
10  fs/stat.c          |   16 ++-
11  fs/super.c         |    1
12  include/linux/fs.h |    2
13  5 files changed, 154 insertions(+), 114 deletions(-)
14
15 --- linux-2.6.32-279.el6.x86_64/fs/quota/dquot.c        2012-06-14 05:40:59.000000000 +0800
16 +++ linux-2.6.32-279.el6.x86_64.quota/fs/quota/dquot.c  2013-03-07 16:24:24.602781757 +0800
17 @@ -83,22 +83,17 @@
18  /*
19   * There are three quota SMP locks. dq_list_lock protects all lists with quotas
20   * and quota formats, dqstats structure containing statistics about the lists
21 - * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
22 - * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
23 - * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
24 - * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
25 - * modifications of quota state (on quotaon and quotaoff) and readers who care
26 - * about latest values take it as well.
27 + * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures.
28 + * dq_state_lock protects modifications of quota state (on quotaon and quotaoff)
29 + * and readers who care about latest values take it as well.
30   *
31 - * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
32 + * The spinlock ordering is hence: i_lock > dq_data_lock > dq_list_lock,
33   *   dq_list_lock > dq_state_lock
34   *
35   * Note that some things (eg. sb pointer, type, id) doesn't change during
36   * the life of the dquot structure and so needn't to be protected by a lock
37   *
38 - * Any operation working on dquots via inode pointers must hold dqptr_sem.  If
39 - * operation is just reading pointers from inode (or not using them at all) the
40 - * read lock is enough. If pointers are altered function must hold write lock
41 + * Any operation working on dquots via inode pointers must hold i_lock.
42   * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
43   * for altering the flag i_mutex is also needed).
44   *
45 @@ -112,15 +107,8 @@
46   * spinlock to internal buffers before writing.
47   *
48   * Lock ordering (including related VFS locks) is the following:
49 - *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
50 - *   dqio_mutex
51 - * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
52 - * dqptr_sem. But filesystem has to count with the fact that functions such as
53 - * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
54 - * from inside a transaction to keep filesystem consistency after a crash. Also
55 - * filesystems usually want to do some IO on dquot from ->mark_dirty which is
56 - * called with dqptr_sem held.
57 - * i_mutex on quota files is special (it's below dqio_mutex)
58 + *  i_mutex > dqonoff_sem > journal_lock > dquot->dq_lock > dqio_mutex
59 + *  i_mutex on quota files is special (it's below dqio_mutex)
60   */
61  
62  static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
63 @@ -868,7 +856,6 @@
64  /*
65   * Remove references to dquots from inode and add dquot to list for freeing
66   * if we have the last referece to dquot
67 - * We can't race with anybody because we hold dqptr_sem for writing...
68   */
69  static int remove_inode_dquot_ref(struct inode *inode, int type,
70                                   struct list_head *tofree_head)
71 @@ -926,10 +913,12 @@
72                  *  We have to scan also I_NEW inodes because they can already
73                  *  have quota pointer initialized. Luckily, we need to touch
74                  *  only quota pointers and these have separate locking
75 -                *  (dqptr_sem).
76 +                *  (i_lock).
77                  */
78 +               spin_lock(&inode->i_lock);
79                 if (!IS_NOQUOTA(inode))
80                         remove_inode_dquot_ref(inode, type, tofree_head);
81 +               spin_unlock(&inode->i_lock);
82         }
83         spin_unlock(&inode_lock);
84  }
85 @@ -940,9 +929,7 @@
86         LIST_HEAD(tofree_head);
87  
88         if (sb->dq_op) {
89 -               down_write(&sb_dqopt(sb)->dqptr_sem);
90                 remove_dquot_ref(sb, type, &tofree_head);
91 -               up_write(&sb_dqopt(sb)->dqptr_sem);
92                 put_dquot_list(&tofree_head);
93         }
94  }
95 @@ -1239,8 +1226,6 @@
96  
97  /*
98   *     Initialize quota pointers in inode
99 - *     We do things in a bit complicated way but by that we avoid calling
100 - *     dqget() and thus filesystem callbacks under dqptr_sem.
101   */
102  int dquot_initialize(struct inode *inode, int type)
103  {
104 @@ -1270,8 +1255,7 @@
105                 got[cnt] = dqget(sb, id, cnt);
106         }
107  
108 -       down_write(&sb_dqopt(sb)->dqptr_sem);
109 -       /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
110 +       spin_lock(&inode->i_lock);
111         if (IS_NOQUOTA(inode))
112                 goto out_err;
113         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
114 @@ -1288,12 +1272,16 @@
115                          * did a write before quota was turned on
116                          */
117                         rsv = inode_get_rsv_space(inode);
118 -                       if (unlikely(rsv))
119 +                       if (unlikely(rsv)) {
120 +                               spin_lock(&dq_data_lock);
121                                 dquot_resv_space(inode->i_dquot[cnt], rsv);
122 +                               spin_unlock(&dq_data_lock);
123 +                       }
124                 }
125         }
126  out_err:
127 -       up_write(&sb_dqopt(sb)->dqptr_sem);
128 +       spin_unlock(&inode->i_lock);
129 +
130         /* Drop unused references */
131         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
132                 dqput(got[cnt]);
133 @@ -1309,12 +1297,12 @@
134         int cnt;
135         struct dquot *put[MAXQUOTAS];
136  
137 -       down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
138 +       spin_lock(&inode->i_lock);
139         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
140                 put[cnt] = inode->i_dquot[cnt];
141                 inode->i_dquot[cnt] = NULL;
142         }
143 -       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
144 +       spin_unlock(&inode->i_lock);
145  
146         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
147                 dqput(put[cnt]);
148 @@ -1357,27 +1345,42 @@
149         return inode->i_sb->dq_op->get_reserved_space(inode);
150  }
151  
152 +static inline void __inode_add_rsv_space(struct inode *inode, qsize_t number)
153 +{
154 +       *inode_reserved_space(inode) += number;
155 +}
156 +
157  void inode_add_rsv_space(struct inode *inode, qsize_t number)
158  {
159         spin_lock(&inode->i_lock);
160 -       *inode_reserved_space(inode) += number;
161 +       __inode_add_rsv_space(inode, number);
162         spin_unlock(&inode->i_lock);
163  }
164  EXPORT_SYMBOL(inode_add_rsv_space);
165  
166 -void inode_claim_rsv_space(struct inode *inode, qsize_t number)
167 +static inline void __inode_claim_rsv_space(struct inode *inode, qsize_t number)
168  {
169 -       spin_lock(&inode->i_lock);
170         *inode_reserved_space(inode) -= number;
171         __inode_add_bytes(inode, number);
172 +}
173 +
174 +void inode_claim_rsv_space(struct inode *inode, qsize_t number)
175 +{
176 +       spin_lock(&inode->i_lock);
177 +       __inode_claim_rsv_space(inode, number);
178         spin_unlock(&inode->i_lock);
179  }
180  EXPORT_SYMBOL(inode_claim_rsv_space);
181  
182 +static inline void __inode_sub_rsv_space(struct inode *inode, qsize_t number)
183 +{
184 +       *inode_reserved_space(inode) -= number;
185 +}
186 +
187  void inode_sub_rsv_space(struct inode *inode, qsize_t number)
188  {
189         spin_lock(&inode->i_lock);
190 -       *inode_reserved_space(inode) -= number;
191 +       __inode_sub_rsv_space(inode, number);
192         spin_unlock(&inode->i_lock);
193  }
194  EXPORT_SYMBOL(inode_sub_rsv_space);
195 @@ -1388,9 +1391,8 @@
196  
197         if (!inode->i_sb->dq_op->get_reserved_space)
198                 return 0;
199 -       spin_lock(&inode->i_lock);
200 +
201         ret = *inode_reserved_space(inode);
202 -       spin_unlock(&inode->i_lock);
203         return ret;
204  }
205  
206 @@ -1398,17 +1400,17 @@
207                                 int reserve)
208  {
209         if (reserve)
210 -               inode_add_rsv_space(inode, number);
211 +               __inode_add_rsv_space(inode, number);
212         else
213 -               inode_add_bytes(inode, number);
214 +               __inode_add_bytes(inode, number);
215  }
216  
217  static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
218  {
219         if (reserve)
220 -               inode_sub_rsv_space(inode, number);
221 +               __inode_sub_rsv_space(inode, number);
222         else
223 -               inode_sub_bytes(inode, number);
224 +               __inode_sub_bytes(inode, number);
225  }
226  
227  /*
228 @@ -1430,6 +1432,7 @@
229         int warn = flags & DQUOT_SPACE_WARN;
230         int reserve = flags & DQUOT_SPACE_RESERVE;
231         int nofail = flags & DQUOT_SPACE_NOFAIL;
232 +       struct dquot *dquot[MAXQUOTAS] = { NULL };
233  
234         /*
235          * First test before acquiring mutex - solves deadlocks when we
236 @@ -1440,20 +1443,24 @@
237                 goto out;
238         }
239  
240 -       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
241 +       spin_lock(&inode->i_lock);
242         if (IS_NOQUOTA(inode)) {
243                 inode_incr_space(inode, number, reserve);
244 -               goto out_unlock;
245 +               spin_unlock(&inode->i_lock);
246 +               goto out;
247         }
248  
249 -       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
250 +       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
251                 warntype[cnt] = QUOTA_NL_NOWARN;
252 +       }
253  
254         spin_lock(&dq_data_lock);
255         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
256 -               if (!inode->i_dquot[cnt])
257 +               dquot[cnt] = inode->i_dquot[cnt];
258 +               if (!dquot[cnt])
259                         continue;
260 -               if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
261 +               atomic_inc(&dquot[cnt]->dq_count);
262 +               if (check_bdq(dquot[cnt], number, warn, warntype+cnt)
263                     == NO_QUOTA && !nofail) {
264                         ret = NO_QUOTA;
265                         spin_unlock(&dq_data_lock);
266 @@ -1461,26 +1468,27 @@
267                 }
268         }
269         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
270 -               if (!inode->i_dquot[cnt])
271 +               if (!dquot[cnt])
272                         continue;
273                 if (reserve)
274 -                       dquot_resv_space(inode->i_dquot[cnt], number);
275 +                       dquot_resv_space(dquot[cnt], number);
276                 else
277 -                       dquot_incr_space(inode->i_dquot[cnt], number);
278 +                       dquot_incr_space(dquot[cnt], number);
279         }
280         inode_incr_space(inode, number, reserve);
281         spin_unlock(&dq_data_lock);
282 +       spin_unlock(&inode->i_lock);
283  
284         if (reserve)
285                 goto out_flush_warn;
286         /* Dirtify all the dquots - this can block when journalling */
287         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
288 -               if (inode->i_dquot[cnt])
289 -                       mark_dquot_dirty(inode->i_dquot[cnt]);
290 +               if (dquot[cnt])
291 +                       mark_dquot_dirty(dquot[cnt]);
292  out_flush_warn:
293 -       flush_warnings(inode->i_dquot, warntype);
294 -out_unlock:
295 -       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
296 +       flush_warnings(dquot, warntype);
297 +       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
298 +               dqput(dquot[cnt]);
299  out:
300         return ret;
301  }
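
Because i_lock and dq_data_lock are spinlocks, the blocking work that used to run
under dqptr_sem (mark_dquot_dirty() can do journal I/O, flush_warnings() may write
to the tty or send a netlink message) must now happen after the locks are dropped.
Each charging path therefore copies the i_dquot[] pointers into a local array and
pins them with atomic_inc(&dq_count) while i_lock is held, then dirties and
releases them afterwards. A condensed, illustrative sketch of that pattern follows;
the function name is hypothetical and the authoritative code is the
dquot_alloc_space() hunk above and the similar hunks below.

/* Illustrative sketch, not part of the patch: the reference-pinning pattern
 * applied to dquot_alloc_space(), dquot_alloc_inode(), dquot_free_space()
 * and dquot_free_inode(). Assumes fs/quota/dquot.c context. */
static void example_charge_pattern(struct inode *inode, qsize_t number)
{
	struct dquot *dquot[MAXQUOTAS] = { NULL };
	int cnt;

	spin_lock(&inode->i_lock);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot[cnt] = inode->i_dquot[cnt];
		if (!dquot[cnt])
			continue;
		atomic_inc(&dquot[cnt]->dq_count);	/* pin; dropped by dqput() below */
		dquot_incr_space(dquot[cnt], number);	/* dq_dqb changes under dq_data_lock */
	}
	spin_unlock(&dq_data_lock);
	spin_unlock(&inode->i_lock);

	/* Blocking operations only after the spinlocks are released. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (dquot[cnt])
			mark_dquot_dirty(dquot[cnt]);	/* may do journal I/O */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		dqput(dquot[cnt]);			/* dqput(NULL) is a no-op */
}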
302 @@ -1508,6 +1516,7 @@
303  {
304         int cnt, ret = NO_QUOTA;
305         char warntype[MAXQUOTAS];
306 +       struct dquot *dquot[MAXQUOTAS] = { NULL };
307  
308         /* First test before acquiring mutex - solves deadlocks when we
309           * re-enter the quota code and are already holding the mutex */
310 @@ -1515,35 +1524,41 @@
311                 return QUOTA_OK;
312         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
313                 warntype[cnt] = QUOTA_NL_NOWARN;
314 -       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
315 +
316 +       spin_lock(&((struct inode *)inode)->i_lock);
317         if (IS_NOQUOTA(inode)) {
318 -               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
319 +               spin_unlock(&((struct inode *)inode)->i_lock);
320                 return QUOTA_OK;
321         }
322         spin_lock(&dq_data_lock);
323         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
324 -               if (!inode->i_dquot[cnt])
325 +               dquot[cnt] = inode->i_dquot[cnt];
326 +               if (!dquot[cnt])
327                         continue;
328 -               if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
329 +               atomic_inc(&dquot[cnt]->dq_count);
330 +               if (check_idq(dquot[cnt], number, warntype+cnt)
331                     == NO_QUOTA)
332                         goto warn_put_all;
333         }
334  
335         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
336 -               if (!inode->i_dquot[cnt])
337 +               if (!dquot[cnt])
338                         continue;
339 -               dquot_incr_inodes(inode->i_dquot[cnt], number);
340 +               dquot_incr_inodes(dquot[cnt], number);
341         }
342         ret = QUOTA_OK;
343  warn_put_all:
344         spin_unlock(&dq_data_lock);
345 +       spin_unlock(&((struct inode *)inode)->i_lock);
346 +
347         if (ret == QUOTA_OK)
348                 /* Dirtify all the dquots - this can block when journalling */
349                 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
350 -                       if (inode->i_dquot[cnt])
351 -                               mark_dquot_dirty(inode->i_dquot[cnt]);
352 -       flush_warnings(inode->i_dquot, warntype);
353 -       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
354 +                       if (dquot[cnt])
355 +                               mark_dquot_dirty(dquot[cnt]);
356 +       flush_warnings(dquot, warntype);
357 +       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
358 +               dqput(dquot[cnt]);
359         return ret;
360  }
361  EXPORT_SYMBOL(dquot_alloc_inode);
362 @@ -1552,34 +1567,40 @@
363  {
364         int cnt;
365         int ret = QUOTA_OK;
366 +       struct dquot *dquot[MAXQUOTAS] = { NULL };
367  
368         if (IS_NOQUOTA(inode)) {
369                 inode_claim_rsv_space(inode, number);
370                 goto out;
371         }
372  
373 -       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
374 +       spin_lock(&inode->i_lock);
375         if (IS_NOQUOTA(inode))  {
376 -               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
377 -               inode_claim_rsv_space(inode, number);
378 +               __inode_claim_rsv_space(inode, number);
379 +               spin_unlock(&inode->i_lock);
380                 goto out;
381         }
382  
383         spin_lock(&dq_data_lock);
384         /* Claim reserved quotas to allocated quotas */
385         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
386 -               if (inode->i_dquot[cnt])
387 -                       dquot_claim_reserved_space(inode->i_dquot[cnt],
388 -                                                       number);
389 +               dquot[cnt] = inode->i_dquot[cnt];
390 +               if (dquot[cnt]) {
391 +                       atomic_inc(&dquot[cnt]->dq_count);
392 +                       dquot_claim_reserved_space(dquot[cnt], number);
393 +               }
394         }
395         /* Update inode bytes */
396 -       inode_claim_rsv_space(inode, number);
397 +       __inode_claim_rsv_space(inode, number);
398         spin_unlock(&dq_data_lock);
399 +       spin_unlock(&inode->i_lock);
400 +
401         /* Dirtify all the dquots - this can block when journalling */
402         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
403 -               if (inode->i_dquot[cnt])
404 -                       mark_dquot_dirty(inode->i_dquot[cnt]);
405 -       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
406 +               if (dquot[cnt])
407 +                       mark_dquot_dirty(dquot[cnt]);
408 +       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
409 +               dqput(dquot[cnt]);
410  out:
411         return ret;
412  }
413 @@ -1593,6 +1614,7 @@
414         unsigned int cnt;
415         char warntype[MAXQUOTAS];
416         int reserve = flags & DQUOT_SPACE_RESERVE;
417 +       struct dquot *dquot[MAXQUOTAS] = { NULL };
418  
419         /* First test before acquiring mutex - solves deadlocks when we
420           * re-enter the quota code and are already holding the mutex */
421 @@ -1602,34 +1624,37 @@
422                 return QUOTA_OK;
423         }
424  
425 -       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
426 -       /* Now recheck reliably when holding dqptr_sem */
427 +       spin_lock(&inode->i_lock);
428         if (IS_NOQUOTA(inode)) {
429 -               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
430 +               spin_unlock(&inode->i_lock);
431                 goto out_sub;
432         }
433         spin_lock(&dq_data_lock);
434         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
435 -               if (!inode->i_dquot[cnt])
436 +               dquot[cnt] = inode->i_dquot[cnt];
437 +               if (!dquot[cnt])
438                         continue;
439 -               warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
440 +               atomic_inc(&dquot[cnt]->dq_count);
441 +               warntype[cnt] = info_bdq_free(dquot[cnt], number);
442                 if (reserve)
443 -                       dquot_free_reserved_space(inode->i_dquot[cnt], number);
444 +                       dquot_free_reserved_space(dquot[cnt], number);
445                 else
446 -                       dquot_decr_space(inode->i_dquot[cnt], number);
447 +                       dquot_decr_space(dquot[cnt], number);
448         }
449         inode_decr_space(inode, number, reserve);
450         spin_unlock(&dq_data_lock);
451 +       spin_unlock(&inode->i_lock);
452  
453         if (reserve)
454 -               goto out_unlock;
455 +               goto out;
456         /* Dirtify all the dquots - this can block when journalling */
457         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
458 -               if (inode->i_dquot[cnt])
459 -                       mark_dquot_dirty(inode->i_dquot[cnt]);
460 -out_unlock:
461 -       flush_warnings(inode->i_dquot, warntype);
462 -       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
463 +               if (dquot[cnt])
464 +                       mark_dquot_dirty(dquot[cnt]);
465 +out:
466 +       flush_warnings(dquot, warntype);
467 +       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
468 +               dqput(dquot[cnt]);
469         return QUOTA_OK;
470  }
471  
472 @@ -1656,32 +1681,37 @@
473  {
474         unsigned int cnt;
475         char warntype[MAXQUOTAS];
476 +       struct dquot *dquot[MAXQUOTAS] = { NULL };
477  
478         /* First test before acquiring mutex - solves deadlocks when we
479           * re-enter the quota code and are already holding the mutex */
480         if (IS_NOQUOTA(inode))
481                 return QUOTA_OK;
482  
483 -       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
484 -       /* Now recheck reliably when holding dqptr_sem */
485 +       spin_lock(&((struct inode *)inode)->i_lock);
486         if (IS_NOQUOTA(inode)) {
487 -               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
488 +               spin_unlock(&((struct inode *)inode)->i_lock);
489                 return QUOTA_OK;
490         }
491         spin_lock(&dq_data_lock);
492         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
493 -               if (!inode->i_dquot[cnt])
494 +               dquot[cnt] = inode->i_dquot[cnt];
495 +               if (!dquot[cnt])
496                         continue;
497 -               warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
498 -               dquot_decr_inodes(inode->i_dquot[cnt], number);
499 +               atomic_inc(&dquot[cnt]->dq_count);
500 +               warntype[cnt] = info_idq_free(dquot[cnt], number);
501 +               dquot_decr_inodes(dquot[cnt], number);
502         }
503         spin_unlock(&dq_data_lock);
504 +       spin_unlock(&((struct inode *)inode)->i_lock);
505 +
506         /* Dirtify all the dquots - this can block when journalling */
507         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
508 -               if (inode->i_dquot[cnt])
509 -                       mark_dquot_dirty(inode->i_dquot[cnt]);
510 -       flush_warnings(inode->i_dquot, warntype);
511 -       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
512 +               if (dquot[cnt])
513 +                       mark_dquot_dirty(dquot[cnt]);
514 +       flush_warnings(dquot, warntype);
515 +       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
516 +               dqput(dquot[cnt]);
517         return QUOTA_OK;
518  }
519  EXPORT_SYMBOL(dquot_free_inode);
520 @@ -1721,14 +1751,13 @@
521                 transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
522                                               GRPQUOTA);
523  
524 -       down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
525 -       /* Now recheck reliably when holding dqptr_sem */
526 +       spin_lock(&inode->i_lock);
527         if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
528 -               up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
529 +               spin_unlock(&inode->i_lock);
530                 goto put_all;
531         }
532         spin_lock(&dq_data_lock);
533 -       cur_space = inode_get_bytes(inode);
534 +       cur_space = __inode_get_bytes(inode);
535         rsv_space = inode_get_rsv_space(inode);
536         space = cur_space + rsv_space;
537         /* Build the transfer_from list and check the limits */
538 @@ -1771,7 +1800,7 @@
539                 inode->i_dquot[cnt] = transfer_to[cnt];
540         }
541         spin_unlock(&dq_data_lock);
542 -       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
543 +       spin_unlock(&inode->i_lock);
544  
545         /* Dirtify all the dquots - this can block when journalling */
546         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
547 @@ -1795,7 +1824,7 @@
548         return ret;
549  over_quota:
550         spin_unlock(&dq_data_lock);
551 -       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
552 +       spin_unlock(&inode->i_lock);
553         /* Clear dquot pointers we don't want to dqput() */
554         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
555                 transfer_from[cnt] = NULL;
556 @@ -2047,13 +2076,13 @@
557                 /* We don't want quota and atime on quota files (deadlocks
558                  * possible) Also nobody should write to the file - we use
559                  * special IO operations which ignore the immutable bit. */
560 -               down_write(&dqopt->dqptr_sem);
561                 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
562 +               spin_lock(&inode->i_lock);
563                 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
564                                              S_NOQUOTA);
565                 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
566 +               spin_unlock(&inode->i_lock);
567                 mutex_unlock(&inode->i_mutex);
568 -               up_write(&dqopt->dqptr_sem);
569                 sb->dq_op->drop(inode);
570         }
571  
572 @@ -2090,14 +2119,14 @@
573         iput(inode);
574  out_lock:
575         if (oldflags != -1) {
576 -               down_write(&dqopt->dqptr_sem);
577                 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
578 +               spin_lock(&inode->i_lock);
579                 /* Set the flags back (in the case of accidental quotaon()
580                  * on a wrong file we don't want to mess up the flags) */
581                 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
582                 inode->i_flags |= oldflags;
583 +               spin_unlock(&inode->i_lock);
584                 mutex_unlock(&inode->i_mutex);
585 -               up_write(&dqopt->dqptr_sem);
586         }
587         mutex_unlock(&dqopt->dqonoff_mutex);
588  out_fmt:
589 --- linux-2.6.32-279.el6.x86_64/fs/quota/quota.c        2012-06-14 05:41:12.000000000 +0800
590 +++ linux-2.6.32-279.el6.x86_64.quota/fs/quota/quota.c  2013-02-21 17:19:03.530511632 +0800
591 @@ -257,13 +257,13 @@
592                 case Q_GETFMT: {
593                         __u32 fmt;
594  
595 -                       down_read(&sb_dqopt(sb)->dqptr_sem);
596 +                       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
597                         if (!sb_has_quota_active(sb, type)) {
598 -                               up_read(&sb_dqopt(sb)->dqptr_sem);
599 +                               mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
600                                 return -ESRCH;
601                         }
602                         fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
603 -                       up_read(&sb_dqopt(sb)->dqptr_sem);
604 +                       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
605                         if (copy_to_user(addr, &fmt, sizeof(fmt)))
606                                 return -EFAULT;
607                         return 0;
608 --- linux-2.6.32-279.el6.x86_64/fs/stat.c       2012-06-14 05:41:12.000000000 +0800
609 +++ linux-2.6.32-279.el6.x86_64.quota/fs/stat.c 2013-03-04 23:14:19.471077448 +0800
610 @@ -422,9 +422,8 @@
611  
612  EXPORT_SYMBOL(inode_add_bytes);
613  
614 -void inode_sub_bytes(struct inode *inode, loff_t bytes)
615 +void __inode_sub_bytes(struct inode *inode, loff_t bytes)
616  {
617 -       spin_lock(&inode->i_lock);
618         inode->i_blocks -= bytes >> 9;
619         bytes &= 511;
620         if (inode->i_bytes < bytes) {
621 @@ -432,17 +431,28 @@
622                 inode->i_bytes += 512;
623         }
624         inode->i_bytes -= bytes;
625 +}
626 +
627 +void inode_sub_bytes(struct inode *inode, loff_t bytes)
628 +{
629 +       spin_lock(&inode->i_lock);
630 +       __inode_sub_bytes(inode, bytes);
631         spin_unlock(&inode->i_lock);
632  }
633  
634  EXPORT_SYMBOL(inode_sub_bytes);
635  
636 +loff_t __inode_get_bytes(struct inode *inode)
637 +{
638 +       return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
639 +}
640 +
641  loff_t inode_get_bytes(struct inode *inode)
642  {
643         loff_t ret;
644  
645         spin_lock(&inode->i_lock);
646 -       ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
647 +       ret = __inode_get_bytes(inode);
648         spin_unlock(&inode->i_lock);
649         return ret;
650  }
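
The fs/stat.c change follows the same wrapper/helper split used elsewhere in the
patch: the existing locking functions become thin wrappers around new __-prefixed
variants that assume i_lock is already held, so the quota code above can account
bytes from inside its i_lock/dq_data_lock sections without recursive locking. An
illustrative caller (hypothetical, not part of the patch):

/* Illustrative sketch: a caller that already holds i_lock uses the lockless
 * __ variants added by this hunk. */
static loff_t example_account_under_i_lock(struct inode *inode, loff_t bytes)
{
	loff_t used;

	spin_lock(&inode->i_lock);
	used = __inode_get_bytes(inode);	/* i_blocks/i_bytes read under i_lock */
	if (used >= bytes)
		__inode_sub_bytes(inode, bytes);	/* adjust without retaking i_lock */
	spin_unlock(&inode->i_lock);

	return used;
}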
651 --- linux-2.6.32-279.el6.x86_64/fs/super.c      2012-06-14 05:41:12.000000000 +0800
652 +++ linux-2.6.32-279.el6.x86_64.quota/fs/super.c        2013-02-21 17:19:03.530511632 +0800
653 @@ -97,7 +97,6 @@
654                 mutex_init(&s->s_vfs_rename_mutex);
655                 mutex_init(&s->s_dquot.dqio_mutex);
656                 mutex_init(&s->s_dquot.dqonoff_mutex);
657 -               init_rwsem(&s->s_dquot.dqptr_sem);
658                 init_waitqueue_head(&s->s_wait_unfrozen);
659                 s->s_maxbytes = MAX_NON_LFS;
660                 s->dq_op = sb_dquot_ops;
661 --- linux-2.6.32-279.el6.x86_64/include/linux/fs.h      2012-06-14 05:41:19.000000000 +0800
662 +++ linux-2.6.32-279.el6.x86_64.quota/include/linux/fs.h        2013-02-21 17:19:03.531522089 +0800
663 @@ -2396,7 +2400,9 @@
664  extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
665  void __inode_add_bytes(struct inode *inode, loff_t bytes);
666  void inode_add_bytes(struct inode *inode, loff_t bytes);
667 +void __inode_sub_bytes(struct inode *inode, loff_t bytes);
668  void inode_sub_bytes(struct inode *inode, loff_t bytes);
669 +loff_t __inode_get_bytes(struct inode *inode);
670  loff_t inode_get_bytes(struct inode *inode);
671  void inode_set_bytes(struct inode *inode, loff_t bytes);
672