lustre/kernel_patches/patches/quota-replace-dqptr-sem-sles11sp2.patch
diff -urp linux-3.0.61-0.orig/fs/quota/dquot.c linux-3.0.61-0/fs/quota/dquot.c
--- linux-3.0.61-0.orig/fs/quota/dquot.c        2013-04-10 15:15:11.000000000 -0400
+++ linux-3.0.61-0/fs/quota/dquot.c     2013-04-24 10:27:22.000000000 -0400
@@ -83,26 +83,21 @@
 /*
  * There are three quota SMP locks. dq_list_lock protects all lists with quotas
  * and quota formats.
- * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
- * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
- * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
- * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
- * modifications of quota state (on quotaon and quotaoff) and readers who care
- * about latest values take it as well.
+ * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures.
+ * dq_state_lock protects modifications of quota state (on quotaon and quotaoff)
+ * and readers who care about latest values take it as well.
  *
- * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
+ * The spinlock ordering is hence: i_lock > dq_data_lock > dq_list_lock,
  *   dq_list_lock > dq_state_lock
  *
  * Note that some things (eg. sb pointer, type, id) doesn't change during
  * the life of the dquot structure and so needn't to be protected by a lock
  *
- * Any operation working on dquots via inode pointers must hold dqptr_sem.  If
- * operation is just reading pointers from inode (or not using them at all) the
- * read lock is enough. If pointers are altered function must hold write lock.
+ * Any operation working on dquots via inode pointers must hold i_lock.
  * Special care needs to be taken about S_NOQUOTA inode flag (marking that
  * inode is a quota file). Functions adding pointers from inode to dquots have
- * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
- * have to do all pointer modifications before dropping dqptr_sem. This makes
+ * to check this flag under i_lock and then (if S_NOQUOTA is not set) they
+ * have to do all pointer modifications before dropping i_lock. This makes
  * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
  * then drops all pointers to dquots from an inode.
  *
@@ -116,15 +111,8 @@
  * spinlock to internal buffers before writing.
  *
  * Lock ordering (including related VFS locks) is the following:
- *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
- *   dqio_mutex
- * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
- * dqptr_sem. But filesystem has to count with the fact that functions such as
- * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
- * from inside a transaction to keep filesystem consistency after a crash. Also
- * filesystems usually want to do some IO on dquot from ->mark_dirty which is
- * called with dqptr_sem held.
- * i_mutex on quota files is special (it's below dqio_mutex)
+ *  i_mutex > dqonoff_sem > journal_lock > dquot->dq_lock > dqio_mutex
+ *  i_mutex on quota files is special (it's below dqio_mutex)
  */
 
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
@@ -955,7 +943,6 @@ static inline int dqput_blocks(struct dq
 /*
  * Remove references to dquots from inode and add dquot to list for freeing
  * if we have the last reference to dquot
- * We can't race with anybody because we hold dqptr_sem for writing...
  */
 static int remove_inode_dquot_ref(struct inode *inode, int type,
                                  struct list_head *tofree_head)
@@ -1016,13 +1003,15 @@ static void remove_dquot_ref(struct supe
                 *  We have to scan also I_NEW inodes because they can already
                 *  have quota pointer initialized. Luckily, we need to touch
                 *  only quota pointers and these have separate locking
-                *  (dqptr_sem).
+                *  (i_lock).
                 */
+               spin_lock(&inode->i_lock);
                if (!IS_NOQUOTA(inode)) {
                        if (unlikely(inode_get_rsv_space(inode) > 0))
                                reserved = 1;
                        remove_inode_dquot_ref(inode, type, tofree_head);
                }
+               spin_unlock(&inode->i_lock);
        }
        spin_unlock(&inode_sb_list_lock);
 #ifdef CONFIG_QUOTA_DEBUG
@@ -1040,9 +1029,7 @@ static void drop_dquot_ref(struct super_
        LIST_HEAD(tofree_head);
 
        if (sb->dq_op) {
-               down_write(&sb_dqopt(sb)->dqptr_sem);
                remove_dquot_ref(sb, type, &tofree_head);
-               up_write(&sb_dqopt(sb)->dqptr_sem);
                put_dquot_list(&tofree_head);
        }
 }
@@ -1349,9 +1336,6 @@ static int dquot_active(const struct ino
 /*
  * Initialize quota pointers in inode
  *
- * We do things in a bit complicated way but by that we avoid calling
- * dqget() and thus filesystem callbacks under dqptr_sem.
- *
  * It is better to call this function outside of any transaction as it
  * might need a lot of space in journal for dquot structure allocation.
  */
@@ -1384,7 +1368,7 @@ static void __dquot_initialize(struct in
                got[cnt] = dqget(sb, id, cnt);
        }
 
-       down_write(&sb_dqopt(sb)->dqptr_sem);
+       spin_lock(&inode->i_lock);
        if (IS_NOQUOTA(inode))
                goto out_err;
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1404,12 +1388,16 @@ static void __dquot_initialize(struct in
                         * did a write before quota was turned on
                         */
                        rsv = inode_get_rsv_space(inode);
-                       if (unlikely(rsv))
+                       if (unlikely(rsv)) {
+                               spin_lock(&dq_data_lock);
                                dquot_resv_space(inode->i_dquot[cnt], rsv);
+                               spin_unlock(&dq_data_lock);
+                       }
                }
        }
 out_err:
-       up_write(&sb_dqopt(sb)->dqptr_sem);
+       spin_unlock(&inode->i_lock);
+
        /* Drop unused references */
        dqput_all(got);
 }
@@ -1428,12 +1416,12 @@ static void __dquot_drop(struct inode *i
        int cnt;
        struct dquot *put[MAXQUOTAS];
 
-       down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&inode->i_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                put[cnt] = inode->i_dquot[cnt];
                inode->i_dquot[cnt] = NULL;
        }
-       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_unlock(&inode->i_lock);
        dqput_all(put);
 }
 
@@ -1473,27 +1461,42 @@ static qsize_t *inode_reserved_space(str
        return inode->i_sb->dq_op->get_reserved_space(inode);
 }
 
+static inline void __inode_add_rsv_space(struct inode *inode, qsize_t number)
+{
+       *inode_reserved_space(inode) += number;
+}
+
 void inode_add_rsv_space(struct inode *inode, qsize_t number)
 {
        spin_lock(&inode->i_lock);
-       *inode_reserved_space(inode) += number;
+       __inode_add_rsv_space(inode, number);
       spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL(inode_add_rsv_space);
 
-void inode_claim_rsv_space(struct inode *inode, qsize_t number)
+static inline void __inode_claim_rsv_space(struct inode *inode, qsize_t number)
 {
-       spin_lock(&inode->i_lock);
        *inode_reserved_space(inode) -= number;
        __inode_add_bytes(inode, number);
+}
+
+void inode_claim_rsv_space(struct inode *inode, qsize_t number)
+{
+       spin_lock(&inode->i_lock);
+       __inode_claim_rsv_space(inode, number);
        spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL(inode_claim_rsv_space);
 
+static inline void __inode_sub_rsv_space(struct inode *inode, qsize_t number)
+{
+       *inode_reserved_space(inode) -= number;
+}
+
 void inode_sub_rsv_space(struct inode *inode, qsize_t number)
 {
        spin_lock(&inode->i_lock);
-       *inode_reserved_space(inode) -= number;
+       __inode_sub_rsv_space(inode, number);
        spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL(inode_sub_rsv_space);
@@ -1504,9 +1507,8 @@ static qsize_t inode_get_rsv_space(struc
 
        if (!inode->i_sb->dq_op->get_reserved_space)
                return 0;
-       spin_lock(&inode->i_lock);
+
       ret = *inode_reserved_space(inode);
-       spin_unlock(&inode->i_lock);
        return ret;
 }
 
@@ -1514,17 +1516,17 @@ static void inode_incr_space(struct inod
                                int reserve)
 {
        if (reserve)
-               inode_add_rsv_space(inode, number);
+               __inode_add_rsv_space(inode, number);
        else
-               inode_add_bytes(inode, number);
+               __inode_add_bytes(inode, number);
 }
 
 static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
 {
        if (reserve)
-               inode_sub_rsv_space(inode, number);
+               __inode_sub_rsv_space(inode, number);
        else
-               inode_sub_bytes(inode, number);
+               __inode_sub_bytes(inode, number);
 }
 
 /*
@@ -1547,6 +1549,7 @@ int __dquot_alloc_space(struct inode *in
        int warn = flags & DQUOT_SPACE_WARN;
        int reserve = flags & DQUOT_SPACE_RESERVE;
        int nofail = flags & DQUOT_SPACE_NOFAIL;
+       struct dquot *dquot[MAXQUOTAS] = { NULL };
 
        /*
        * First test before acquiring mutex - solves deadlocks when we
@@ -1557,38 +1560,41 @@
                goto out;
        }
 
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&inode->i_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                warntype[cnt] = QUOTA_NL_NOWARN;
 
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (!inode->i_dquot[cnt])
+               dquot[cnt] = inode->i_dquot[cnt];
+               if (!dquot[cnt])
                        continue;
-               ret = check_bdq(inode->i_dquot[cnt], number, !warn,
-                               warntype+cnt);
+               atomic_inc(&dquot[cnt]->dq_count);
+               ret = check_bdq(dquot[cnt], number, !warn, warntype + cnt);
                if (ret && !nofail) {
                        spin_unlock(&dq_data_lock);
+                       spin_unlock(&inode->i_lock);
                        goto out_flush_warn;
                }
        }
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (!inode->i_dquot[cnt])
+               if (!dquot[cnt])
                        continue;
                if (reserve)
-                       dquot_resv_space(inode->i_dquot[cnt], number);
+                       dquot_resv_space(dquot[cnt], number);
                else
-                       dquot_incr_space(inode->i_dquot[cnt], number);
+                       dquot_incr_space(dquot[cnt], number);
        }
        inode_incr_space(inode, number, reserve);
        spin_unlock(&dq_data_lock);
+       spin_unlock(&inode->i_lock);
 
        if (reserve)
                goto out_flush_warn;
-       mark_all_dquot_dirty(inode->i_dquot);
+       mark_all_dquot_dirty(dquot);
 out_flush_warn:
-       flush_warnings(inode->i_dquot, warntype);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       flush_warnings(dquot, warntype);
+       dqput_all(dquot);
 out:
        return ret;
 }
@@ -1601,6 +1607,7 @@ int dquot_alloc_inode(const struct inode
 {
        int cnt, ret = 0;
        char warntype[MAXQUOTAS];
+       struct dquot *dquot[MAXQUOTAS] = { NULL };
 
        /* First test before acquiring mutex - solves deadlocks when we
         * re-enter the quota code and are already holding the mutex */
@@ -1608,28 +1615,33 @@ int dquot_alloc_inode(const struct inode
                return 0;
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                warntype[cnt] = QUOTA_NL_NOWARN;
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+       spin_lock(&((struct inode *)inode)->i_lock);
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (!inode->i_dquot[cnt])
+               dquot[cnt] = inode->i_dquot[cnt];
+               if (!dquot[cnt])
                        continue;
-               ret = check_idq(inode->i_dquot[cnt], 1, warntype + cnt);
+               atomic_inc(&dquot[cnt]->dq_count);
+               ret = check_idq(dquot[cnt], 1, warntype + cnt);
                if (ret)
                        goto warn_put_all;
        }
 
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (!inode->i_dquot[cnt])
+               if (!dquot[cnt])
                        continue;
-               dquot_incr_inodes(inode->i_dquot[cnt], 1);
+               dquot_incr_inodes(dquot[cnt], 1);
        }
 
 warn_put_all:
        spin_unlock(&dq_data_lock);
+       spin_unlock(&((struct inode *)inode)->i_lock);
+
       if (ret == 0)
-               mark_all_dquot_dirty(inode->i_dquot);
-       flush_warnings(inode->i_dquot, warntype);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+               mark_all_dquot_dirty(dquot);
+       flush_warnings(dquot, warntype);
+       dqput_all(dquot);
        return ret;
 }
 EXPORT_SYMBOL(dquot_alloc_inode);
@@ -1639,6 +1651,7 @@ EXPORT_SYMBOL(dquot_alloc_inode);
  */
 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
 {
+       struct dquot *dquot[MAXQUOTAS] = { NULL };
        int cnt;
 
        if (!dquot_active(inode)) {
@@ -1646,19 +1659,23 @@ int dquot_claim_space_nodirty(struct ino
                return 0;
        }
 
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&inode->i_lock);
        spin_lock(&dq_data_lock);
        /* Claim reserved quotas to allocated quotas */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (inode->i_dquot[cnt])
-                       dquot_claim_reserved_space(inode->i_dquot[cnt],
-                                                       number);
+               dquot[cnt] = inode->i_dquot[cnt];
+               if (dquot[cnt]) {
+                       atomic_inc(&dquot[cnt]->dq_count);
+                       dquot_claim_reserved_space(dquot[cnt], number);
+               }
        }
        /* Update inode bytes */
-       inode_claim_rsv_space(inode, number);
+       __inode_claim_rsv_space(inode, number);
        spin_unlock(&dq_data_lock);
-       mark_all_dquot_dirty(inode->i_dquot);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_unlock(&inode->i_lock);
+
+       mark_all_dquot_dirty(dquot);
+       dqput_all(dquot);
        return 0;
 }
 EXPORT_SYMBOL(dquot_claim_space_nodirty);
@@ -1671,6 +1688,7 @@ void __dquot_free_space(struct inode *in
        unsigned int cnt;
        char warntype[MAXQUOTAS];
        int reserve = flags & DQUOT_SPACE_RESERVE;
+       struct dquot *dquot[MAXQUOTAS] = { NULL };
 
        /* First test before acquiring mutex - solves deadlocks when we
         * re-enter the quota code and are already holding the mutex */
@@ -1679,26 +1697,29 @@ void __dquot_free_space(struct inode *in
                return;
        }
 
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&inode->i_lock);
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (!inode->i_dquot[cnt])
+               dquot[cnt] = inode->i_dquot[cnt];
+               if (!dquot[cnt])
                        continue;
-               warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
+               atomic_inc(&dquot[cnt]->dq_count);
+               warntype[cnt] = info_bdq_free(dquot[cnt], number);
                if (reserve)
-                       dquot_free_reserved_space(inode->i_dquot[cnt], number);
+                       dquot_free_reserved_space(dquot[cnt], number);
                else
-                       dquot_decr_space(inode->i_dquot[cnt], number);
+                       dquot_decr_space(dquot[cnt], number);
        }
        inode_decr_space(inode, number, reserve);
        spin_unlock(&dq_data_lock);
+       spin_unlock(&inode->i_lock);
 
        if (reserve)
                goto out_unlock;
-       mark_all_dquot_dirty(inode->i_dquot);
+       mark_all_dquot_dirty(dquot);
 out_unlock:
-       flush_warnings(inode->i_dquot, warntype);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       flush_warnings(dquot, warntype);
+       dqput_all(dquot);
 }
 EXPORT_SYMBOL(__dquot_free_space);
 
@@ -1707,26 +1728,31 @@ EXPORT_SYMBOL(__dquot_free_space);
  */
 void dquot_free_inode(const struct inode *inode)
 {
-       unsigned int cnt;
+       struct dquot *dquot[MAXQUOTAS] = { NULL };
        char warntype[MAXQUOTAS];
+       unsigned int cnt;
 
        /* First test before acquiring mutex - solves deadlocks when we
         * re-enter the quota code and are already holding the mutex */
        if (!dquot_active(inode))
                return;
 
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&((struct inode *)inode)->i_lock);
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (!inode->i_dquot[cnt])
+               dquot[cnt] = inode->i_dquot[cnt];
+               if (!dquot[cnt])
                        continue;
-               warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1);
-               dquot_decr_inodes(inode->i_dquot[cnt], 1);
+               atomic_inc(&dquot[cnt]->dq_count);
+               warntype[cnt] = info_idq_free(dquot[cnt], 1);
+               dquot_decr_inodes(dquot[cnt], 1);
        }
        spin_unlock(&dq_data_lock);
-       mark_all_dquot_dirty(inode->i_dquot);
-       flush_warnings(inode->i_dquot, warntype);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_unlock(&((struct inode *)inode)->i_lock);
+
+       mark_all_dquot_dirty(dquot);
+       flush_warnings(dquot, warntype);
+       dqput_all(dquot);
 }
 EXPORT_SYMBOL(dquot_free_inode);
 
@@ -1757,13 +1783,13 @@ int __dquot_transfer(struct inode *inode
        /* Initialize the arrays */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                warntype_to[cnt] = QUOTA_NL_NOWARN;
-       down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&inode->i_lock);
        if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
-               up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+               spin_unlock(&inode->i_lock);
                return 0;
        }
        spin_lock(&dq_data_lock);
-       cur_space = inode_get_bytes(inode);
+       cur_space = __inode_get_bytes(inode);
        rsv_space = inode_get_rsv_space(inode);
        space = cur_space + rsv_space;
        /* Build the transfer_from list and check the limits */
@@ -1811,7 +1837,7 @@ int __dquot_transfer(struct inode *inode
                inode->i_dquot[cnt] = transfer_to[cnt];
        }
        spin_unlock(&dq_data_lock);
-       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_unlock(&inode->i_lock);
 
        mark_all_dquot_dirty(transfer_from);
        mark_all_dquot_dirty(transfer_to);
@@ -1825,7 +1851,7 @@ int __dquot_transfer(struct inode *inode
        return 0;
 over_quota:
        spin_unlock(&dq_data_lock);
-       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_unlock(&inode->i_lock);
        flush_warnings(transfer_to, warntype_to);
        return ret;
 }
diff -urp linux-3.0.61-0.orig/fs/quota/quota.c linux-3.0.61-0/fs/quota/quota.c
--- linux-3.0.61-0.orig/fs/quota/quota.c        2013-04-10 15:15:08.000000000 -0400
+++ linux-3.0.61-0/fs/quota/quota.c     2013-04-24 10:27:22.000000000 -0400
@@ -79,13 +79,13 @@ static int quota_getfmt(struct super_blo
 {
        __u32 fmt;
 
-       down_read(&sb_dqopt(sb)->dqptr_sem);
+       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
        if (!sb_has_quota_active(sb, type)) {
-               up_read(&sb_dqopt(sb)->dqptr_sem);
+               mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
                return -ESRCH;
        }
        fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
-       up_read(&sb_dqopt(sb)->dqptr_sem);
+       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
        if (copy_to_user(addr, &fmt, sizeof(fmt)))
                return -EFAULT;
        return 0;
diff -urp linux-3.0.61-0.orig/fs/stat.c linux-3.0.61-0/fs/stat.c
--- linux-3.0.61-0.orig/fs/stat.c       2013-04-10 15:15:08.000000000 -0400
+++ linux-3.0.61-0/fs/stat.c    2013-04-24 10:27:22.000000000 -0400
@@ -435,9 +435,8 @@ void inode_add_bytes(struct inode *inode
 
 EXPORT_SYMBOL(inode_add_bytes);
 
-void inode_sub_bytes(struct inode *inode, loff_t bytes)
+void __inode_sub_bytes(struct inode *inode, loff_t bytes)
 {
-       spin_lock(&inode->i_lock);
        inode->i_blocks -= bytes >> 9;
        bytes &= 511;
        if (inode->i_bytes < bytes) {
@@ -445,17 +444,28 @@ void inode_sub_bytes(struct inode *inode
                inode->i_bytes += 512;
        }
        inode->i_bytes -= bytes;
+}
+
+void inode_sub_bytes(struct inode *inode, loff_t bytes)
+{
+       spin_lock(&inode->i_lock);
+       __inode_sub_bytes(inode, bytes);
        spin_unlock(&inode->i_lock);
 }
 
 EXPORT_SYMBOL(inode_sub_bytes);
 
+loff_t __inode_get_bytes(struct inode *inode)
+{
+       return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
+}
+
 loff_t inode_get_bytes(struct inode *inode)
 {
        loff_t ret;
 
        spin_lock(&inode->i_lock);
-       ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
+       ret = __inode_get_bytes(inode);
        spin_unlock(&inode->i_lock);
        return ret;
 }
diff -urp linux-3.0.61-0.orig/fs/super.c linux-3.0.61-0/fs/super.c
--- linux-3.0.61-0.orig/fs/super.c      2013-04-10 15:15:08.000000000 -0400
+++ linux-3.0.61-0/fs/super.c   2013-04-24 10:27:22.000000000 -0400
@@ -108,7 +108,6 @@ static struct super_block *alloc_super(s
                lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
                mutex_init(&s->s_dquot.dqio_mutex);
                mutex_init(&s->s_dquot.dqonoff_mutex);
-               init_rwsem(&s->s_dquot.dqptr_sem);
                init_waitqueue_head(&s->s_wait_unfrozen);
                s->s_maxbytes = MAX_NON_LFS;
                s->s_op = &default_op;
diff -urp linux-3.0.61-0.orig/include/linux/fs.h linux-3.0.61-0/include/linux/fs.h
--- linux-3.0.61-0.orig/include/linux/fs.h      2013-04-24 10:27:55.000000000 -0400
+++ linux-3.0.61-0/include/linux/fs.h   2013-04-22 17:42:39.000000000 -0400
@@ -2450,7 +2450,9 @@ extern void generic_fillattr(struct inod
 extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 void __inode_add_bytes(struct inode *inode, loff_t bytes);
 void inode_add_bytes(struct inode *inode, loff_t bytes);
+void __inode_sub_bytes(struct inode *inode, loff_t bytes);
 void inode_sub_bytes(struct inode *inode, loff_t bytes);
+loff_t __inode_get_bytes(struct inode *inode);
 loff_t inode_get_bytes(struct inode *inode);
 void inode_set_bytes(struct inode *inode, loff_t bytes);