LU-3427 build: fix 'error handling' issues
fs/lustre-release.git: lustre/kernel_patches/patches/replace_dqptr_sem-sles11sp2.patch
diff -urp linux-3.0.61-0.orig/fs/quota/dquot.c linux-3.0.61-0/fs/quota/dquot.c
--- linux-3.0.61-0.orig/fs/quota/dquot.c        2013-04-10 15:15:11.000000000 -0400
+++ linux-3.0.61-0/fs/quota/dquot.c     2013-04-24 10:27:22.000000000 -0400
@@ -83,26 +83,21 @@
 /*
  * There are three quota SMP locks. dq_list_lock protects all lists with quotas
  * and quota formats.
- * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
- * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
- * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
- * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
- * modifications of quota state (on quotaon and quotaoff) and readers who care
- * about latest values take it as well.
+ * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures.
+ * dq_state_lock protects modifications of quota state (on quotaon and quotaoff)
+ * and readers who care about latest values take it as well.
  *
- * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
+ * The spinlock ordering is hence: i_lock > dq_data_lock > dq_list_lock,
  *   dq_list_lock > dq_state_lock
  *
  * Note that some things (eg. sb pointer, type, id) doesn't change during
  * the life of the dquot structure and so needn't to be protected by a lock
  *
- * Any operation working on dquots via inode pointers must hold dqptr_sem.  If
- * operation is just reading pointers from inode (or not using them at all) the
- * read lock is enough. If pointers are altered function must hold write lock.
+ * Any operation working on dquots via inode pointers must hold i_lock.
  * Special care needs to be taken about S_NOQUOTA inode flag (marking that
  * inode is a quota file). Functions adding pointers from inode to dquots have
- * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
- * have to do all pointer modifications before dropping dqptr_sem. This makes
+ * to check this flag under i_lock and then (if S_NOQUOTA is not set) they
+ * have to do all pointer modifications before dropping i_lock. This makes
  * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
  * then drops all pointers to dquots from an inode.
  *
@@ -116,15 +111,8 @@
  * spinlock to internal buffers before writing.
  *
  * Lock ordering (including related VFS locks) is the following:
- *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
- *   dqio_mutex
- * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
- * dqptr_sem. But filesystem has to count with the fact that functions such as
- * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
- * from inside a transaction to keep filesystem consistency after a crash. Also
- * filesystems usually want to do some IO on dquot from ->mark_dirty which is
- * called with dqptr_sem held.
- * i_mutex on quota files is special (it's below dqio_mutex)
+ *  i_mutex > dqonoff_sem > journal_lock > dquot->dq_lock > dqio_mutex
+ *  i_mutex on quota files is special (it's below dqio_mutex)
  */
 
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
@@ -955,7 +943,6 @@ static inline int dqput_blocks(struct dq
 /*
  * Remove references to dquots from inode and add dquot to list for freeing
  * if we have the last reference to dquot
- * We can't race with anybody because we hold dqptr_sem for writing...
 */
 static int remove_inode_dquot_ref(struct inode *inode, int type,
                                  struct list_head *tofree_head)
@@ -1016,13 +1003,15 @@ static void remove_dquot_ref(struct supe
                 *  We have to scan also I_NEW inodes because they can already
                 *  have quota pointer initialized. Luckily, we need to touch
                 *  only quota pointers and these have separate locking
-                *  (dqptr_sem).
+                *  (i_lock).
                 */
+               spin_lock(&inode->i_lock);
                if (!IS_NOQUOTA(inode)) {
                        if (unlikely(inode_get_rsv_space(inode) > 0))
                                reserved = 1;
                        remove_inode_dquot_ref(inode, type, tofree_head);
                }
+               spin_unlock(&inode->i_lock);
        }
        spin_unlock(&inode_sb_list_lock);
 #ifdef CONFIG_QUOTA_DEBUG
@@ -1040,9 +1029,7 @@ static void drop_dquot_ref(struct super_
        LIST_HEAD(tofree_head);
 
        if (sb->dq_op) {
-               down_write(&sb_dqopt(sb)->dqptr_sem);
                remove_dquot_ref(sb, type, &tofree_head);
-               up_write(&sb_dqopt(sb)->dqptr_sem);
                put_dquot_list(&tofree_head);
        }
 }
@@ -1349,9 +1336,6 @@ static int dquot_active(const struct ino
 /*
  * Initialize quota pointers in inode
  *
- * We do things in a bit complicated way but by that we avoid calling
- * dqget() and thus filesystem callbacks under dqptr_sem.
- *
  * It is better to call this function outside of any transaction as it
  * might need a lot of space in journal for dquot structure allocation.
  */
@@ -1384,7 +1368,7 @@ static void __dquot_initialize(struct in
                got[cnt] = dqget(sb, id, cnt);
        }
 
-       down_write(&sb_dqopt(sb)->dqptr_sem);
+       spin_lock(&inode->i_lock);
        if (IS_NOQUOTA(inode))
                goto out_err;
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1404,12 +1388,16 @@ static void __dquot_initialize(struct in
                         * did a write before quota was turned on
                         */
                        rsv = inode_get_rsv_space(inode);
-                       if (unlikely(rsv))
+                       if (unlikely(rsv)) {
+                               spin_lock(&dq_data_lock);
                                dquot_resv_space(inode->i_dquot[cnt], rsv);
+                               spin_unlock(&dq_data_lock);
+                       }
                }
        }
 out_err:
-       up_write(&sb_dqopt(sb)->dqptr_sem);
+       spin_unlock(&inode->i_lock);
+
       /* Drop unused references */
        dqput_all(got);
 }
@@ -1428,12 +1416,12 @@ static void __dquot_drop(struct inode *i
        int cnt;
        struct dquot *put[MAXQUOTAS];
 
-       down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&inode->i_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                put[cnt] = inode->i_dquot[cnt];
                inode->i_dquot[cnt] = NULL;
        }
-       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_unlock(&inode->i_lock);
        dqput_all(put);
 }
 
@@ -1473,27 +1461,42 @@ static qsize_t *inode_reserved_space(str
        return inode->i_sb->dq_op->get_reserved_space(inode);
 }
 
+static inline void __inode_add_rsv_space(struct inode *inode, qsize_t number)
+{
+       *inode_reserved_space(inode) += number;
+}
+
 void inode_add_rsv_space(struct inode *inode, qsize_t number)
 {
        spin_lock(&inode->i_lock);
-       *inode_reserved_space(inode) += number;
+       __inode_add_rsv_space(inode, number);
        spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL(inode_add_rsv_space);
 
-void inode_claim_rsv_space(struct inode *inode, qsize_t number)
+static inline void __inode_claim_rsv_space(struct inode *inode, qsize_t number)
 {
-       spin_lock(&inode->i_lock);
        *inode_reserved_space(inode) -= number;
        __inode_add_bytes(inode, number);
+}
+
+void inode_claim_rsv_space(struct inode *inode, qsize_t number)
+{
+       spin_lock(&inode->i_lock);
+       __inode_claim_rsv_space(inode, number);
        spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL(inode_claim_rsv_space);
 
+static inline void __inode_sub_rsv_space(struct inode *inode, qsize_t number)
+{
+       *inode_reserved_space(inode) -= number;
+}
+
 void inode_sub_rsv_space(struct inode *inode, qsize_t number)
 {
        spin_lock(&inode->i_lock);
-       *inode_reserved_space(inode) -= number;
+       __inode_sub_rsv_space(inode, number);
        spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL(inode_sub_rsv_space);
@@ -1504,9 +1507,8 @@ static qsize_t inode_get_rsv_space(struc
 
        if (!inode->i_sb->dq_op->get_reserved_space)
                return 0;
-       spin_lock(&inode->i_lock);
+
        ret = *inode_reserved_space(inode);
-       spin_unlock(&inode->i_lock);
        return ret;
 }
 
@@ -1514,17 +1516,17 @@ static void inode_incr_space(struct inod
                                int reserve)
 {
        if (reserve)
-               inode_add_rsv_space(inode, number);
+               __inode_add_rsv_space(inode, number);
        else
-               inode_add_bytes(inode, number);
+               __inode_add_bytes(inode, number);
 }
 
 static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
 {
        if (reserve)
-               inode_sub_rsv_space(inode, number);
+               __inode_sub_rsv_space(inode, number);
        else
-               inode_sub_bytes(inode, number);
+               __inode_sub_bytes(inode, number);
 }
 
 /*
@@ -1547,6 +1549,7 @@ int __dquot_alloc_space(struct inode *in
        int warn = flags & DQUOT_SPACE_WARN;
        int reserve = flags & DQUOT_SPACE_RESERVE;
        int nofail = flags & DQUOT_SPACE_NOFAIL;
+       struct dquot *dquot[MAXQUOTAS] = { NULL };
 
        /*
         * First test before acquiring mutex - solves deadlocks when we
@@ -1557,15 +1560,17 @@ int __dquot_alloc_space(struct inode *in
                goto out;
        }
 
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&inode->i_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                warntype[cnt] = QUOTA_NL_NOWARN;
 
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (!inode->i_dquot[cnt])
+               dquot[cnt] = inode->i_dquot[cnt];
+               if (!dquot[cnt])
                        continue;
-               ret = check_bdq(inode->i_dquot[cnt], number, !warn,
+               atomic_inc(&dquot[cnt]->dq_count);
+               ret = check_bdq(dquot[cnt], number, !warn,
                                warntype+cnt);
                if (ret && !nofail) {
                        spin_unlock(&dq_data_lock);
@@ -1573,22 +1578,23 @@ int __dquot_alloc_space(struct inode *in
                }
        }
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (!inode->i_dquot[cnt])
+               if (!dquot[cnt])
                        continue;
                if (reserve)
-                       dquot_resv_space(inode->i_dquot[cnt], number);
+                       dquot_resv_space(dquot[cnt], number);
                else
-                       dquot_incr_space(inode->i_dquot[cnt], number);
+                       dquot_incr_space(dquot[cnt], number);
        }
        inode_incr_space(inode, number, reserve);
        spin_unlock(&dq_data_lock);
+       spin_unlock(&inode->i_lock);
 
        if (reserve)
                goto out_flush_warn;
-       mark_all_dquot_dirty(inode->i_dquot);
+       mark_all_dquot_dirty(dquot);
 out_flush_warn:
-       flush_warnings(inode->i_dquot, warntype);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       flush_warnings(dquot, warntype);
+       dqput_all(dquot);
 out:
        return ret;
 }
@@ -1601,6 +1607,7 @@ int dquot_alloc_inode(const struct inode
 {
        int cnt, ret = 0;
        char warntype[MAXQUOTAS];
+       struct dquot *dquot[MAXQUOTAS] = { NULL };
 
        /* First test before acquiring mutex - solves deadlocks when we
          * re-enter the quota code and are already holding the mutex */
@@ -1608,28 +1615,33 @@ int dquot_alloc_inode(const struct inode
                return 0;
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                warntype[cnt] = QUOTA_NL_NOWARN;
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+       spin_lock(&((struct inode *)inode)->i_lock);
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (!inode->i_dquot[cnt])
+               dquot[cnt] = inode->i_dquot[cnt];
+               if (!dquot[cnt])
                        continue;
-               ret = check_idq(inode->i_dquot[cnt], 1, warntype + cnt);
+               atomic_inc(&dquot[cnt]->dq_count);
+               ret = check_idq(dquot[cnt], 1, warntype + cnt);
                if (ret)
                        goto warn_put_all;
        }
 
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (!inode->i_dquot[cnt])
+               if (!dquot[cnt])
                        continue;
-               dquot_incr_inodes(inode->i_dquot[cnt], 1);
+               dquot_incr_inodes(dquot[cnt], 1);
        }
 
 warn_put_all:
        spin_unlock(&dq_data_lock);
+       spin_unlock(&((struct inode *)inode)->i_lock);
+
        if (ret == 0)
-               mark_all_dquot_dirty(inode->i_dquot);
-       flush_warnings(inode->i_dquot, warntype);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+               mark_all_dquot_dirty(dquot);
+       flush_warnings(dquot, warntype);
+       dqput_all(dquot);
        return ret;
 }
 EXPORT_SYMBOL(dquot_alloc_inode);
@@ -1639,6 +1651,7 @@ EXPORT_SYMBOL(dquot_alloc_inode);
  */
 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
 {
+       struct dquot *dquot[MAXQUOTAS] = { NULL };
        int cnt;
 
        if (!dquot_active(inode)) {
@@ -1646,19 +1659,23 @@ int dquot_claim_space_nodirty(struct ino
                return 0;
        }
 
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&inode->i_lock);
        spin_lock(&dq_data_lock);
        /* Claim reserved quotas to allocated quotas */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (inode->i_dquot[cnt])
-                       dquot_claim_reserved_space(inode->i_dquot[cnt],
-                                                       number);
+               dquot[cnt] = inode->i_dquot[cnt];
+               if (dquot[cnt]) {
+                       atomic_inc(&dquot[cnt]->dq_count);
+                       dquot_claim_reserved_space(dquot[cnt], number);
+               }
        }
        /* Update inode bytes */
-       inode_claim_rsv_space(inode, number);
+       __inode_claim_rsv_space(inode, number);
        spin_unlock(&dq_data_lock);
-       mark_all_dquot_dirty(inode->i_dquot);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_unlock(&inode->i_lock);
+
+       mark_all_dquot_dirty(dquot);
+       dqput_all(dquot);
        return 0;
 }
 EXPORT_SYMBOL(dquot_claim_space_nodirty);
@@ -1671,6 +1688,7 @@ void __dquot_free_space(struct inode *in
        unsigned int cnt;
        char warntype[MAXQUOTAS];
        int reserve = flags & DQUOT_SPACE_RESERVE;
+       struct dquot *dquot[MAXQUOTAS] = { NULL };
 
        /* First test before acquiring mutex - solves deadlocks when we
          * re-enter the quota code and are already holding the mutex */
@@ -1679,26 +1697,29 @@ void __dquot_free_space(struct inode *in
                return;
        }
 
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&inode->i_lock);
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (!inode->i_dquot[cnt])
+               dquot[cnt] = inode->i_dquot[cnt];
+               if (!dquot[cnt])
                        continue;
-               warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
+               atomic_inc(&dquot[cnt]->dq_count);
+               warntype[cnt] = info_bdq_free(dquot[cnt], number);
                if (reserve)
-                       dquot_free_reserved_space(inode->i_dquot[cnt], number);
+                       dquot_free_reserved_space(dquot[cnt], number);
                else
-                       dquot_decr_space(inode->i_dquot[cnt], number);
+                       dquot_decr_space(dquot[cnt], number);
        }
        inode_decr_space(inode, number, reserve);
        spin_unlock(&dq_data_lock);
+       spin_unlock(&inode->i_lock);
 
        if (reserve)
                goto out_unlock;
-       mark_all_dquot_dirty(inode->i_dquot);
+       mark_all_dquot_dirty(dquot);
 out_unlock:
-       flush_warnings(inode->i_dquot, warntype);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       flush_warnings(dquot, warntype);
+       dqput_all(dquot);
 }
 EXPORT_SYMBOL(__dquot_free_space);
 
@@ -1707,26 +1728,31 @@ EXPORT_SYMBOL(__dquot_free_space);
  */
 void dquot_free_inode(const struct inode *inode)
 {
-       unsigned int cnt;
+       struct dquot *dquot[MAXQUOTAS] = { NULL };
        char warntype[MAXQUOTAS];
+       unsigned int cnt;
 
        /* First test before acquiring mutex - solves deadlocks when we
          * re-enter the quota code and are already holding the mutex */
        if (!dquot_active(inode))
                return;
 
-       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&((struct inode *)inode)->i_lock);
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-               if (!inode->i_dquot[cnt])
+               dquot[cnt] = inode->i_dquot[cnt];
+               if (!dquot[cnt])
                        continue;
-               warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1);
-               dquot_decr_inodes(inode->i_dquot[cnt], 1);
+               atomic_inc(&dquot[cnt]->dq_count);
+               warntype[cnt] = info_idq_free(dquot[cnt], 1);
+               dquot_decr_inodes(dquot[cnt], 1);
        }
        spin_unlock(&dq_data_lock);
-       mark_all_dquot_dirty(inode->i_dquot);
-       flush_warnings(inode->i_dquot, warntype);
-       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_unlock(&((struct inode *)inode)->i_lock);
+
+       mark_all_dquot_dirty(dquot);
+       flush_warnings(dquot, warntype);
+       dqput_all(dquot);
 }
 EXPORT_SYMBOL(dquot_free_inode);
 
@@ -1757,13 +1783,13 @@ int __dquot_transfer(struct inode *inode
        /* Initialize the arrays */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                warntype_to[cnt] = QUOTA_NL_NOWARN;
-       down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&inode->i_lock);
        if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
-               up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+               spin_unlock(&inode->i_lock);
                return 0;
        }
        spin_lock(&dq_data_lock);
-       cur_space = inode_get_bytes(inode);
+       cur_space = __inode_get_bytes(inode);
        rsv_space = inode_get_rsv_space(inode);
        space = cur_space + rsv_space;
        /* Build the transfer_from list and check the limits */
@@ -1811,7 +1837,7 @@ int __dquot_transfer(struct inode *inode
                inode->i_dquot[cnt] = transfer_to[cnt];
        }
        spin_unlock(&dq_data_lock);
-       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_unlock(&inode->i_lock);
 
        mark_all_dquot_dirty(transfer_from);
        mark_all_dquot_dirty(transfer_to);
@@ -1825,7 +1851,7 @@ int __dquot_transfer(struct inode *inode
        return 0;
 over_quota:
        spin_unlock(&dq_data_lock);
-       up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_unlock(&inode->i_lock);
        flush_warnings(transfer_to, warntype_to);
        return ret;
 }
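
The dquot.c hunks above all apply the same conversion pattern: inode->i_dquot[] is now pinned by inode->i_lock instead of dqptr_sem, so each operation snapshots the pointers into a local array and takes an extra reference (atomic_inc of dq_count) while the spinlock is held, does the accounting with dq_data_lock nested inside i_lock, and only then calls mark_all_dquot_dirty(), flush_warnings() and dqput_all() on the private copies, since those may sleep or do I/O and must not run under a spinlock. The following is a condensed, illustrative sketch of that pattern as it would sit inside fs/quota/dquot.c; example_quota_op() is an invented name and the accounting step is elided, so it is not a drop-in copy of any patched function.

/* Sketch only: the shape shared by the converted dquot.c functions above. */
static void example_quota_op(struct inode *inode)
{
        struct dquot *dquot[MAXQUOTAS] = { NULL };      /* private snapshot */
        int cnt;

        spin_lock(&inode->i_lock);              /* pins inode->i_dquot[] */
        spin_lock(&dq_data_lock);               /* now nests inside i_lock */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                dquot[cnt] = inode->i_dquot[cnt];
                if (!dquot[cnt])
                        continue;
                atomic_inc(&dquot[cnt]->dq_count);      /* keep it alive past unlock */
                /* ... adjust dquot[cnt] usage here, under dq_data_lock ... */
        }
        spin_unlock(&dq_data_lock);
        spin_unlock(&inode->i_lock);

        /* Work that may sleep or do I/O uses only the private copies. */
        mark_all_dquot_dirty(dquot);
        dqput_all(dquot);
}

The real functions differ in detail (separate check and update passes, reservation handling, the nofail and warning paths), but they all follow this shape.
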
diff -urp linux-3.0.61-0.orig/fs/quota/quota.c linux-3.0.61-0/fs/quota/quota.c
--- linux-3.0.61-0.orig/fs/quota/quota.c        2013-04-10 15:15:08.000000000 -0400
+++ linux-3.0.61-0/fs/quota/quota.c     2013-04-24 10:27:22.000000000 -0400
@@ -79,13 +79,13 @@ static int quota_getfmt(struct super_blo
 {
        __u32 fmt;
 
-       down_read(&sb_dqopt(sb)->dqptr_sem);
+       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
        if (!sb_has_quota_active(sb, type)) {
-               up_read(&sb_dqopt(sb)->dqptr_sem);
+               mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
                return -ESRCH;
        }
        fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
-       up_read(&sb_dqopt(sb)->dqptr_sem);
+       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
        if (copy_to_user(addr, &fmt, sizeof(fmt)))
                return -EFAULT;
        return 0;
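
With dqptr_sem gone, quota_getfmt() in the hunk above serializes against quotaon/quotaoff with dqonoff_mutex instead. A minimal sketch of that reader pattern, assuming the usual sb_dqopt() and sb_has_quota_active() helpers; example_read_fmt() is an invented name:

/* Sketch only: reading per-type quota state without dqptr_sem. */
static int example_read_fmt(struct super_block *sb, int type, __u32 *fmt)
{
        int ret = -ESRCH;

        mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);       /* serializes with quotaon/quotaoff */
        if (sb_has_quota_active(sb, type)) {
                *fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
                ret = 0;
        }
        mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
        return ret;
}

dqonoff_mutex is already held across quota state changes, which is what makes it a suitable replacement for the read side here.
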
diff -urp linux-3.0.61-0.orig/fs/stat.c linux-3.0.61-0/fs/stat.c
--- linux-3.0.61-0.orig/fs/stat.c       2013-04-10 15:15:08.000000000 -0400
+++ linux-3.0.61-0/fs/stat.c    2013-04-24 10:27:22.000000000 -0400
@@ -435,9 +435,8 @@ void inode_add_bytes(struct inode *inode
 
 EXPORT_SYMBOL(inode_add_bytes);
 
-void inode_sub_bytes(struct inode *inode, loff_t bytes)
+void __inode_sub_bytes(struct inode *inode, loff_t bytes)
 {
-       spin_lock(&inode->i_lock);
        inode->i_blocks -= bytes >> 9;
        bytes &= 511;
        if (inode->i_bytes < bytes) {
@@ -445,17 +444,28 @@ void inode_sub_bytes(struct inode *inode
                inode->i_bytes += 512;
        }
        inode->i_bytes -= bytes;
+}
+
+void inode_sub_bytes(struct inode *inode, loff_t bytes)
+{
+       spin_lock(&inode->i_lock);
+       __inode_sub_bytes(inode, bytes);
        spin_unlock(&inode->i_lock);
 }
 
 EXPORT_SYMBOL(inode_sub_bytes);
 
+loff_t __inode_get_bytes(struct inode *inode)
+{
+       return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
+}
+
 loff_t inode_get_bytes(struct inode *inode)
 {
        loff_t ret;
 
        spin_lock(&inode->i_lock);
-       ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
+       ret = __inode_get_bytes(inode);
        spin_unlock(&inode->i_lock);
        return ret;
 }
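
The stat.c hunk splits the byte accounting helpers into lockless __-prefixed variants for callers that already hold i_lock (as the converted quota code above now does) and the original wrappers that take the lock themselves. A small sketch of the intended division of labour; example_account() is an invented name and the bookkeeping is purely illustrative:

/* Sketch only: using the new __-prefixed helpers while holding i_lock. */
static loff_t example_account(struct inode *inode, loff_t freed)
{
        loff_t remaining;

        spin_lock(&inode->i_lock);
        __inode_sub_bytes(inode, freed);        /* lockless variant: caller holds i_lock */
        remaining = __inode_get_bytes(inode);
        spin_unlock(&inode->i_lock);

        return remaining;
}

Callers that do not already hold i_lock keep using inode_sub_bytes() and inode_get_bytes(), which acquire and release the lock internally.
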
diff -urp linux-3.0.61-0.orig/fs/super.c linux-3.0.61-0/fs/super.c
--- linux-3.0.61-0.orig/fs/super.c      2013-04-10 15:15:08.000000000 -0400
+++ linux-3.0.61-0/fs/super.c   2013-04-24 10:27:22.000000000 -0400
@@ -108,7 +108,6 @@ static struct super_block *alloc_super(s
                lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
                mutex_init(&s->s_dquot.dqio_mutex);
                mutex_init(&s->s_dquot.dqonoff_mutex);
-               init_rwsem(&s->s_dquot.dqptr_sem);
                init_waitqueue_head(&s->s_wait_unfrozen);
                s->s_maxbytes = MAX_NON_LFS;
                s->s_op = &default_op;
diff -urp linux-3.0.61-0.orig/include/linux/fs.h linux-3.0.61-0/include/linux/fs.h
--- linux-3.0.61-0.orig/include/linux/fs.h      2013-04-24 10:27:55.000000000 -0400
+++ linux-3.0.61-0/include/linux/fs.h   2013-04-22 17:42:39.000000000 -0400
@@ -2450,7 +2450,9 @@ extern void generic_fillattr(struct inod
 extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 void __inode_add_bytes(struct inode *inode, loff_t bytes);
 void inode_add_bytes(struct inode *inode, loff_t bytes);
+void __inode_sub_bytes(struct inode *inode, loff_t bytes);
 void inode_sub_bytes(struct inode *inode, loff_t bytes);
+loff_t __inode_get_bytes(struct inode *inode);
 loff_t inode_get_bytes(struct inode *inode);
 void inode_set_bytes(struct inode *inode, loff_t bytes);
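
Taken together, the patch removes the dqptr_sem rwsem entirely and relies on the spinlock nesting documented in the updated comment at the top of fs/quota/dquot.c: i_lock above dq_data_lock above dq_list_lock. The sketch below only illustrates that documented ordering (it would compile only inside dquot.c, where dq_data_lock and dq_list_lock are defined); example_nesting() is an invented name.

/* Sketch only: the lock nesting documented after this patch. */
static void example_nesting(struct inode *inode)
{
        spin_lock(&inode->i_lock);      /* outermost: protects inode->i_dquot[] */
        spin_lock(&dq_data_lock);       /* dquot usage and mem_dqinfo data */
        spin_lock(&dq_list_lock);       /* dquot lists and quota formats */

        /* ... */

        spin_unlock(&dq_list_lock);
        spin_unlock(&dq_data_lock);
        spin_unlock(&inode->i_lock);
}

dq_state_lock still nests below dq_list_lock, as the unchanged second line of the ordering comment notes.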