%diffstat blockgroup_lock.h | 4 +++- percpu_counter.h | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) %patch Index: linux-2.6.6/include/linux/percpu_counter.h =================================================================== --- linux-2.6.6.orig/include/linux/percpu_counter.h 2004-04-04 11:37:23.000000000 +0800 +++ linux-2.6.6/include/linux/percpu_counter.h 2004-05-22 16:08:16.000000000 +0800 @@ -3,6 +3,8 @@ * * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4. */ +#ifndef _LINUX_PERCPU_COUNTER_H +#define _LINUX_PERCPU_COUNTER_H #include <linux/config.h> #include <linux/spinlock.h> @@ -101,3 +103,5 @@ static inline void percpu_counter_dec(st { percpu_counter_mod(fbc, -1); } + +#endif /* _LINUX_PERCPU_COUNTER_H */ Index: linux-2.6.6/include/linux/blockgroup_lock.h =================================================================== --- linux-2.6.6.orig/include/linux/blockgroup_lock.h 2004-04-04 11:36:26.000000000 +0800 +++ linux-2.6.6/include/linux/blockgroup_lock.h 2004-05-22 16:08:45.000000000 +0800 @@ -3,6 +3,8 @@ * * Simple hashed spinlocking. */ +#ifndef _LINUX_BLOCKGROUP_LOCK_H +#define _LINUX_BLOCKGROUP_LOCK_H #include <linux/spinlock.h> #include <linux/cache.h> @@ -55,4 +57,4 @@ static inline void bgl_lock_init(struct #define sb_bgl_lock(sb, block_group) \ (&(sb)->s_blockgroup_lock.locks[(block_group) & (NR_BG_LOCKS-1)].lock) - +#endif