X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Finclude%2Fliblustre.h;h=2e4968fdc2cac707a11d5e3b2de6945c185f6fd4;hb=d4a31821ccd63fcc5197680300bdc37a9453a332;hp=e801dc6972b9d1b6fb6dc9f334b2c66b139d0613;hpb=859678cc6b075f7c81903e44b99bdbd18c635cbb;p=fs%2Flustre-release.git

diff --git a/lustre/include/liblustre.h b/lustre/include/liblustre.h
index e801dc6..2e4968f 100644
--- a/lustre/include/liblustre.h
+++ b/lustre/include/liblustre.h
@@ -62,6 +62,7 @@
 #endif
 #include
 #include
+#include
 #include
 #include
@@ -71,9 +72,9 @@
 
 #ifdef __CYGWIN__
 
-#define CFS_PAGE_SHIFT 12
-#define CFS_PAGE_SIZE (1UL << CFS_PAGE_SHIFT)
-#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1))
+#define CFS_PAGE_SHIFT 12
+#define CFS_PAGE_SIZE (1UL << CFS_PAGE_SHIFT)
+#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1))
 #define loff_t long long
 #define ERESTART 2001
 typedef unsigned short umode_t;
@@ -84,6 +85,10 @@ typedef unsigned short umode_t;
 # define CURRENT_SECONDS time(0)
 #endif
 
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) ((sizeof (a))/(sizeof ((a)[0])))
+#endif
+
 /* This is because lprocfs_status.h gets included here indirectly. It would
  * be much better to just avoid lprocfs being included into liblustre entirely
  * but that requires more header surgery than I can handle right now.
@@ -91,8 +96,11 @@ typedef unsigned short umode_t;
 #ifndef smp_processor_id
 #define smp_processor_id() 0
 #endif
-#ifndef smp_num_cpus
-#define smp_num_cpus 1
+#ifndef num_online_cpus
+#define num_online_cpus() 1
+#endif
+#ifndef num_possible_cpus
+#define num_possible_cpus() 1
 #endif
 
 /* always adopt 2.5 definitions */
@@ -145,14 +153,10 @@ static inline void *kmalloc(int size, int prot)
 #define GFP_HIGHUSER 1
 #define GFP_ATOMIC 1
 #define GFP_NOFS 1
-#define IS_ERR(a) ((unsigned long)(a) < 1000)
+#define IS_ERR(a) ((unsigned long)(a) > (unsigned long)-1000L)
 #define PTR_ERR(a) ((long)(a))
 #define ERR_PTR(a) ((void*)((long)(a)))
 
-typedef struct {
-        void *cwd;
-}mm_segment_t;
-
 typedef int (read_proc_t)(char *page, char **start, off_t off,
                           int count, int *eof, void *data);
 
@@ -208,7 +212,7 @@ static __inline__ int clear_bit(int nr, long * addr)
         return nr;
 }
 
-static __inline__ int test_bit(int nr, long * addr)
+static __inline__ int test_bit(int nr, const long * addr)
 {
         return ((1UL << (nr & (BITS_PER_LONG - 1))) & ((addr)[nr / BITS_PER_LONG])) != 0;
 }
@@ -286,6 +290,7 @@ extern int ldlm_init(void);
 extern int osc_init(void);
 extern int lov_init(void);
 extern int mdc_init(void);
+extern int lmv_init(void);
 extern int mgc_init(void);
 extern int echo_client_init(void);
 
@@ -295,6 +300,8 @@
 
 #define EXPORT_SYMBOL(S)
 
+struct rcu_head { };
+
 typedef struct { } spinlock_t;
 typedef __u64 kdev_t;
@@ -314,6 +321,14 @@ static inline void spin_unlock_bh(spinlock_t *l) {}
 static inline void spin_lock_irqsave(spinlock_t *a, unsigned long b) {}
 static inline void spin_unlock_irqrestore(spinlock_t *a, unsigned long b) {}
 
+typedef spinlock_t rwlock_t;
+#define RW_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
+#define read_lock(l) spin_lock(l)
+#define read_unlock(l) spin_unlock(l)
+#define write_lock(l) spin_lock(l)
+#define write_unlock(l) spin_unlock(l)
+
+
 #define min(x,y) ((x)<(y) ? (x) : (y))
 #define max(x,y) ((x)>(y) ? (x) : (y))
@@ -411,8 +426,10 @@ static inline cfs_page_t *alloc_pages(int mask, unsigned long order)
         }
         return pg;
 }
+#define cfs_alloc_pages(mask, order) alloc_pages((mask), (order))
 
-#define alloc_page(mask) alloc_pages((mask), 0)
+#define alloc_page(mask) alloc_pages((mask), 0)
+#define cfs_alloc_page(mask) alloc_page(mask)
 
 static inline void __free_pages(cfs_page_t *pg, int what)
 {
@@ -423,9 +440,11 @@ static inline void __free_pages(cfs_page_t *pg, int what)
 #endif
         free(pg);
 }
+#define __cfs_free_pages(pg, order) __free_pages((pg), (order))
 
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(page) __free_page(page)
+#define __cfs_free_page(page) __cfs_free_pages((page), 0)
 
 static inline cfs_page_t* __grab_cache_page(unsigned long index)
 {
@@ -463,6 +482,7 @@
 #define ATTR_RAW 0x0800 /* file system, not vfs will massage attrs */
 #define ATTR_FROM_OPEN 0x1000 /* called from open path, ie O_TRUNC */
 #define ATTR_CTIME_SET 0x2000
+#define ATTR_BLOCKS 0x4000
 
 struct iattr {
         unsigned int ia_valid;
@@ -475,7 +495,8 @@ struct iattr {
         time_t ia_ctime;
         unsigned int ia_attr_flags;
 };
-#define ll_iattr_struct iattr
+
+#define ll_iattr iattr
 
 #define IT_OPEN 0x0001
 #define IT_CREAT 0x0002
@@ -519,7 +540,6 @@ static inline void intent_init(struct lookup_intent *it, int op, int flags)
         it->it_flags = flags;
 }
 
-
 struct dentry {
         int d_count;
 };
@@ -579,6 +599,8 @@ struct task_struct {
         int state;
         struct signal pending;
         char comm[32];
+        int uid;
+        int gid;
         int pid;
         int fsuid;
         int fsgid;
@@ -701,6 +723,7 @@ static inline void del_timer(struct timer_list *l)
 
 typedef struct { volatile int counter; } atomic_t;
 
+#define ATOMIC_INIT(i) { (i) }
 #define atomic_read(a) ((a)->counter)
 #define atomic_set(a,b) do {(a)->counter = b; } while (0)
 #define atomic_dec_and_test(a) ((--((a)->counter)) == 0)
@@ -717,9 +740,43 @@ typedef struct { volatile int counter; } atomic_t;
 #define unlikely(exp) (exp)
 #endif
 
+#define might_sleep()
+#define might_sleep_if(c)
+#define smp_mb()
+
+static inline
+int test_and_set_bit(int nr, unsigned long *addr)
+{
+        int oldbit;
+
+        while (nr >= sizeof(long)) {
+                nr -= sizeof(long);
+                addr++;
+        }
+
+        oldbit = (*addr) & (1 << nr);
+        *addr |= (1 << nr);
+        return oldbit;
+}
+
+static inline
+int test_and_clear_bit(int nr, unsigned long *addr)
+{
+        int oldbit;
+
+        while (nr >= sizeof(long)) {
+                nr -= sizeof(long);
+                addr++;
+        }
+
+        oldbit = (*addr) & (1 << nr);
+        *addr &= ~(1 << nr);
+        return oldbit;
+}
+
 /* FIXME sys/capability will finally included linux/fs.h thus
  * cause numerous trouble on x86-64. as temporary solution for
- * build broken at cary, we copy definition we need from capability.h
+ * build broken at Cray, we copy definition we need from capability.h
  * FIXME
  */
 struct _cap_struct;
@@ -815,7 +872,7 @@ typedef struct file_lock {
         unsigned long fl_break_time; /* for nonblocking lease breaks */
 
         union {
-                struct nfs_lock_info nfs_fl;
+                struct nfs_lock_info nfs_fl;
         } fl_u;
 } cfs_flock_t;
@@ -891,6 +948,20 @@ void posix_acl_release(struct posix_acl *acl)
 #define ENOTSUPP ENOTSUP
 #endif
 
+typedef int mm_segment_t;
+enum {
+        KERNEL_DS,
+        USER_DS
+};
+static inline mm_segment_t get_fs(void)
+{
+        return USER_DS;
+}
+
+static inline void set_fs(mm_segment_t seg)
+{
+}
+
 #include
 #include
 #include
@@ -898,4 +969,60 @@ void posix_acl_release(struct posix_acl *acl)
 #include
 #include
 
+/* Fast hashing routine for a long.
+   (C) 2002 William Lee Irwin III, IBM */
+
+/*
+ * Knuth recommends primes in approximately golden ratio to the maximum
+ * integer representable by a machine word for multiplicative hashing.
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * These primes are chosen to be bit-sparse, that is operations on
+ * them can use shifts and additions instead of multiplications for
+ * machines where multiplications are slow.
+ */
+#if BITS_PER_LONG == 32
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e370001UL
+#elif BITS_PER_LONG == 64
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
+#else
+#error Define GOLDEN_RATIO_PRIME for your wordsize.
+#endif
+
+static inline unsigned long hash_long(unsigned long val, unsigned int bits)
+{
+        unsigned long hash = val;
+
+#if BITS_PER_LONG == 64
+        /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
+        unsigned long n = hash;
+        n <<= 18;
+        hash -= n;
+        n <<= 33;
+        hash -= n;
+        n <<= 3;
+        hash += n;
+        n <<= 3;
+        hash -= n;
+        n <<= 4;
+        hash += n;
+        n <<= 2;
+        hash += n;
+#else
+        /* On some cpus multiply is faster, on others gcc will do shifts */
+        hash *= GOLDEN_RATIO_PRIME;
+#endif
+
+        /* High bits are more random, so use them. */
+        return hash >> (BITS_PER_LONG - bits);
+}
+
+static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
+{
+        return hash_long((unsigned long)ptr, bits);
+}
+
 #endif
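A minimal usage sketch for the hash_long()/hash_ptr() helpers added above (not part of the patch; OBJ_HASH_BITS, struct obj and obj_insert() are hypothetical names). The helpers implement Knuth-style multiplicative hashing, hash = (val * GOLDEN_RATIO_PRIME) >> (BITS_PER_LONG - bits), so the return value always fits a table of 2^bits buckets and can index it directly:

#define OBJ_HASH_BITS 7                         /* 128 buckets; illustrative value */
#define OBJ_HASH_SIZE (1UL << OBJ_HASH_BITS)

struct obj {
        void       *key;
        struct obj *next;
};

static struct obj *obj_hash[OBJ_HASH_SIZE];

/* hash_ptr() returns a value in [0, 2^OBJ_HASH_BITS) */
static void obj_insert(struct obj *o)
{
        unsigned long bucket = hash_ptr(o->key, OBJ_HASH_BITS);

        o->next = obj_hash[bucket];
        obj_hash[bucket] = o;
}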
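A similar sketch for the revised IS_ERR() test (not part of the patch; alloc_buffer() and demo() are hypothetical). The new definition follows the kernel convention of encoding a negative errno in the top 1000 values of the pointer range via ERR_PTR(), instead of treating every value below 1000 as an error:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
/* assumes the ERR_PTR()/IS_ERR()/PTR_ERR() macros from liblustre.h are in scope */

static void *alloc_buffer(size_t size)
{
        void *buf = malloc(size);

        if (buf == NULL)
                return ERR_PTR(-ENOMEM);        /* encode the errno in the pointer */
        return buf;
}

static void demo(void)
{
        void *buf = alloc_buffer(4096);

        if (IS_ERR(buf)) {                      /* true only for the top 1000 addresses */
                fprintf(stderr, "allocation failed: %ld\n", PTR_ERR(buf));
                return;
        }
        free(buf);
}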