#endif
#include <unistd.h>
#include <fcntl.h>
+#include <assert.h>
#include <libcfs/list.h>
#include <lnet/lnet.h>
#ifdef __CYGWIN__
-#define CFS_PAGE_SHIFT 12
-#define CFS_PAGE_SIZE (1UL << CFS_PAGE_SHIFT)
-#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1))
+#define CFS_PAGE_SHIFT 12
+#define CFS_PAGE_SIZE (1UL << CFS_PAGE_SHIFT)
+#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1))
#define loff_t long long
#define ERESTART 2001
typedef unsigned short umode_t;
# define CURRENT_SECONDS time(0)
#endif
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) ((sizeof (a))/(sizeof ((a)[0])))
+#endif
+
/* This is because lprocfs_status.h gets included here indirectly. It would
* be much better to just avoid lprocfs being included into liblustre entirely
* but that requires more header surgery than I can handle right now.
#ifndef smp_processor_id
#define smp_processor_id() 0
#endif
-#ifndef smp_num_cpus
-#define smp_num_cpus 1
+#ifndef num_online_cpus
+#define num_online_cpus() 1
+#endif
+#ifndef num_possible_cpus
+#define num_possible_cpus() 1
#endif
/* always adopt 2.5 definitions */
#define GFP_HIGHUSER 1
#define GFP_ATOMIC 1
#define GFP_NOFS 1
-#define IS_ERR(a) ((unsigned long)(a) < 1000)
+#define IS_ERR(a) ((unsigned long)(a) > (unsigned long)-1000L)
#define PTR_ERR(a) ((long)(a))
#define ERR_PTR(a) ((void*)((long)(a)))
-typedef struct {
- void *cwd;
-}mm_segment_t;
-
typedef int (read_proc_t)(char *page, char **start, off_t off,
int count, int *eof, void *data);
return nr;
}
-static __inline__ int test_bit(int nr, long * addr)
+/* Return non-zero iff bit 'nr' is set in the bitmap starting at 'addr'.
+ * 'addr' is treated as an array of longs; 'nr' may exceed BITS_PER_LONG. */
+static __inline__ int test_bit(int nr, const long * addr)
{
return ((1UL << (nr & (BITS_PER_LONG - 1))) & ((addr)[nr / BITS_PER_LONG])) != 0;
}
extern int osc_init(void);
extern int lov_init(void);
extern int mdc_init(void);
+extern int lmv_init(void);
extern int mgc_init(void);
extern int echo_client_init(void);
#define EXPORT_SYMBOL(S)
+struct rcu_head { };
+
typedef struct { } spinlock_t;
typedef __u64 kdev_t;
static inline void spin_lock_irqsave(spinlock_t *a, unsigned long b) {}
static inline void spin_unlock_irqrestore(spinlock_t *a, unsigned long b) {}
+typedef spinlock_t rwlock_t;
+#define RW_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
+#define read_lock(l) spin_lock(l)
+#define read_unlock(l) spin_unlock(l)
+#define write_lock(l) spin_lock(l)
+#define write_unlock(l) spin_unlock(l)
+
+
#define min(x,y) ((x)<(y) ? (x) : (y))
#define max(x,y) ((x)>(y) ? (x) : (y))
}
return pg;
}
+#define cfs_alloc_pages(mask, order) alloc_pages((mask), (order))
-#define alloc_page(mask) alloc_pages((mask), 0)
+#define alloc_page(mask) alloc_pages((mask), 0)
+#define cfs_alloc_page(mask) alloc_page(mask)
static inline void __free_pages(cfs_page_t *pg, int what)
{
#endif
free(pg);
}
+#define __cfs_free_pages(pg, order) __free_pages((pg), (order))
#define __free_page(page) __free_pages((page), 0)
#define free_page(page) __free_page(page)
+#define __cfs_free_page(page) __cfs_free_pages((page), 0)
static inline cfs_page_t* __grab_cache_page(unsigned long index)
{
#define ATTR_RAW 0x0800 /* file system, not vfs will massage attrs */
#define ATTR_FROM_OPEN 0x1000 /* called from open path, ie O_TRUNC */
#define ATTR_CTIME_SET 0x2000
+#define ATTR_BLOCKS 0x4000
struct iattr {
unsigned int ia_valid;
time_t ia_ctime;
unsigned int ia_attr_flags;
};
-#define ll_iattr_struct iattr
+
+#define ll_iattr iattr
#define IT_OPEN 0x0001
#define IT_CREAT 0x0002
it->it_flags = flags;
}
-
struct dentry {
int d_count;
};
int state;
struct signal pending;
char comm[32];
+ int uid;
+ int gid;
int pid;
int fsuid;
int fsgid;
typedef struct { volatile int counter; } atomic_t;
+#define ATOMIC_INIT(i) { (i) }
#define atomic_read(a) ((a)->counter)
#define atomic_set(a,b) do {(a)->counter = b; } while (0)
#define atomic_dec_and_test(a) ((--((a)->counter)) == 0)
#define atomic_dec(a) do { (a)->counter--; } while (0)
#define atomic_add(b,a) do {(a)->counter += b;} while (0)
#define atomic_sub(b,a) do {(a)->counter -= b;} while (0)
-#define atomic_sub_return(n,a) ((a)->counter -= n)
-#define atomic_dec_return(a) atomic_sub_return(1,a)
#ifndef likely
#define likely(exp) (exp)
#define unlikely(exp) (exp)
#endif
+#define might_sleep()
+#define might_sleep_if(c)
+#define smp_mb()
+
+/* Set bit 'nr' in the bitmap at 'addr' and return its previous value
+ * (0 or 1).  NOT atomic -- liblustre is single-threaded userspace, so
+ * plain loads and stores are sufficient. */
+static inline
+int test_and_set_bit(int nr, unsigned long *addr)
+{
+	unsigned long mask;
+	int oldbit;
+
+	/* Advance to the word containing bit 'nr'.  The bound must be
+	 * the number of BITS in a long (8 * sizeof(long)), not the
+	 * number of bytes: comparing against sizeof(long) selected the
+	 * wrong word for any nr >= 8. */
+	while (nr >= 8 * sizeof(long)) {
+		nr -= 8 * sizeof(long);
+		addr++;
+	}
+
+	/* Build the mask as unsigned long: '1 << nr' is an int shift,
+	 * which is undefined/truncating for nr >= 31 on LP64 hosts. */
+	mask = 1UL << nr;
+	oldbit = (*addr & mask) != 0;
+	*addr |= mask;
+	return oldbit;
+}
+
+/* Clear bit 'nr' in the bitmap at 'addr' and return its previous value
+ * (0 or 1).  NOT atomic -- liblustre is single-threaded userspace, so
+ * plain loads and stores are sufficient. */
+static inline
+int test_and_clear_bit(int nr, unsigned long *addr)
+{
+	unsigned long mask;
+	int oldbit;
+
+	/* Advance to the word containing bit 'nr'.  The bound must be
+	 * the number of BITS in a long (8 * sizeof(long)), not the
+	 * number of bytes: comparing against sizeof(long) selected the
+	 * wrong word for any nr >= 8. */
+	while (nr >= 8 * sizeof(long)) {
+		nr -= 8 * sizeof(long);
+		addr++;
+	}
+
+	/* Build the mask as unsigned long: '1 << nr' is an int shift,
+	 * which is undefined/truncating for nr >= 31 on LP64 hosts. */
+	mask = 1UL << nr;
+	oldbit = (*addr & mask) != 0;
+	*addr &= ~mask;
+	return oldbit;
+}
+
/* FIXME sys/capability will finally included linux/fs.h thus
* cause numerous trouble on x86-64. as temporary solution for
- * build broken at cary, we copy definition we need from capability.h
+ * build broken at Cray, we copy definition we need from capability.h
* FIXME
*/
struct _cap_struct;
unsigned long fl_break_time; /* for nonblocking lease breaks */
union {
- struct nfs_lock_info nfs_fl;
+ struct nfs_lock_info nfs_fl;
} fl_u;
} cfs_flock_t;
#define ENOTSUPP ENOTSUP
#endif
+/* Minimal userspace emulation of the kernel's address-space segment
+ * API.  liblustre runs entirely in user space, so there is only one
+ * address space: get_fs() always reports USER_DS and set_fs() is a
+ * no-op, kept so code shared with the kernel compiles unchanged. */
+typedef int mm_segment_t;
+enum {
+	KERNEL_DS,
+	USER_DS
+};
+static inline mm_segment_t get_fs(void)
+{
+	return USER_DS;
+}
+
+/* No-op: there is no kernel segment to switch to in userspace. */
+static inline void set_fs(mm_segment_t seg)
+{
+}
+
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_lib.h>
#include <lustre_export.h>
#include <lustre_net.h>
+/* Fast hashing routine for a long.
+ (C) 2002 William Lee Irwin III, IBM */
+
+/*
+ * Knuth recommends primes in approximately golden ratio to the maximum
+ * integer representable by a machine word for multiplicative hashing.
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * These primes are chosen to be bit-sparse, that is operations on
+ * them can use shifts and additions instead of multiplications for
+ * machines where multiplications are slow.
+ */
+#if BITS_PER_LONG == 32
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e370001UL
+#elif BITS_PER_LONG == 64
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
+#else
+#error Define GOLDEN_RATIO_PRIME for your wordsize.
+#endif
+
+/* Multiplicative (Knuth) hash of 'val', returning its top 'bits' bits.
+ * Taken verbatim from the Linux kernel's <linux/hash.h>.  'bits' is
+ * expected to be in (0, BITS_PER_LONG]; the final right shift keeps
+ * the high bits of the product, which are the best mixed. */
+static inline unsigned long hash_long(unsigned long val, unsigned int bits)
+{
+	unsigned long hash = val;
+
+#if BITS_PER_LONG == 64
+	/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
+	/* The shift/add sequence below expands
+	 * hash * GOLDEN_RATIO_PRIME (0x9e37fffffffc0001UL) without a
+	 * 64-bit multiply instruction. */
+	unsigned long n = hash;
+	n <<= 18;
+	hash -= n;
+	n <<= 33;
+	hash -= n;
+	n <<= 3;
+	hash += n;
+	n <<= 3;
+	hash -= n;
+	n <<= 4;
+	hash += n;
+	n <<= 2;
+	hash += n;
+#else
+	/* On some cpus multiply is faster, on others gcc will do shifts */
+	hash *= GOLDEN_RATIO_PRIME;
+#endif
+
+	/* High bits are more random, so use them. */
+	return hash >> (BITS_PER_LONG - bits);
+}
+
+/* Hash a pointer's value down to 'bits' bits via hash_long().
+ * NOTE(review): assumes a pointer fits in an unsigned long (true on
+ * the LP64/ILP32 platforms liblustre targets). */
+static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
+{
+	return hash_long((unsigned long)ptr, bits);
+}
+
#endif