X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Fliblustre.h;h=e801dc6972b9d1b6fb6dc9f334b2c66b139d0613;hp=af80f443e794619acc9e62d8dffc9271ce082ba7;hb=859678cc6b075f7c81903e44b99bdbd18c635cbb;hpb=bd1b99d7caa6ab2b7c771524af9178a3da69eeab

diff --git a/lustre/include/liblustre.h b/lustre/include/liblustre.h
index af80f44..e801dc6 100644
--- a/lustre/include/liblustre.h
+++ b/lustre/include/liblustre.h
@@ -24,40 +24,66 @@
 #ifndef LIBLUSTRE_H__
 #define LIBLUSTRE_H__

-#include
-#include
-#ifndef __CYGWIN__
-#include
-#include
+#ifdef __KERNEL__
+#error Kernel files should not #include
 #else
-#include
-#include "ioctl.h"
+/*
+ * The userspace implementations of linux/spinlock.h vary; we just
+ * include our own for all of them
+ */
+#define __LINUX_SPINLOCK_H
+#endif
+
+#include
+#ifdef HAVE_STDINT_H
+# include
+#endif
+#ifdef HAVE_ASM_PAGE_H
+# include
 #endif
+#ifdef HAVE_SYS_USER_H
+# include
+#endif
+#ifdef HAVE_SYS_IOCTL_H
+# include
+#endif
+#ifndef _IOWR
+# include "ioctl.h"
+#endif
+
 #include
 #include
 #include
 #include
 #include
 #include
-#include
+#ifdef HAVE_SYS_VFS_H
+# include
+#endif
+#include
+#include

-#include
-#include
-#include
+#include
+#include
+#include

 /* definitions for liblustre */

 #ifdef __CYGWIN__

-#define PAGE_SHIFT 12
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#define CFS_PAGE_SHIFT 12
+#define CFS_PAGE_SIZE (1UL << CFS_PAGE_SHIFT)
+#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1))
 #define loff_t long long
 #define ERESTART 2001
 typedef unsigned short umode_t;

 #endif

+#ifndef CURRENT_SECONDS
+# define CURRENT_SECONDS time(0)
+#endif
+
 /* This is because lprocfs_status.h gets included here indirectly. It would
  * be much better to just avoid lprocfs being included into liblustre entirely
  * but that requires more header surgery than I can handle right now.
@@ -65,18 +91,25 @@ typedef unsigned short umode_t;
 #ifndef smp_processor_id
 #define smp_processor_id() 0
 #endif
+#ifndef smp_num_cpus
+#define smp_num_cpus 1
+#endif

 /* always adopt 2.5 definitions */
 #define KERNEL_VERSION(a,b,c) ((a)*100+(b)*10+c)
-#define LINUX_VERSION_CODE (2*200+5*10+0)
+#define LINUX_VERSION_CODE KERNEL_VERSION(2,5,0)
+
+#ifndef page_private
+#define page_private(page) ((page)->private)
+#define set_page_private(page, v) ((page)->private = (v))
+#endif
+
+
 static inline void inter_module_put(void *a)
 {
         return;
 }

-extern ptl_handle_ni_t tcpnal_ni;
-
 void *inter_module_get(char *arg);

 /* cheats for now */
@@ -112,13 +145,10 @@ static inline void *kmalloc(int size, int prot)
 #define GFP_HIGHUSER 1
 #define GFP_ATOMIC 1
 #define GFP_NOFS 1
-#define IS_ERR(a) (((a) && abs((long)(a)) < 500) ? 1 : 0)
+#define IS_ERR(a) ((unsigned long)(a) < 1000)
 #define PTR_ERR(a) ((long)(a))
 #define ERR_PTR(a) ((void*)((long)(a)))

-#define capable(foo) 1
-#define CAP_SYS_ADMIN 1
-
 typedef struct {
         void *cwd;
 }mm_segment_t;

@@ -130,19 +160,12 @@ struct file; /* forward ref */
 typedef int (write_proc_t)(struct file *file, const char *buffer,
                            unsigned long count, void *data);

-# define le16_to_cpu(x) __le16_to_cpu(x)
-# define cpu_to_le16(x) __cpu_to_le16(x)
-# define le32_to_cpu(x) __le32_to_cpu(x)
-# define cpu_to_le32(x) __cpu_to_le32(x)
-# define le64_to_cpu(x) __le64_to_cpu(x)
-# define cpu_to_le64(x) __cpu_to_le64(x)
-
 #define NIPQUAD(addr) \
         ((unsigned char *)&addr)[0], \
         ((unsigned char *)&addr)[1], \
         ((unsigned char *)&addr)[2], \
         ((unsigned char *)&addr)[3]
-
+
 #if defined(__LITTLE_ENDIAN)
 #define HIPQUAD(addr) \
         ((unsigned char *)&addr)[3], \
         ((unsigned char *)&addr)[2], \
         ((unsigned char *)&addr)[1], \
         ((unsigned char *)&addr)[0]
@@ -156,35 +179,38 @@ typedef int (write_proc_t)(struct file *file, const char *buffer,
 #endif /* __LITTLE_ENDIAN */

 /* bits ops */
-static __inline__ int set_bit(int nr,long * addr)
+
+/* a long can be more than 32 bits, so use BITS_PER_LONG
+ * to allow the compiler to adjust the bit shifting accordingly
+ */
+
+/* test if bit nr is set in bitmap addr; returns previous value of bit nr */
+static __inline__ int set_bit(int nr, long * addr)
 {
-        int mask, retval;
+        long mask;

-        addr += nr >> 5;
-        mask = 1 << (nr & 0x1f);
-        retval = (mask & *addr) != 0;
-        *addr |= mask;
-        return retval;
+        addr += nr / BITS_PER_LONG;
+        mask = 1UL << (nr & (BITS_PER_LONG - 1));
+        nr = (mask & *addr) != 0;
+        *addr |= mask;
+        return nr;
 }

+/* clear bit nr in bitmap addr; returns previous value of bit nr*/
 static __inline__ int clear_bit(int nr, long * addr)
 {
-        int mask, retval;
+        long mask;

-        addr += nr >> 5;
-        mask = 1 << (nr & 0x1f);
-        retval = (mask & *addr) != 0;
-        *addr &= ~mask;
-        return retval;
+        addr += nr / BITS_PER_LONG;
+        mask = 1UL << (nr & (BITS_PER_LONG - 1));
+        nr = (mask & *addr) != 0;
+        *addr &= ~mask;
+        return nr;
 }

 static __inline__ int test_bit(int nr, long * addr)
 {
-        int mask;
-
-        addr += nr >> 5;
-        mask = 1 << (nr & 0x1f);
-        return ((mask & *addr) != 0);
+        return ((1UL << (nr & (BITS_PER_LONG - 1))) & ((addr)[nr / BITS_PER_LONG])) != 0;
 }

 static __inline__ int ext2_set_bit(int nr, void *addr)
@@ -260,6 +286,7 @@ extern int ldlm_init(void);
 extern int osc_init(void);
 extern int lov_init(void);
 extern int mdc_init(void);
+extern int mgc_init(void);
 extern int echo_client_init(void);

@@ -273,6 +300,7 @@ typedef __u64 kdev_t;
 #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
 #define LASSERT_SPIN_LOCKED(lock) do {} while(0)
+#define LASSERT_SEM_LOCKED(sem) do {} while(0)

 static inline void spin_lock(spinlock_t *l) {return;}
 static inline void spin_unlock(spinlock_t *l) {return;}

@@ -291,33 +319,29 @@ static inline void spin_unlock_irqrestore(spinlock_t *a, unsigned long b) {}

 #ifndef min_t
 #define min_t(type,x,y) \
-        ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
+        ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
 #endif
 #ifndef max_t
 #define max_t(type,x,y) \
-        ({ type __x = (x); type __y = (y); __x > __y ? __x: __y; })
+        ({ type __x = (x); type __y = (y); __x > __y ? __x: __y; })
 #endif

-/* registering symbols */
+#define simple_strtol strtol

+/* registering symbols */
+#ifndef ERESTARTSYS
 #define ERESTARTSYS ERESTART
+#endif
 #define HZ 1

 /* random */

-static inline void get_random_bytes(void *ptr, int size)
-{
-        int *p = (int *)ptr;
-        int i, count = size/sizeof(int);
-
-        for (i = 0; i< count; i++)
-                *p++ = rand();
-}
+void get_random_bytes(void *ptr, int size);

 /* memory */

-/* FIXME */
-#define num_physpages (16 * 1024)
+/* memory size: used for some client tunables */
+#define num_physpages (256 * 1024) /* 1GB */

 static inline int copy_from_user(void *a,void *b, int c)
 {
@@ -359,31 +383,8 @@ static inline int kmem_cache_destroy(kmem_cache_t *a)
         free(a);
         return 0;
 }

-#define kmem_cache_alloc(cache, prio) malloc(cache->size)
-#define kmem_cache_free(cache, obj) free(obj)
-#define PAGE_CACHE_SIZE PAGE_SIZE
-#define PAGE_CACHE_SHIFT 12
-#define PAGE_CACHE_MASK PAGE_MASK
-
-/* XXX
- * for this moment, liblusre will not rely OST for non-page-aligned write
- */
-#define LIBLUSTRE_HANDLE_UNALIGNED_PAGE
-
-struct page {
-        void *addr;
-        unsigned long index;
-        struct list_head list;
-        unsigned long private;
-
-        /* internally used by liblustre file i/o */
-        int _offset;
-        int _count;
-#ifdef LIBLUSTRE_HANDLE_UNALIGNED_PAGE
-        int _managed;
-#endif
-};
+/* struct page decl moved out from here into portals/include/libcfs/user-prim.h */

 /* 2.4 defines */
 #define PAGE_LIST_ENTRY list
@@ -392,16 +393,16 @@ struct page {
 #define kmap(page) (page)->addr
 #define kunmap(a) do {} while (0)

-static inline struct page *alloc_pages(int mask, unsigned long order)
+static inline cfs_page_t *alloc_pages(int mask, unsigned long order)
 {
-        struct page *pg = malloc(sizeof(*pg));
+        cfs_page_t *pg = malloc(sizeof(*pg));

         if (!pg)
                 return NULL;
 #if 0 //#ifdef MAP_ANONYMOUS
         pg->addr = mmap(0, PAGE_SIZE << order, PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
 #else
-        pg->addr = malloc(PAGE_SIZE << order);
+        pg->addr = malloc(CFS_PAGE_SIZE << order);
 #endif

         if (!pg->addr) {
@@ -413,7 +414,7 @@ static inline struct page *alloc_pages(int mask, unsigned long order)

 #define alloc_page(mask) alloc_pages((mask), 0)

-static inline void __free_pages(struct page *pg, int what)
+static inline void __free_pages(cfs_page_t *pg, int what)
 {
 #if 0 //#ifdef MAP_ANONYMOUS
         munmap(pg->addr, PAGE_SIZE);
@@ -426,9 +427,9 @@ static inline void __free_pages(struct page *pg, int what)
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(page) __free_page(page)

-static inline struct page* __grab_cache_page(unsigned long index)
+static inline cfs_page_t* __grab_cache_page(unsigned long index)
 {
-        struct page *pg = alloc_pages(0, 0);
+        cfs_page_t *pg = alloc_pages(0, 0);

         if (pg)
                 pg->index = index;
@@ -474,6 +475,7 @@ struct iattr {
         time_t ia_ctime;
         unsigned int ia_attr_flags;
 };
+#define ll_iattr_struct iattr

 #define IT_OPEN 0x0001
 #define IT_CREAT 0x0002
@@ -538,7 +540,9 @@ struct semaphore {
 /* use the macro's argument to avoid unused warnings */
 #define down(a) do { (void)a; } while (0)
+#define mutex_down(a) down(a)
 #define up(a) do { (void)a; } while (0)
+#define mutex_up(a) up(a)
 #define down_read(a) do { (void)a; } while (0)
 #define up_read(a) do { (void)a; } while (0)
 #define down_write(a) do { (void)a; } while (0)
@@ -551,7 +555,12 @@ static inline void init_MUTEX (struct semaphore *sem)
 {
         sema_init(sem, 1);
 }
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+{
+        sema_init(sem, 0);
+}
+#define init_mutex(s) init_MUTEX(s)

 typedef struct {
         struct list_head sleepers;
@@ -566,24 +575,33 @@ struct signal {
         int signal;
 };

-struct fs_struct {
-        int umask;
-};
-
 struct task_struct {
-        struct fs_struct *fs;
         int state;
         struct signal pending;
         char comm[32];
         int pid;
         int fsuid;
         int fsgid;
+        int max_groups;
+        int ngroups;
+        gid_t *groups;
         __u32 cap_effective;
 };

-extern struct task_struct *current;
+typedef struct task_struct cfs_task_t;
+#define cfs_current() current
+#define cfs_curproc_pid() (current->pid)
+#define cfs_curproc_comm() (current->comm)

-#define in_group_p(a) 0 /* FIXME */
+extern struct task_struct *current;
+int in_group_p(gid_t gid);
+static inline int capable(int cap)
+{
+        if (current->cap_effective & (1 << cap))
+                return 1;
+        else
+                return 0;
+}

 #define set_current_state(foo) do { current->state = foo; } while (0)

@@ -623,6 +641,7 @@ static inline int schedule_timeout(signed long t)
 }

 #define lock_kernel() do {} while (0)
+#define unlock_kernel() do {} while (0)
 #define daemonize(l) do {} while (0)
 #define sigfillset(l) do {} while (0)
 #define recalc_sigpending(l) do {} while (0)
@@ -644,8 +663,10 @@ static inline int schedule_timeout(signed long t)
         _ret = tv.tv_sec; \
         _ret; \
 })
-#define time_after(a, b) ((long)(b) - (long)(a) > 0)
+#define get_jiffies_64() (__u64)jiffies
+#define time_after(a, b) ((long)(b) - (long)(a) < 0)
 #define time_before(a, b) time_after(b,a)
+#define time_after_eq(a,b) ((long)(a) - (long)(b) >= 0)

 struct timer_list {
         struct list_head tl_list;
@@ -656,7 +677,7 @@ struct timer_list {

 static inline int timer_pending(struct timer_list *l)
 {
-        if (l->expires > jiffies)
+        if (time_after(l->expires, jiffies))
                 return 1;
         else
                 return 0;
@@ -683,6 +704,7 @@ typedef struct { volatile int counter; } atomic_t;
 #define atomic_read(a) ((a)->counter)
 #define atomic_set(a,b) do {(a)->counter = b; } while (0)
 #define atomic_dec_and_test(a) ((--((a)->counter)) == 0)
+#define atomic_dec_and_lock(a,b) ((--((a)->counter)) == 0)
 #define atomic_inc(a) (((a)->counter)++)
 #define atomic_dec(a) do { (a)->counter--; } while (0)
 #define atomic_add(b,a) do {(a)->counter += b;} while (0)
@@ -695,24 +717,43 @@ typedef struct { volatile int counter; } atomic_t;
 #define unlikely(exp) (exp)
 #endif

+/* FIXME sys/capability will finally included linux/fs.h thus
+ * cause numerous trouble on x86-64. as temporary solution for
+ * build broken at cary, we copy definition we need from capability.h
+ * FIXME
+ */
+struct _cap_struct;
+typedef struct _cap_struct *cap_t;
+typedef int cap_value_t;
+typedef enum {
+    CAP_EFFECTIVE=0,
+    CAP_PERMITTED=1,
+    CAP_INHERITABLE=2
+} cap_flag_t;
+typedef enum {
+    CAP_CLEAR=0,
+    CAP_SET=1
+} cap_flag_value_t;
+
+#define CAP_DAC_OVERRIDE 1
+#define CAP_DAC_READ_SEARCH 2
+#define CAP_FOWNER 3
+#define CAP_FSETID 4
+#define CAP_SYS_ADMIN 21
+
+cap_t cap_get_proc(void);
+int cap_get_flag(cap_t, cap_value_t, cap_flag_t, cap_flag_value_t *);
+
 /* log related */
 static inline int llog_init_commit_master(void) { return 0; }
 static inline int llog_cleanup_commit_master(int force) { return 0; }
-static inline void portals_run_lbug_upcall(char *file, const char *fn,
+static inline void libcfs_run_lbug_upcall(char *file, const char *fn,
                                            const int l){}

-#define LBUG() \
-        do { \
-                printf("!!!LBUG at %s:%d\n", __FILE__, __LINE__); \
-                sleep(1000000); \
-        } while (0)
-
-
-
 /* completion */
 struct completion {
         unsigned int done;
-        wait_queue_head_t wait;
+        cfs_waitq_t wait;
 };

 #define COMPLETION_INITIALIZER(work) \
@@ -731,20 +772,130 @@ static inline void init_completion(struct completion *x)

 struct liblustre_wait_callback {
         struct list_head llwc_list;
+        const char *llwc_name;
         int (*llwc_fn)(void *arg);
         void *llwc_arg;
 };

-void *liblustre_register_wait_callback(int (*fn)(void *arg), void *arg);
+void *liblustre_register_wait_callback(const char *name,
+                                       int (*fn)(void *arg), void *arg);
 void liblustre_deregister_wait_callback(void *notifier);
 int liblustre_wait_event(int timeout);

-#include
-#include
-#include
-#include
-#include
-#include
+void *liblustre_register_idle_callback(const char *name,
+                                       int (*fn)(void *arg), void *arg);
+void liblustre_deregister_idle_callback(void *notifier);
+void liblustre_wait_idle(void);
+
+/* flock related */
+struct nfs_lock_info {
+        __u32 state;
+        __u32 flags;
+        void *host;
+};
+
+typedef struct file_lock {
+        struct file_lock *fl_next; /* singly linked list for this inode */
+        struct list_head fl_link; /* doubly linked list of all locks */
+        struct list_head fl_block; /* circular list of blocked processes */
+        void *fl_owner;
+        unsigned int fl_pid;
+        cfs_waitq_t fl_wait;
+        struct file *fl_file;
+        unsigned char fl_flags;
+        unsigned char fl_type;
+        loff_t fl_start;
+        loff_t fl_end;
+
+        void (*fl_notify)(struct file_lock *); /* unblock callback */
+        void (*fl_insert)(struct file_lock *); /* lock insertion callback */
+        void (*fl_remove)(struct file_lock *); /* lock removal callback */
+
+        void *fl_fasync; /* for lease break notifications */
+        unsigned long fl_break_time; /* for nonblocking lease breaks */
+
+        union {
+                struct nfs_lock_info nfs_fl;
+        } fl_u;
+} cfs_flock_t;
+
+#define cfs_flock_type(fl) ((fl)->fl_type)
+#define cfs_flock_set_type(fl, type) do { (fl)->fl_type = (type); } while(0)
+#define cfs_flock_pid(fl) ((fl)->fl_pid)
+#define cfs_flock_set_pid(fl, pid) do { (fl)->fl_pid = (pid); } while(0)
+#define cfs_flock_start(fl) ((fl)->fl_start)
+#define cfs_flock_set_start(fl, start) do { (fl)->fl_start = (start); } while(0)
+#define cfs_flock_end(fl) ((fl)->fl_end)
+#define cfs_flock_set_end(fl, end) do { (fl)->fl_end = (end); } while(0)
+
+#ifndef OFFSET_MAX
+#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
+#define OFFSET_MAX INT_LIMIT(loff_t)
+#endif
+
+/* XXX: defined in kernel */
+#define FL_POSIX 1
+#define FL_SLEEP 128
+
+/* quota */
+#define QUOTA_OK 0
+#define NO_QUOTA 1
+
+/* ACL */
+struct posix_acl_entry {
+        short e_tag;
+        unsigned short e_perm;
+        unsigned int e_id;
+};
+
+struct posix_acl {
+        atomic_t a_refcount;
+        unsigned int a_count;
+        struct posix_acl_entry a_entries[0];
+};
+
+typedef struct {
+        __u16 e_tag;
+        __u16 e_perm;
+        __u32 e_id;
+} xattr_acl_entry;
+
+typedef struct {
+        __u32 a_version;
+        xattr_acl_entry a_entries[0];
+} xattr_acl_header;
+
+static inline size_t xattr_acl_size(int count)
+{
+        return sizeof(xattr_acl_header) + count * sizeof(xattr_acl_entry);
+}
+
+static inline
+struct posix_acl * posix_acl_from_xattr(const void *value, size_t size)
+{
+        return NULL;
+}
+
+static inline
+int posix_acl_valid(const struct posix_acl *acl)
+{
+        return 0;
+}
+
+static inline
+void posix_acl_release(struct posix_acl *acl)
+{
+}
+
+#ifndef ENOTSUPP
+#define ENOTSUPP ENOTSUP
+#endif
+#include
+#include
+#include
+#include
+#include
+#include

 #endif