X-Git-Url: https://git.whamcloud.com/gitweb?a=blobdiff_plain;f=lustre%2Finclude%2Flinux%2Flustre_compat25.h;h=2adb2bd3e8bac6561515b7171372c590987e8f64;hb=fbb7ead129258897f5a5d5c9ce28d31fbbe5bca2;hp=120e996ef2d926c8cd312d9116b0ce9bdb6a5835;hpb=3de901fceee79de12a31428bcc6ba3a00f10d1fe;p=fs%2Flustre-release.git

diff --git a/lustre/include/linux/lustre_compat25.h b/lustre/include/linux/lustre_compat25.h
index 120e996..2adb2bd 100644
--- a/lustre/include/linux/lustre_compat25.h
+++ b/lustre/include/linux/lustre_compat25.h
@@ -29,10 +29,85 @@
 #error sorry, lustre requires at least 2.5.69
 #endif
 
-#include
+#include
+
+/*
+ * groups_info related stuff
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
+
+#define NGROUPS_SMALL           NGROUPS
+#define NGROUPS_PER_BLOCK       ((int)(EXEC_PAGESIZE / sizeof(gid_t)))
+struct group_info {
+        int ngroups;
+        atomic_t usage;
+        gid_t small_block[NGROUPS_SMALL];
+        int nblocks;
+        gid_t *blocks[0];
+};
+#define current_ngroups current->ngroups
+
+struct group_info *groups_alloc(int gidsetsize);
+void groups_free(struct group_info *ginfo);
+int groups_search(struct group_info *ginfo, gid_t grp);
+
+#define get_group_info(group_info)                                      \
+        do {                                                            \
+                atomic_inc(&(group_info)->usage);                       \
+        } while (0)
+
+#define put_group_info(group_info)                                      \
+        do {                                                            \
+                if (atomic_dec_and_test(&(group_info)->usage))          \
+                        groups_free(group_info);                        \
+        } while (0)
+
+#define groups_sort(gi) do {} while (0)
+#define GROUP_AT(gi, i) ((gi)->small_block[(i)])
+
+static inline int cleanup_group_info(void)
+{
+        /* Get rid of unneeded supplementary groups */
+        current->ngroups = 0;
+        memset(current->groups, 0, sizeof(current->groups));
+        return 0;
+}
+
+#else /* >= 2.6.4 */
+
+#define current_ngroups current->group_info->ngroups
+
+void groups_sort(struct group_info *ginfo);
+int groups_search(struct group_info *ginfo, gid_t grp);
+
+static inline int cleanup_group_info(void)
+{
+        struct group_info *ginfo;
+
+        ginfo = groups_alloc(0);
+        if (!ginfo)
+                return -ENOMEM;
+
+        set_current_groups(ginfo);
+        put_group_info(ginfo);
+
+        return 0;
+}
+#endif /* end of groups_info stuff */
+
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
 
+/* New (actually old) intent naming */
+#define lookup_intent open_intent
+
+/* And internals */
+#define it_flags flags
+#define it_op op
+#define it_magic magic
+#define it_op_release op_release
+#define it_create_mode create_mode
+
 /*
  * OBD need working random driver, thus all our
  * initialization routines must be called after device
@@ -77,35 +152,33 @@ static inline void lustre_daemonize_helper(void)
 {
         LASSERT(current->signal != NULL);
-        current->session = 1;
+        current->signal->session = 1;
         if (current->group_leader)
-                current->group_leader->__pgrp = 1;
+                current->group_leader->signal->pgrp = 1;
         else
                 CERROR("we aren't group leader\n");
-        current->tty = NULL;
+        current->signal->tty = NULL;
 }
 
-static inline int cleanup_group_info(void)
-{
-        struct group_info *ginfo;
-
-        ginfo = groups_alloc(2);
-        if (!ginfo)
-                return -ENOMEM;
-
-        ginfo->ngroups = 0;
-        set_current_groups(ginfo);
-        put_group_info(ginfo);
-
-        return 0;
-}
-
-#define smp_num_cpus NR_CPUS
-
-#ifndef conditional_schedule
-#define conditional_schedule() cond_resched()
+#define __set_page_ll_data(page, llap)                  \
+        do {                                            \
+                page_cache_get(page);                   \
+                SetPagePrivate(page);                   \
+                page->private = (unsigned long)llap;    \
+        } while (0)
+#define __clear_page_ll_data(page)                      \
+        do {                                            \
+                ClearPagePrivate(page);                 \
+                page_cache_release(page);               \
+                page->private = 0;                      \
+        } while (0)
+
+#ifndef smp_num_cpus
+#define smp_num_cpus num_online_cpus()
 #endif
 
+#define kiobuf bio
+
 #include
 
 #else /* 2.4.. */
@@ -175,23 +248,41 @@ static inline void lustre_daemonize_helper(void)
         current->tty = NULL;
 }
 
-static inline int cleanup_group_info(void)
+#ifndef HAVE_COND_RESCHED
+static inline void cond_resched(void)
 {
-        /* Get rid of unneeded supplementary groups */
-        current->ngroups = 0;
-        memset(current->groups, 0, sizeof(current->groups));
-        return 0;
+        if (unlikely(need_resched())) {
+                set_current_state(TASK_RUNNING);
+                schedule();
+        }
 }
-
-#ifndef conditional_schedule
-#define conditional_schedule() if (unlikely(need_resched())) schedule()
 #endif
 
+static inline int mapping_mapped(struct address_space *mapping)
+{
+        if (mapping->i_mmap_shared)
+                return 1;
+        if (mapping->i_mmap)
+                return 1;
+        return 0;
+}
+
 /* to find proc_dir_entry from inode. 2.6 has native one -bzzz */
 #ifndef HAVE_PDE
 #define PDE(ii) ((ii)->u.generic_ip)
 #endif
 
+#define __set_page_ll_data(page, llap) page->private = (unsigned long)llap
+#define __clear_page_ll_data(page) page->private = 0
+#define PageWriteback(page) 0
+#define end_page_writeback(page)
+
+#ifdef ZAP_PAGE_RANGE_VMA
+#define ll_zap_page_range(vma, addr, len) zap_page_range(vma, addr, len)
+#else
+#define ll_zap_page_range(vma, addr, len) zap_page_range(vma->vm_mm, addr, len)
+#endif
+
 #endif /* end of 2.4 compat macros */
 
 #ifdef HAVE_PAGE_LIST
@@ -231,5 +322,43 @@ static inline int mapping_has_pages(struct address_space *mapping)
 }
 #endif
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
+#define ll_set_dflags(dentry, flags) do { dentry->d_vfs_flags |= flags; } while(0)
+#define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path)
+#else
+#define ll_set_dflags(dentry, flags) do {               \
+                spin_lock(&dentry->d_lock);             \
+                dentry->d_flags |= flags;               \
+                spin_unlock(&dentry->d_lock);           \
+        } while(0)
+#define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path, mode)
+#endif
+
+#ifndef container_of
+#define container_of(ptr, type, member) ({                              \
+                const typeof( ((type *)0)->member ) *__mptr = (ptr);    \
+                (type *)( (char *)__mptr - offsetof(type,member) );})
+#endif
+
+#ifdef HAVE_I_ALLOC_SEM
+#define UP_WRITE_I_ALLOC_SEM(i)   do { up_write(&(i)->i_alloc_sem); } while (0)
+#define DOWN_WRITE_I_ALLOC_SEM(i) do { down_write(&(i)->i_alloc_sem); } while (0)
+#define LASSERT_MDS_ORPHAN_WRITE_LOCKED(i) LASSERT(down_read_trylock(&(i)->i_alloc_sem) == 0)
+
+#define UP_READ_I_ALLOC_SEM(i)    do { up_read(&(i)->i_alloc_sem); } while (0)
+#define DOWN_READ_I_ALLOC_SEM(i)  do { down_read(&(i)->i_alloc_sem); } while (0)
+#define LASSERT_MDS_ORPHAN_READ_LOCKED(i) LASSERT(down_write_trylock(&(i)->i_alloc_sem) == 0)
+#define MDS_PACK_MD_LOCK 1
+#else
+#define UP_READ_I_ALLOC_SEM(i)    do { up(&(i)->i_sem); } while (0)
+#define DOWN_READ_I_ALLOC_SEM(i)  do { down(&(i)->i_sem); } while (0)
+#define LASSERT_MDS_ORPHAN_READ_LOCKED(i) LASSERT(down_trylock(&(i)->i_sem) != 0)
+
+#define UP_WRITE_I_ALLOC_SEM(i)   do { up(&(i)->i_sem); } while (0)
+#define DOWN_WRITE_I_ALLOC_SEM(i) do { down(&(i)->i_sem); } while (0)
+#define LASSERT_MDS_ORPHAN_WRITE_LOCKED(i) LASSERT(down_trylock(&(i)->i_sem) != 0)
+#define MDS_PACK_MD_LOCK 0
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _COMPAT25_H */
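
For reference, a minimal usage sketch of how the compat helpers this patch
introduces are meant to be combined from caller code.  The function
example_service_main() and the way it receives an inode are hypothetical
illustrations only; just cleanup_group_info(), the *_I_ALLOC_SEM wrappers,
and the LASSERT_MDS_ORPHAN_*_LOCKED assertions come from this header.

        /* Sketch, not part of the patch: a daemonized Lustre service
         * thread drops the supplementary groups it inherited, then takes
         * the allocation-semaphore wrapper around an inode update.
         * Assumes lustre_compat25.h and its kernel dependencies are in
         * scope. */
        static int example_service_main(void *arg)
        {
                struct inode *inode = arg;      /* hypothetical argument */
                int rc;

                /* Resolves to the pre-2.6.4 or 2.6.4+ implementation at
                 * compile time; either way the thread ends up carrying no
                 * supplementary groups. */
                rc = cleanup_group_info();
                if (rc != 0)
                        return rc;

                /* Uses i_alloc_sem when HAVE_I_ALLOC_SEM is defined and
                 * falls back to i_sem otherwise; the caller need not know
                 * which kernel it was built against. */
                DOWN_WRITE_I_ALLOC_SEM(inode);
                /* ... modify allocation / orphan state under the lock;
                 * the assertion holds because the trylock inside it must
                 * fail while we hold the semaphore ... */
                LASSERT_MDS_ORPHAN_WRITE_LOCKED(inode);
                UP_WRITE_I_ALLOC_SEM(inode);

                return 0;
        }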