X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Finclude%2Flinux%2Flustre_compat25.h;h=cd610806812bc6dc08a145a084c83bad4e9afd6b;hb=4275694704e0fb82cd6980bec082cf358df0654c;hp=f976cda68aa611ddd33f0e316f3a6345a9225b94;hpb=191061ee668400324f4505cf498f1ee2d57e4962;p=fs%2Flustre-release.git

diff --git a/lustre/include/linux/lustre_compat25.h b/lustre/include/linux/lustre_compat25.h
index f976cda..cd61080 100644
--- a/lustre/include/linux/lustre_compat25.h
+++ b/lustre/include/linux/lustre_compat25.h
@@ -20,8 +20,8 @@
  *
  */
-#ifndef _COMPAT25_H
-#define _COMPAT25_H
+#ifndef _LINUX_COMPAT25_H
+#define _LINUX_COMPAT25_H
 
 #ifdef __KERNEL__
 
@@ -29,15 +29,76 @@
 #error sorry, lustre requires at least 2.5.69
 #endif
 
-#include
+#include
+
+#include
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
+struct ll_iattr {
+        struct iattr    iattr;
+        unsigned int    ia_attr_flags;
+};
+#else
+#define ll_iattr iattr
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) */
+
+#ifndef HAVE_SET_FS_PWD
+static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
+                                 struct dentry *dentry)
+{
+        struct dentry *old_pwd;
+        struct vfsmount *old_pwdmnt;
+
+        write_lock(&fs->lock);
+        old_pwd = fs->pwd;
+        old_pwdmnt = fs->pwdmnt;
+        fs->pwdmnt = mntget(mnt);
+        fs->pwd = dget(dentry);
+        write_unlock(&fs->lock);
+
+        if (old_pwd) {
+                dput(old_pwd);
+                mntput(old_pwdmnt);
+        }
+}
+#else
+#define ll_set_fs_pwd set_fs_pwd
+#endif /* HAVE_SET_FS_PWD */
 
 /*
- * groups_info related staff
+ * set ATTR_BLOCKS to a high value to avoid any risk of collision with other
+ * ATTR_* attributes (see bug 13828)
  */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
+#define ATTR_BLOCKS (1 << 27)
+
+#if HAVE_INODE_I_MUTEX
+#define UNLOCK_INODE_MUTEX(inode) do {mutex_unlock(&(inode)->i_mutex); } while(0)
+#define LOCK_INODE_MUTEX(inode) do {mutex_lock(&(inode)->i_mutex); } while(0)
+#define TRYLOCK_INODE_MUTEX(inode) mutex_trylock(&(inode)->i_mutex)
+#else
+#define UNLOCK_INODE_MUTEX(inode) do {up(&(inode)->i_sem); } while(0)
+#define LOCK_INODE_MUTEX(inode) do {down(&(inode)->i_sem); } while(0)
+#define TRYLOCK_INODE_MUTEX(inode) (!down_trylock(&(inode)->i_sem))
+#endif /* HAVE_INODE_I_MUTEX */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
+#define d_child d_u.d_child
+#define d_rcu d_u.d_rcu
+#endif
+
+#ifdef HAVE_DQUOTOFF_MUTEX
+#define UNLOCK_DQONOFF_MUTEX(dqopt) do {mutex_unlock(&(dqopt)->dqonoff_mutex); } while(0)
+#define LOCK_DQONOFF_MUTEX(dqopt) do {mutex_lock(&(dqopt)->dqonoff_mutex); } while(0)
+#else
+#define UNLOCK_DQONOFF_MUTEX(dqopt) do {up(&(dqopt)->dqonoff_sem); } while(0)
+#define LOCK_DQONOFF_MUTEX(dqopt) do {down(&(dqopt)->dqonoff_sem); } while(0)
+#endif /* HAVE_DQUOTOFF_MUTEX */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
 #define NGROUPS_SMALL           NGROUPS
 #define NGROUPS_PER_BLOCK       ((int)(EXEC_PAGESIZE / sizeof(gid_t)))
+
 struct group_info {
         int        ngroups;
         atomic_t   usage;
@@ -46,58 +107,32 @@ struct group_info {
         gid_t      *blocks[0];
 };
 #define current_ngroups current->ngroups
-
+#define current_groups current->groups
+
 struct group_info *groups_alloc(int gidsetsize);
 void groups_free(struct group_info *ginfo);
-int groups_search(struct group_info *ginfo, gid_t grp);
-
-#define get_group_info(group_info) \
-        do { \
-                atomic_inc(&(group_info)->usage); \
-        } while (0)
-
-#define put_group_info(group_info) \
-        do { \
-                if (atomic_dec_and_test(&(group_info)->usage)) \
-                        groups_free(group_info); \
-        } while (0)
-
-#define groups_sort(gi) do {} while (0)
-
-#define GROUP_AT(gi, i) ((gi)->small_block[(i)])
-
-static inline int cleanup_group_info(void)
-{
-        /* Get rid of unneeded supplementary groups */
-        current->ngroups = 0;
-        memset(current->groups, 0, sizeof(current->groups));
-        return 0;
-}
-
 #else /* >= 2.6.4 */
 
 #define current_ngroups current->group_info->ngroups
+#define current_groups current->group_info->small_block
 
-void groups_sort(struct group_info *ginfo);
-int groups_search(struct group_info *ginfo, gid_t grp);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) */
 
-static inline int cleanup_group_info(void)
-{
-        struct group_info *ginfo;
-
-        ginfo = groups_alloc(0);
-        if (!ginfo)
-                return -ENOMEM;
-
-        set_current_groups(ginfo);
-        put_group_info(ginfo);
+#ifndef page_private
+#define page_private(page) ((page)->private)
+#define set_page_private(page, v) ((page)->private = (v))
+#endif
 
-        return 0;
-}
-#endif /* end of groups_info stuff */
+#ifndef HAVE_GFP_T
+#define gfp_t int
+#endif
 
+#define lock_dentry(___dentry) spin_lock(&(___dentry)->d_lock)
+#define unlock_dentry(___dentry) spin_unlock(&(___dentry)->d_lock)
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+#define lock_24kernel() do {} while (0)
+#define unlock_24kernel() do {} while (0)
+#define ll_kernel_locked() kernel_locked()
 
 /*
  * OBD need working random driver, thus all our
@@ -110,9 +145,9 @@ static inline int cleanup_group_info(void)
 #endif
 
 /* XXX our code should be using the 2.6 calls, not the other way around */
-#define TryLockPage(page) TestSetPageLocked(page)
-#define filemap_fdatasync(mapping) filemap_fdatawrite(mapping)
-#define Page_Uptodate(page) PageUptodate(page)
+#define TryLockPage(page) TestSetPageLocked(page)
+#define Page_Uptodate(page) PageUptodate(page)
+#define ll_redirty_page(page) set_page_dirty(page)
 
 #define KDEVT_INIT(val) (val)
 
@@ -129,8 +164,7 @@ static inline int cleanup_group_info(void)
 #define ll_truncate_complete_page(page) \
         truncate_complete_page(page->mapping, page)
 
-#define ll_vfs_create(a,b,c,d) vfs_create(a,b,c,d)
-
+#define ll_vfs_create(a,b,c,d) vfs_create(a,b,c,d)
 #define ll_dev_t dev_t
 #define kdev_t dev_t
 #define to_kdev_t(dev) (dev)
@@ -140,135 +174,51 @@ static inline int cleanup_group_info(void)
 
 #include
 
-static inline void lustre_daemonize_helper(void)
+static inline int cleanup_group_info(void)
 {
-        LASSERT(current->signal != NULL);
-        current->signal->session = 1;
-        if (current->group_leader)
-                current->group_leader->signal->pgrp = 1;
-        else
-                CERROR("we aren't group leader\n");
-        current->signal->tty = NULL;
+        struct group_info *ginfo;
+
+        ginfo = groups_alloc(0);
+        if (!ginfo)
+                return -ENOMEM;
+
+        set_current_groups(ginfo);
+        put_group_info(ginfo);
+
+        return 0;
 }
 
 #define __set_page_ll_data(page, llap) \
         do { \
                 page_cache_get(page); \
                 SetPagePrivate(page); \
-                page->private = (unsigned long)llap; \
+                set_page_private(page, (unsigned long)llap); \
         } while (0)
 #define __clear_page_ll_data(page) \
         do { \
                 ClearPagePrivate(page); \
+                set_page_private(page, 0); \
                 page_cache_release(page); \
-                page->private = 0; \
         } while(0)
 
-#define smp_num_cpus NR_CPUS
-
 #define kiobuf bio
 
 #include
 
-#else /* 2.4.. */
-
-#define ll_vfs_create(a,b,c,d) vfs_create(a,b,c)
-#define ll_permission(inode,mask,nd) permission(inode,mask)
-#define ILOOKUP(sb, ino, test, data) ilookup4(sb, ino, test, data);
-#define DCACHE_DISCONNECTED DCACHE_NFSD_DISCONNECTED
-#define ll_dev_t int
-
-static inline void clear_page_dirty(struct page *page)
-{
-        if (PageDirty(page))
-                ClearPageDirty(page);
-}
-
-/* 2.5 uses hlists for some things, like the d_hash.  we'll treat them
- * as 2.5 and let macros drop back.. */
-#ifndef HLIST_HEAD /* until we get a kernel newer than l28 */
-#define hlist_entry list_entry
-#define hlist_head list_head
-#define hlist_node list_head
-#define HLIST_HEAD LIST_HEAD
-#define INIT_HLIST_HEAD INIT_LIST_HEAD
-#define hlist_del_init list_del_init
-#define hlist_add_head list_add
-#define hlist_for_each_safe list_for_each_safe
+#ifndef HAVE___D_REHASH
+#define __d_rehash(dentry, lock) d_rehash_cond(dentry, lock)
 #endif
-#define KDEVT_INIT(val) (val)
-#define ext3_xattr_set_handle ext3_xattr_set
-#define extN_xattr_set_handle extN_xattr_set
-#define try_module_get __MOD_INC_USE_COUNT
-#define module_put __MOD_DEC_USE_COUNT
-#define LTIME_S(time) (time)
-#if !defined(CONFIG_RH_2_4_20) && !defined(cpu_online)
-#define cpu_online(cpu) (cpu_online_map & (1<i_mapping->a_ops->writepage(page)
-#define ll_invalidate_inode_pages(inode) invalidate_inode_pages(inode)
-#define ll_truncate_complete_page(page) truncate_complete_page(page)
-
-static inline void __d_drop(struct dentry *dentry)
-{
-        list_del(&dentry->d_hash);
-        INIT_LIST_HEAD(&dentry->d_hash);
-}
-
-static inline void lustre_daemonize_helper(void)
-{
-        current->session = 1;
-        current->pgrp = 1;
-        current->tty = NULL;
-}
-
-#ifndef HAVE_COND_RESCHED
-static inline void cond_resched(void)
-{
-        if (unlikely(need_resched())) {
-                set_current_state(TASK_RUNNING);
-                schedule();
-        }
-}
-#endif
-
-static inline int mapping_mapped(struct address_space *mapping)
-{
-        return mapping->i_mmap_shared ? 1 : 0;
-}
-
-/* to find proc_dir_entry from inode. 2.6 has native one -bzzz */
-#ifndef HAVE_PDE
-#define PDE(ii) ((ii)->u.generic_ip)
-#endif
-
-#define __set_page_ll_data(page, llap) page->private = (unsigned long)llap
-#define __clear_page_ll_data(page) page->private = 0
-#define PageWriteback(page) 0
-#define end_page_writeback(page)
-
-#ifdef ZAP_PAGE_RANGE_VMA
-#define ll_zap_page_range(vma, addr, len) zap_page_range(vma, addr, len)
+#ifdef HAVE_CAN_SLEEP_ARG
+#define ll_flock_lock_file_wait(file, lock, can_sleep) \
+        flock_lock_file_wait(file, lock, can_sleep)
 #else
-#define ll_zap_page_range(vma, addr, len) zap_page_range(vma->vm_mm, addr, len)
+#define ll_flock_lock_file_wait(file, lock, can_sleep) \
+        flock_lock_file_wait(file, lock)
 #endif
-#endif /* end of 2.4 compat macros */
+#define CheckWriteback(page, cmd) \
+        (!(!PageWriteback(page) && cmd == OBD_BRW_WRITE))
 
 #ifdef HAVE_PAGE_LIST
 static inline int mapping_has_pages(struct address_space *mapping)
@@ -285,21 +235,6 @@ static inline int mapping_has_pages(struct address_space *mapping)
 
         return rc;
 }
-
-static inline int clear_page_dirty_for_io(struct page *page)
-{
-        struct address_space *mapping = page->mapping;
-
-        if (page->mapping && PageDirty(page)) {
-                ClearPageDirty(page);
-                ll_pgcache_lock(mapping);
-                list_del(&page->list);
-                list_add(&page->list, &mapping->locked_pages);
-                ll_pgcache_unlock(mapping);
-                return 1;
-        }
-        return 0;
-}
 #else
 static inline int mapping_has_pages(struct address_space *mapping)
 {
@@ -307,6 +242,12 @@ static inline int mapping_has_pages(struct address_space *mapping)
 }
 #endif
 
+#ifdef HAVE_KIOBUF_KIO_BLOCKS
+#define KIOBUF_GET_BLOCKS(k) ((k)->kio_blocks)
+#else
+#define KIOBUF_GET_BLOCKS(k) ((k)->blocks)
+#endif
+
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
 #define ll_set_dflags(dentry, flags) do { dentry->d_vfs_flags |= flags; } while(0)
 #define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path)
@@ -319,24 +260,103 @@ static inline int mapping_has_pages(struct address_space *mapping)
 #define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path, mode)
 #endif
 
+#ifndef container_of
+#define container_of(ptr, type, member) ({ \
+        const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+        (type *)( (char *)__mptr - offsetof(type,member) );})
+#endif
+
 #ifdef HAVE_I_ALLOC_SEM
-#define UP_WRITE_I_ALLOC_SEM(i) do { up_write(&(i)->i_alloc_sem); } while (0)
+#define UP_WRITE_I_ALLOC_SEM(i) do { up_write(&(i)->i_alloc_sem); } while (0)
 #define DOWN_WRITE_I_ALLOC_SEM(i) do { down_write(&(i)->i_alloc_sem); } while(0)
-#define LASSERT_MDS_ORPHAN_WRITE_LOCKED(i) LASSERT(down_read_trylock(&(i)->i_alloc_sem) == 0)
+#define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) LASSERT(down_read_trylock(&(i)->i_alloc_sem) == 0)
+
+#define UP_READ_I_ALLOC_SEM(i) do { up_read(&(i)->i_alloc_sem); } while (0)
+#define DOWN_READ_I_ALLOC_SEM(i) do { down_read(&(i)->i_alloc_sem); } while (0)
+#define LASSERT_I_ALLOC_SEM_READ_LOCKED(i) LASSERT(down_write_trylock(&(i)->i_alloc_sem) == 0)
+#else
+#define UP_READ_I_ALLOC_SEM(i) do { } while (0)
+#define DOWN_READ_I_ALLOC_SEM(i) do { } while (0)
+#define LASSERT_I_ALLOC_SEM_READ_LOCKED(i) do { } while (0)
+
+#define UP_WRITE_I_ALLOC_SEM(i) do { } while (0)
+#define DOWN_WRITE_I_ALLOC_SEM(i) do { } while (0)
+#define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) do { } while (0)
+#endif
+
+#ifndef HAVE_GRAB_CACHE_PAGE_NOWAIT_GFP
+#define grab_cache_page_nowait_gfp(x, y, z) grab_cache_page_nowait((x), (y))
+#endif
+
+#ifndef HAVE_FILEMAP_FDATAWRITE
+#define filemap_fdatawrite(mapping) filemap_fdatasync(mapping)
+#endif
 
-#define UP_READ_I_ALLOC_SEM(i) do { up_read(&(i)->i_alloc_sem); } while (0)
-#define DOWN_READ_I_ALLOC_SEM(i) do { down_read(&(i)->i_alloc_sem); } while (0)
-#define LASSERT_MDS_ORPHAN_READ_LOCKED(i) LASSERT(down_write_trylock(&(i)->i_alloc_sem) == 0)
-#define MDS_PACK_MD_LOCK 1
+#ifdef HAVE_VFS_KERN_MOUNT
+static inline
+struct vfsmount *
+ll_kern_mount(const char *fstype, int flags, const char *name, void *data)
+{
+        struct file_system_type *type = get_fs_type(fstype);
+        struct vfsmount *mnt;
+        if (!type)
+                return ERR_PTR(-ENODEV);
+        mnt = vfs_kern_mount(type, flags, name, data);
+        module_put(type->owner);
+        return mnt;
+}
 #else
-#define UP_READ_I_ALLOC_SEM(i) do { up(&(i)->i_sem); } while (0)
-#define DOWN_READ_I_ALLOC_SEM(i) do { down(&(i)->i_sem); } while (0)
-#define LASSERT_MDS_ORPHAN_READ_LOCKED(i) LASSERT(down_trylock(&(i)->i_sem) != 0)
-
-#define UP_WRITE_I_ALLOC_SEM(i) do { up(&(i)->i_sem); } while (0)
-#define DOWN_WRITE_I_ALLOC_SEM(i) do { down(&(i)->i_sem); } while (0)
-#define LASSERT_MDS_ORPHAN_WRITE_LOCKED(i) LASSERT(down_trylock(&(i)->i_sem) != 0)
-#define MDS_PACK_MD_LOCK 0
+#define ll_kern_mount(fstype, flags, name, data) do_kern_mount((fstype), (flags), (name), (data))
+#endif
+
+#ifndef HAVE_GENERIC_FILE_READ
+static inline
+ssize_t
+generic_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
+{
+        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
+        struct kiocb kiocb;
+        ssize_t ret;
+
+        init_sync_kiocb(&kiocb, filp);
+        kiocb.ki_pos = *ppos;
+        kiocb.ki_left = len;
+
+        ret = generic_file_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
+        *ppos = kiocb.ki_pos;
+        return ret;
+}
+#endif
+
+#ifndef HAVE_GENERIC_FILE_WRITE
+static inline
+ssize_t
+generic_file_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
+{
+        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
+        struct kiocb kiocb;
+        ssize_t ret;
+
+        init_sync_kiocb(&kiocb, filp);
+        kiocb.ki_pos = *ppos;
+        kiocb.ki_left = len;
+
+        ret = generic_file_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
+        *ppos = kiocb.ki_pos;
+
+        return ret;
+}
+#endif
+
+#ifdef HAVE_STATFS_DENTRY_PARAM
+#define ll_do_statfs(sb, sfs) (sb)->s_op->statfs((sb)->s_root, (sfs))
+#else
+#define ll_do_statfs(sb, sfs) (sb)->s_op->statfs((sb), (sfs))
+#endif
+
+/* task_struct */
+#ifndef HAVE_TASK_PPTR
+#define p_pptr parent
+#endif
 
 #endif /* __KERNEL__ */