* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
#error sorry, lustre requires at least linux kernel 2.6.9 or later
#endif
+#include <linux/fs_struct.h>
#include <libcfs/linux/portals_compat25.h>
#include <linux/lustre_patchless_compat.h>
struct dentry *old_pwd;
struct vfsmount *old_pwdmnt;
- write_lock(&fs->lock);
+ cfs_write_lock(&fs->lock);
old_pwd = fs->pwd;
old_pwdmnt = fs->pwdmnt;
fs->pwdmnt = mntget(mnt);
fs->pwd = dget(dentry);
- write_unlock(&fs->lock);
+ cfs_write_unlock(&fs->lock);
if (old_pwd) {
dput(old_pwd);
#define ATTR_BLOCKS (1 << 27)
#if HAVE_INODE_I_MUTEX
-#define UNLOCK_INODE_MUTEX(inode) do {mutex_unlock(&(inode)->i_mutex); } while(0)
-#define LOCK_INODE_MUTEX(inode) do {mutex_lock(&(inode)->i_mutex); } while(0)
+#define UNLOCK_INODE_MUTEX(inode) \
+do {cfs_mutex_unlock(&(inode)->i_mutex); } while(0)
+#define LOCK_INODE_MUTEX(inode) \
+do {cfs_mutex_lock(&(inode)->i_mutex); } while(0)
#define LOCK_INODE_MUTEX_PARENT(inode) \
-do {mutex_lock_nested(&(inode)->i_mutex, I_MUTEX_PARENT); } while(0)
-#define TRYLOCK_INODE_MUTEX(inode) mutex_trylock(&(inode)->i_mutex)
+do {cfs_mutex_lock_nested(&(inode)->i_mutex, I_MUTEX_PARENT); } while(0)
+#define TRYLOCK_INODE_MUTEX(inode) cfs_mutex_trylock(&(inode)->i_mutex)
#else
-#define UNLOCK_INODE_MUTEX(inode) do {up(&(inode)->i_sem); } while(0)
-#define LOCK_INODE_MUTEX(inode) do {down(&(inode)->i_sem); } while(0)
+#define UNLOCK_INODE_MUTEX(inode) do {cfs_up(&(inode)->i_sem); } while(0)
+#define LOCK_INODE_MUTEX(inode) do {cfs_down(&(inode)->i_sem); } while(0)
#define TRYLOCK_INODE_MUTEX(inode) (!down_trylock(&(inode)->i_sem))
#define LOCK_INODE_MUTEX_PARENT(inode) LOCK_INODE_MUTEX(inode)
#endif /* HAVE_INODE_I_MUTEX */
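+
+/* Usage sketch (hypothetical caller): take the parent lock class when
+ * operating on an entry under a directory, regardless of whether the
+ * kernel has i_mutex or the older i_sem:
+ *
+ *     LOCK_INODE_MUTEX_PARENT(dir);
+ *     ... create or unlink under dir ...
+ *     UNLOCK_INODE_MUTEX(dir);
+ */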
#ifdef HAVE_SEQ_LOCK
-#define LL_SEQ_LOCK(seq) mutex_lock(&(seq)->lock)
-#define LL_SEQ_UNLOCK(seq) mutex_unlock(&(seq)->lock)
+#define LL_SEQ_LOCK(seq) cfs_mutex_lock(&(seq)->lock)
+#define LL_SEQ_UNLOCK(seq) cfs_mutex_unlock(&(seq)->lock)
#else
-#define LL_SEQ_LOCK(seq) down(&(seq)->sem)
-#define LL_SEQ_UNLOCK(seq) up(&(seq)->sem)
+#define LL_SEQ_LOCK(seq) cfs_down(&(seq)->sem)
+#define LL_SEQ_UNLOCK(seq) cfs_up(&(seq)->sem)
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
#endif
#ifdef HAVE_DQUOTOFF_MUTEX
-#define UNLOCK_DQONOFF_MUTEX(dqopt) do {mutex_unlock(&(dqopt)->dqonoff_mutex); } while(0)
-#define LOCK_DQONOFF_MUTEX(dqopt) do {mutex_lock(&(dqopt)->dqonoff_mutex); } while(0)
+#define UNLOCK_DQONOFF_MUTEX(dqopt) cfs_mutex_unlock(&(dqopt)->dqonoff_mutex)
+#define LOCK_DQONOFF_MUTEX(dqopt) cfs_mutex_lock(&(dqopt)->dqonoff_mutex)
#else
-#define UNLOCK_DQONOFF_MUTEX(dqopt) do {up(&(dqopt)->dqonoff_sem); } while(0)
-#define LOCK_DQONOFF_MUTEX(dqopt) do {down(&(dqopt)->dqonoff_sem); } while(0)
+#define UNLOCK_DQONOFF_MUTEX(dqopt) cfs_up(&(dqopt)->dqonoff_sem)
+#define LOCK_DQONOFF_MUTEX(dqopt) cfs_down(&(dqopt)->dqonoff_sem)
#endif /* HAVE_DQUOTOFF_MUTEX */
-#define current_ngroups current->group_info->ngroups
-#define current_groups current->group_info->small_block
+#define current_ngroups current_cred()->group_info->ngroups
+#define current_groups current_cred()->group_info->small_block
#ifndef page_private
#define page_private(page) ((page)->private)
#define gfp_t int
#endif
-#define lock_dentry(___dentry) spin_lock(&(___dentry)->d_lock)
-#define unlock_dentry(___dentry) spin_unlock(&(___dentry)->d_lock)
+#define lock_dentry(___dentry) cfs_spin_lock(&(___dentry)->d_lock)
+#define unlock_dentry(___dentry) cfs_spin_unlock(&(___dentry)->d_lock)
#define ll_kernel_locked() kernel_locked()
#define LTIME_S(time) (time.tv_sec)
#define ll_path_lookup path_lookup
+
+#ifdef HAVE_EXPORT_INODE_PERMISSION
+#define ll_permission(inode,mask,nd) inode_permission(inode,mask)
+#else
#define ll_permission(inode,mask,nd) permission(inode,mask,nd)
+#endif
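+
+/* Usage sketch: a lookup path can ask whether the caller may search a
+ * directory without caring which permission API the kernel exports
+ * (nd is simply ignored on inode_permission() kernels):
+ *
+ *     rc = ll_permission(dir, MAY_EXEC, nd);
+ *     if (rc)
+ *             RETURN(rc);
+ */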
-#define ll_pgcache_lock(mapping) spin_lock(&mapping->page_lock)
-#define ll_pgcache_unlock(mapping) spin_unlock(&mapping->page_lock)
+#define ll_pgcache_lock(mapping) cfs_spin_lock(&mapping->page_lock)
+#define ll_pgcache_unlock(mapping) cfs_spin_unlock(&mapping->page_lock)
#define ll_call_writepage(inode, page) \
(inode)->i_mapping->a_ops->writepage(page, NULL)
#define ll_invalidate_inode_pages(inode) \
#define to_kdev_t(dev) (dev)
#define kdev_t_to_nr(dev) (dev)
#define val_to_kdev(dev) (dev)
-#define ILOOKUP(sb, ino, test, data) ilookup5(sb, ino, test, (void *)(data));
+
+#ifdef HAVE_BLKDEV_PUT_2ARGS
+#define ll_blkdev_put(a, b) blkdev_put(a, b)
+#else
+#define ll_blkdev_put(a, b) blkdev_put(a)
+#endif
+
+#ifdef HAVE_DENTRY_OPEN_4ARGS
+#define ll_dentry_open(a, b, c, d) dentry_open(a, b, c, d)
+#else
+#define ll_dentry_open(a, b, c, d) dentry_open(a, b, c)
+#endif
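+
+/* Usage sketch (hypothetical caller): both wrappers take the union of
+ * the old and new argument lists, so call sites are written once; the
+ * extra argument is dropped on older kernels. dentry_open() consumes
+ * the dentry and mount references, hence the dget()/mntget():
+ *
+ *     filp = ll_dentry_open(dget(dentry), mntget(mnt), O_RDONLY,
+ *                           current_cred());
+ *     if (IS_ERR(filp))
+ *             RETURN(PTR_ERR(filp));
+ */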
#include <linux/writeback.h>
-static inline int cleanup_group_info(void)
+static inline int cfs_cleanup_group_info(void)
{
struct group_info *ginfo;
extern void __d_rehash(struct dentry *dentry, int lock);
#endif
-#if !defined(HAVE_D_MOVE_LOCKED) && defined(HAVE___D_MOVE)
-#define d_move_locked(dentry, target) __d_move(dentry, target)
-extern void __d_move(struct dentry *dentry, struct dentry *target);
-#endif
-
#ifdef HAVE_CAN_SLEEP_ARG
#define ll_flock_lock_file_wait(file, lock, can_sleep) \
flock_lock_file_wait(file, lock, can_sleep)
int rc = 1;
ll_pgcache_lock(mapping);
- if (list_empty(&mapping->dirty_pages) &&
- list_empty(&mapping->clean_pages) &&
- list_empty(&mapping->locked_pages)) {
+ if (cfs_list_empty(&mapping->dirty_pages) &&
+ cfs_list_empty(&mapping->clean_pages) &&
+ cfs_list_empty(&mapping->locked_pages)) {
rc = 0;
}
ll_pgcache_unlock(mapping);
#define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
vfs_symlink(dir, dentry, path)
#endif
-#endif
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
-#define ll_set_dflags(dentry, flags) do { dentry->d_vfs_flags |= flags; } while(0)
-#else
#define ll_set_dflags(dentry, flags) do { \
- spin_lock(&dentry->d_lock); \
+ cfs_spin_lock(&dentry->d_lock); \
dentry->d_flags |= flags; \
- spin_unlock(&dentry->d_lock); \
+ cfs_spin_unlock(&dentry->d_lock); \
} while(0)
#endif
if (!type)
return ERR_PTR(-ENODEV);
mnt = vfs_kern_mount(type, flags, name, data);
- module_put(type->owner);
+ cfs_module_put(type->owner);
return mnt;
}
#else
#define TREE_READ_LOCK_IRQ(mapping) read_lock_irq(&(mapping)->tree_lock)
#define TREE_READ_UNLOCK_IRQ(mapping) read_unlock_irq(&(mapping)->tree_lock)
#else
-#define TREE_READ_LOCK_IRQ(mapping) spin_lock_irq(&(mapping)->tree_lock)
-#define TREE_READ_UNLOCK_IRQ(mapping) spin_unlock_irq(&(mapping)->tree_lock)
+#define TREE_READ_LOCK_IRQ(mapping) cfs_spin_lock_irq(&(mapping)->tree_lock)
+#define TREE_READ_UNLOCK_IRQ(mapping) cfs_spin_unlock_irq(&(mapping)->tree_lock)
#endif
#ifdef HAVE_UNREGISTER_BLKDEV_RETURN_INT
#define LL_RENAME_DOES_D_MOVE FS_ODD_RENAME
#endif
+#ifndef HAVE_D_OBTAIN_ALIAS
+/* The old d_alloc_anon() didn't free the inode reference on error
+ * like d_obtain_alias(). Hide that difference/inconvenience here. */
+static inline struct dentry *d_obtain_alias(struct inode *inode)
+{
+ struct dentry *anon = d_alloc_anon(inode);
+
+ if (anon == NULL)
+ iput(inode);
+
+ return anon;
+}
+#endif
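+
+/* Usage sketch: an NFS-export fh-to-dentry handler can now hand its
+ * inode reference straight in on all supported kernels; note the
+ * compat version above returns NULL on failure, while a native
+ * d_obtain_alias() returns an ERR_PTR():
+ *
+ *     dentry = d_obtain_alias(inode);
+ *     if (dentry == NULL || IS_ERR(dentry))
+ *             RETURN(ERR_PTR(-ENOMEM));
+ */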
+
/* add a lustre compatible layer for crypto API */
#include <linux/crypto.h>
#ifdef HAVE_ASYNC_BLOCK_CIPHER
#endif /* HAVE_ASYNC_BLOCK_CIPHER */
#ifndef HAVE_SYNCHRONIZE_RCU
+/* Linux 2.6.32 provides this define when !CONFIG_TREE_PREEMPT_RCU */
+#ifndef synchronize_rcu
#define synchronize_rcu() synchronize_kernel()
#endif
+#endif
#ifdef HAVE_FILE_REMOVE_SUID
# define ll_remove_suid(file, mnt) file_remove_suid(file)
vfs_rename(old,old_dir,new,new_dir)
#endif /* HAVE_SECURITY_PLUG */
-#ifndef for_each_possible_cpu
-#define for_each_possible_cpu(i) for_each_cpu(i)
+#ifdef for_each_possible_cpu
+#define cfs_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
+#elif defined(for_each_cpu)
+#define cfs_for_each_possible_cpu(cpu) for_each_cpu(cpu)
#endif
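+
+/* Usage sketch (hypothetical per-CPU statistic): the cfs_ name papers
+ * over the for_each_cpu() -> for_each_possible_cpu() rename, where
+ * hypothetical_counter is assumed to be a DEFINE_PER_CPU variable:
+ *
+ *     int cpu;
+ *     long total = 0;
+ *     cfs_for_each_possible_cpu(cpu)
+ *             total += per_cpu(hypothetical_counter, cpu);
+ */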
#ifndef cpu_to_node
#endif
#ifdef HAVE_REGISTER_SHRINKER
-typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
+typedef int (*cfs_shrinker_t)(SHRINKER_FIRST_ARG int nr_to_scan, gfp_t gfp_mask);
static inline
-struct shrinker *set_shrinker(int seek, shrinker_t func)
+struct shrinker *cfs_set_shrinker(int seek, cfs_shrinker_t func)
{
struct shrinker *s;
}
static inline
-void remove_shrinker(struct shrinker *shrinker)
+void cfs_remove_shrinker(struct shrinker *shrinker)
{
if (shrinker == NULL)
return;
}
#endif /* HAVE_REGISTER_SHRINKER */
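+
+/* Usage sketch (hypothetical cache shrinker), with a callback declared
+ * to match the cfs_shrinker_t typedef above:
+ *
+ *     static int my_shrink(SHRINKER_FIRST_ARG int nr_to_scan,
+ *                          gfp_t gfp_mask);
+ *
+ *     shrinker = cfs_set_shrinker(DEFAULT_SEEKS, my_shrink);
+ *     ...
+ *     cfs_remove_shrinker(shrinker);
+ */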
-/* Using kernel fls(). Userspace will use one defined in user-bitops.h. */
-#ifndef __fls
-#define __fls fls
-#endif
-
#ifdef HAVE_INVALIDATE_INODE_PAGES
#define invalidate_mapping_pages(mapping,s,e) invalidate_inode_pages(mapping)
#endif
#endif
#ifndef SLAB_DESTROY_BY_RCU
-#define SLAB_DESTROY_BY_RCU 0
+#define CFS_SLAB_DESTROY_BY_RCU 0
+#else
+#define CFS_SLAB_DESTROY_BY_RCU SLAB_DESTROY_BY_RCU
#endif
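+
+/* Usage sketch (argument list varies slightly across 2.6 kernels):
+ * slab caches holding RCU-freed objects can pass the flag
+ * unconditionally, since it degenerates to 0 where unsupported:
+ *
+ *     cache = kmem_cache_create("my_cache", size, 0,
+ *                               CFS_SLAB_DESTROY_BY_RCU, NULL);
+ */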
#ifdef HAVE_SB_HAS_QUOTA_ACTIVE
return -ENOSYS;
}
+#ifndef HAVE_BLK_QUEUE_LOG_BLK_SIZE /* added in 2.6.31 */
+#define blk_queue_logical_block_size(q, sz) blk_queue_hardsect_size(q, sz)
+#endif
+
+#ifndef HAVE_VFS_DQ_OFF
+# define ll_vfs_dq_init DQUOT_INIT
+# define ll_vfs_dq_drop DQUOT_DROP
+# define ll_vfs_dq_transfer DQUOT_TRANSFER
+# define ll_vfs_dq_off(sb, remount) DQUOT_OFF(sb)
+#else
+# define ll_vfs_dq_init vfs_dq_init
+# define ll_vfs_dq_drop vfs_dq_drop
+# define ll_vfs_dq_transfer vfs_dq_transfer
+# define ll_vfs_dq_off(sb, remount) vfs_dq_off(sb, remount)
+#endif
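+
+/* Usage sketch: chown-style setattr code can initialize and transfer
+ * quota under one name on both DQUOT_* and vfs_dq_* kernels (the
+ * transfer helper returns non-zero on failure in both generations):
+ *
+ *     ll_vfs_dq_init(inode);
+ *     if (ll_vfs_dq_transfer(inode, attr))
+ *             RETURN(-EDQUOT);
+ */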
+
+#ifdef HAVE_BDI_INIT
+#define ll_bdi_init(bdi) bdi_init(bdi)
+#define ll_bdi_destroy(bdi) bdi_destroy(bdi)
+#else
+#define ll_bdi_init(bdi) 0
+#define ll_bdi_destroy(bdi) do { } while(0)
+#endif
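+
+/* Usage sketch (hypothetical superblock-private bdi field): fill_super
+ * code can call this unconditionally; without HAVE_BDI_INIT the
+ * wrapper is a no-op that reports success:
+ *
+ *     if (ll_bdi_init(&sbi->ll_bdi))
+ *             RETURN(-ENOMEM);
+ */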
+
+#ifdef HAVE_NEW_BACKING_DEV_INFO
+# define ll_bdi_wb_cnt(bdi) ((bdi).wb_cnt)
+#else
+# define ll_bdi_wb_cnt(bdi) 1
+#endif
+
+#ifdef HAVE_BLK_QUEUE_MAX_SECTORS /* removed in rhel6 */
+#define blk_queue_max_hw_sectors(q, sect) blk_queue_max_sectors(q, sect)
+#endif
+
+#ifndef HAVE_REQUEST_QUEUE_LIMITS
+#define queue_max_sectors(rq) ((rq)->max_sectors)
+#define queue_max_hw_sectors(rq) ((rq)->max_hw_sectors)
+#define queue_max_phys_segments(rq) ((rq)->max_phys_segments)
+#define queue_max_hw_segments(rq) ((rq)->max_hw_segments)
+#endif
+
+#ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS
+#define blk_queue_max_segments(rq, seg) \
+ do { blk_queue_max_phys_segments(rq, seg); \
+ blk_queue_max_hw_segments(rq, seg); } while (0)
+#else
+#define queue_max_phys_segments(rq) queue_max_segments(rq)
+#define queue_max_hw_segments(rq) queue_max_segments(rq)
+#endif
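+
+/* Usage sketch (hypothetical block-device setup): with the wrappers
+ * above, queue limits can be programmed with the post-2.6.31 names on
+ * every supported kernel:
+ *
+ *     blk_queue_logical_block_size(q, CFS_PAGE_SIZE);
+ *     blk_queue_max_hw_sectors(q, max_sectors);
+ *     blk_queue_max_segments(q, max_segments);
+ */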
+
+#ifndef HAVE_BI_HW_SEGMENTS
+#define bio_hw_segments(q, bio) 0
+#endif
+
#endif /* __KERNEL__ */
#endif /* _COMPAT25_H */