-- stacktrace_ops.{warning(), warning_symbol()} removed.
-- sk_sleep() helper added.
-- quota_on() 4th parameter changed to use 'struct path'.
-- fs_struct.lock change to use spin_lock.
-- other trivial changes.
Change-Id: Ic9bf47454b19c1cfc3e41cd3aebbabb074f6110f
Signed-off-by: Yang Sheng <ys@whamcloud.com>
Reviewed-on: http://review.whamcloud.com/1864
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Lai Siyao <laisiyao@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
+ AC_MSG_CHECKING([whether stacktrace_ops.warning exists])
+ LB_LINUX_TRY_COMPILE([
+ struct task_struct;
+ struct pt_regs;
+ #include <asm/stacktrace.h>
+ ],[
+ ((struct stacktrace_ops *)0)->warning(NULL, NULL);
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_STACKTRACE_WARNING, 1, [stacktrace_ops.warning exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ],[
+ ])
AC_MSG_CHECKING([dump_trace want address])
LB_LINUX_TRY_COMPILE([
struct task_struct;
AC_MSG_CHECKING([dump_trace want address])
LB_LINUX_TRY_COMPILE([
struct task_struct;
+#
+# 2.6.35 kernel has sk_sleep function
+#
+AC_DEFUN([LC_SK_SLEEP],
+[AC_MSG_CHECKING([if kernel has sk_sleep])
+LB_LINUX_TRY_COMPILE([
+ #include <net/sock.h>
+],[
+ sk_sleep(NULL);
+],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_SK_SLEEP, 1, [kernel has sk_sleep])
+],[
+ AC_MSG_RESULT(no)
+])
+])
#
# FC15 2.6.40-5 backported the "shrink_control" parameter to the memory
#
# FC15 2.6.40-5 backported the "shrink_control" parameter to the memory
LIBCFS_SYSCTL_CTLNAME
# 2.6.34
LIBCFS_ADD_WAIT_QUEUE_EXCLUSIVE
LIBCFS_SYSCTL_CTLNAME
# 2.6.34
LIBCFS_ADD_WAIT_QUEUE_EXCLUSIVE
# 2.6.40 fc15
LC_SHRINK_CONTROL
])
# 2.6.40 fc15
LC_SHRINK_CONTROL
])
return sock->sk->sk_wmem_queued;
}
return sock->sk->sk_wmem_queued;
}
+#ifndef HAVE_SK_SLEEP
+static inline wait_queue_head_t *sk_sleep(struct sock *sk)
+{
+ return sk->sk_sleep;
+}
+#endif
+
#ifdef HAVE_INIT_NET
#define DEFAULT_NET (&init_net)
#else
#ifdef HAVE_INIT_NET
#define DEFAULT_NET (&init_net)
#else
#include <linux/nmi.h>
#include <asm/stacktrace.h>
#include <linux/nmi.h>
#include <asm/stacktrace.h>
+#ifdef HAVE_STACKTRACE_WARNING
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
{
printk("%s%s\n", (char *)data, msg);
}
{
printk("%s%s\n", (char *)data, msg);
}
static int print_trace_stack(void *data, char *name)
{
static int print_trace_stack(void *data, char *name)
{
}
static DUMP_TRACE_CONST struct stacktrace_ops print_trace_ops = {
}
static DUMP_TRACE_CONST struct stacktrace_ops print_trace_ops = {
+#ifdef HAVE_STACKTRACE_WARNING
.warning = print_trace_warning,
.warning_symbol = print_trace_warning_symbol,
.warning = print_trace_warning,
.warning_symbol = print_trace_warning_symbol,
.stack = print_trace_stack,
.address = print_trace_address,
#ifdef STACKTRACE_OPS_HAVE_WALK_STACK
.stack = print_trace_stack,
.address = print_trace_address,
#ifdef STACKTRACE_OPS_HAVE_WALK_STACK
newsock->ops = sock->ops;
set_current_state(TASK_INTERRUPTIBLE);
newsock->ops = sock->ops;
set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(sock->sk->sk_sleep, &wait);
+ add_wait_queue(sk_sleep(sock->sk), &wait);
rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
if (rc == -EAGAIN) {
rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
if (rc == -EAGAIN) {
rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
}
rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
}
- remove_wait_queue(sock->sk->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sock->sk), &wait);
set_current_state(TASK_RUNNING);
if (rc != 0)
set_current_state(TASK_RUNNING);
if (rc != 0)
void
libcfs_sock_abort_accept (struct socket *sock)
{
void
libcfs_sock_abort_accept (struct socket *sock)
{
- wake_up_all(sock->sk->sk_sleep);
+ wake_up_all(sk_sleep(sock->sk));
}
EXPORT_SYMBOL(libcfs_sock_abort_accept);
}
EXPORT_SYMBOL(libcfs_sock_abort_accept);
LB_LINUX_TRY_COMPILE([
#include <linux/fs.h>
],[
LB_LINUX_TRY_COMPILE([
#include <linux/fs.h>
],[
- struct super_operations *sop = NULL;
- sop->statfs((struct dentry *)0, (struct kstatfs*)0);
+ ((struct super_operations *)0)->statfs((struct dentry *)0, (struct kstatfs*)0);
],[
AC_DEFINE(HAVE_STATFS_DENTRY_PARAM, 1,
],[
AC_DEFINE(HAVE_STATFS_DENTRY_PARAM, 1,
- [super_ops.statfs() first parameter is dentry])
+ [super_ops.statfs() first parameter is dentry])
AC_MSG_RESULT([yes])
],[
AC_MSG_RESULT([no])
AC_MSG_RESULT([yes])
],[
AC_MSG_RESULT([no])
AC_DEFUN([LC_PAGE_CHECKED],
[AC_MSG_CHECKING([kernel has PageChecked and SetPageChecked])
LB_LINUX_TRY_COMPILE([
AC_DEFUN([LC_PAGE_CHECKED],
[AC_MSG_CHECKING([kernel has PageChecked and SetPageChecked])
LB_LINUX_TRY_COMPILE([
#ifdef HAVE_LINUX_MMTYPES_H
#include <linux/mm_types.h>
#endif
#ifdef HAVE_LINUX_MMTYPES_H
#include <linux/mm_types.h>
#endif
- #include <linux/page-flags.h>
+ #include <linux/page-flags.h>
/* before 2.6.26 this define*/
#ifndef PageChecked
/* before 2.6.26 this define*/
#ifndef PageChecked
- /* 2.6.26 use function instead of define for it */
- SetPageChecked(p);
- PageChecked(p);
- #endif
+ /* 2.6.26 use function instead of define for it */
+ SetPageChecked(p);
+ PageChecked(p);
+ #endif
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_PAGE_CHECKED, 1,
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_PAGE_CHECKED, 1,
+# 2.6.36 fs_struct.lock use spinlock instead of rwlock.
+#
+AC_DEFUN([LC_FS_STRUCT_RWLOCK],
+[AC_MSG_CHECKING([if fs_struct.lock uses rwlock])
+LB_LINUX_TRY_COMPILE([
+ #include <asm/atomic.h>
+ #include <linux/spinlock.h>
+ #include <linux/fs_struct.h>
+],[
+ ((struct fs_struct *)0)->lock = (rwlock_t){ 0 };
+],[
+ AC_DEFINE(HAVE_FS_STRUCT_RWLOCK, 1,
+ [fs_struct.lock uses rwlock])
+ AC_MSG_RESULT([yes])
+],[
+ AC_MSG_RESULT([no])
+])
+])
+
+#
# 2.6.36 super_operations add evict_inode method. it hybird of
# delete_inode & clear_inode.
#
# 2.6.36 super_operations add evict_inode method. it hybird of
# delete_inode & clear_inode.
#
+# 2.6.38 use path as 4th parameter in quota_on.
+#
+AC_DEFUN([LC_QUOTA_ON_USE_PATH],
+[AC_MSG_CHECKING([if quota_on uses path as parameter])
+tmp_flags="$EXTRA_KCFLAGS"
+EXTRA_KCFLAGS="-Werror"
+LB_LINUX_TRY_COMPILE([
+ #include <linux/fs.h>
+ #include <linux/quota.h>
+],[
+ ((struct quotactl_ops *)0)->quota_on(NULL, 0, 0, ((struct path*)0));
+],[
+ AC_DEFINE(HAVE_QUOTA_ON_USE_PATH, 1,
+ [quota_on uses path as 4th parameter])
+ AC_MSG_RESULT([yes])
+],[
+ AC_MSG_RESULT([no])
+])
+EXTRA_KCFLAGS="$tmp_flags"
+])
+
+#
# 2.6.39 remove unplug_fn from request_queue.
#
AC_DEFUN([LC_REQUEST_QUEUE_UNPLUG_FN],
# 2.6.39 remove unplug_fn from request_queue.
#
AC_DEFUN([LC_REQUEST_QUEUE_UNPLUG_FN],
LC_SBOPS_EVICT_INODE
# 2.6.38
LC_ATOMIC_MNT_COUNT
LC_BLKDEV_GET_BY_DEV
LC_GENERIC_PERMISSION
LC_SBOPS_EVICT_INODE
# 2.6.38
LC_ATOMIC_MNT_COUNT
LC_BLKDEV_GET_BY_DEV
LC_GENERIC_PERMISSION
# 2.6.39
LC_REQUEST_QUEUE_UNPLUG_FN
# 2.6.39
LC_REQUEST_QUEUE_UNPLUG_FN
#endif
#include <linux/fs_struct.h>
#endif
#include <linux/fs_struct.h>
+#include <linux/namei.h>
#include <libcfs/linux/portals_compat25.h>
#include <linux/lustre_patchless_compat.h>
#include <libcfs/linux/portals_compat25.h>
#include <linux/lustre_patchless_compat.h>
#define ll_iattr iattr
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) */
#define ll_iattr iattr
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) */
+#ifdef HAVE_FS_STRUCT_RWLOCK
+# define LOCK_FS_STRUCT(fs) cfs_write_lock(&(fs)->lock)
+# define UNLOCK_FS_STRUCT(fs) cfs_write_unlock(&(fs)->lock)
+#else
+# define LOCK_FS_STRUCT(fs) cfs_spin_lock(&(fs)->lock)
+# define UNLOCK_FS_STRUCT(fs) cfs_spin_unlock(&(fs)->lock)
+#endif
+
#ifdef HAVE_FS_STRUCT_USE_PATH
static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
struct dentry *dentry)
#ifdef HAVE_FS_STRUCT_USE_PATH
static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
struct dentry *dentry)
path.mnt = mnt;
path.dentry = dentry;
path.mnt = mnt;
path.dentry = dentry;
old_pwd = fs->pwd;
path_get(&path);
fs->pwd = path;
old_pwd = fs->pwd;
path_get(&path);
fs->pwd = path;
- write_unlock(&fs->lock);
if (old_pwd.dentry)
path_put(&old_pwd);
if (old_pwd.dentry)
path_put(&old_pwd);
struct dentry *old_pwd;
struct vfsmount *old_pwdmnt;
struct dentry *old_pwd;
struct vfsmount *old_pwdmnt;
- cfs_write_lock(&fs->lock);
old_pwd = fs->pwd;
old_pwdmnt = fs->pwdmnt;
fs->pwdmnt = mntget(mnt);
fs->pwd = dget(dentry);
old_pwd = fs->pwd;
old_pwdmnt = fs->pwdmnt;
fs->pwdmnt = mntget(mnt);
fs->pwd = dget(dentry);
- cfs_write_unlock(&fs->lock);
if (old_pwd) {
dput(old_pwd);
if (old_pwd) {
dput(old_pwd);
#define ll_sb_has_quota_active(sb, type) sb_has_quota_enabled(sb, type)
#endif
#define ll_sb_has_quota_active(sb, type) sb_has_quota_enabled(sb, type)
#endif
-#ifdef HAVE_SB_ANY_QUOTA_ACTIVE
+#ifdef DQUOT_USAGE_ENABLED
+#define ll_sb_any_quota_active(sb) sb_any_quota_loaded(sb)
+#elif defined(HAVE_SB_ANY_QUOTA_ACTIVE)
#define ll_sb_any_quota_active(sb) sb_any_quota_active(sb)
#else
#define ll_sb_any_quota_active(sb) sb_any_quota_enabled(sb)
#define ll_sb_any_quota_active(sb) sb_any_quota_active(sb)
#else
#define ll_sb_any_quota_active(sb) sb_any_quota_enabled(sb)
static inline int
ll_quota_on(struct super_block *sb, int off, int ver, char *name, int remount)
{
static inline int
ll_quota_on(struct super_block *sb, int off, int ver, char *name, int remount)
{
if (sb->s_qcop->quota_on) {
if (sb->s_qcop->quota_on) {
- return sb->s_qcop->quota_on(sb, off, ver, name
+#ifdef HAVE_QUOTA_ON_USE_PATH
+ struct path path;
+
+ /* kern_path() returns 0 on success, negative errno on failure;
+ * bail out only on failure — the original '!rc' test returned
+ * early on success, skipping quota_on() and leaking the path ref. */
+ rc = kern_path(name, LOOKUP_FOLLOW, &path);
+ if (rc)
+ return rc;
+#endif
+ rc = sb->s_qcop->quota_on(sb, off, ver
+#ifdef HAVE_QUOTA_ON_USE_PATH
+ , &path
+#else
+ , name
+#endif
#ifdef HAVE_QUOTA_ON_5ARGS
, remount
#endif
);
#ifdef HAVE_QUOTA_ON_5ARGS
, remount
#endif
);
+#ifdef HAVE_QUOTA_ON_USE_PATH
+ path_put(&path);
+#endif
+ return rc;
#define HAVE_NODE_TO_CPUMASK
#endif
#define HAVE_NODE_TO_CPUMASK
#endif
+#ifndef QUOTA_OK
+# define QUOTA_OK 0
+#endif
+#ifndef NO_QUOTA
+# define NO_QUOTA (-EDQUOT)
+#endif
+
+#if !defined(_ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_) && !defined(ext2_set_bit)
+# define ext2_set_bit __test_and_set_bit_le
+# define ext2_clear_bit __test_and_clear_bit_le
+# define ext2_test_bit test_bit_le
+# define ext2_find_first_zero_bit find_first_zero_bit_le
+# define ext2_find_next_zero_bit find_next_zero_bit_le
+#endif
+
#endif /* __KERNEL__ */
#endif /* _COMPAT25_H */
#endif /* __KERNEL__ */
#endif /* _COMPAT25_H */
cfs_list_add(&exp->exp_obd_chain, &obd_zombie_exports);
cfs_spin_unlock(&obd_zombie_impexp_lock);
cfs_list_add(&exp->exp_obd_chain, &obd_zombie_exports);
cfs_spin_unlock(&obd_zombie_impexp_lock);
- if (obd_zombie_impexp_notify != NULL)
- obd_zombie_impexp_notify();
+ obd_zombie_impexp_notify();
cfs_list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
cfs_spin_unlock(&obd_zombie_impexp_lock);
cfs_list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
cfs_spin_unlock(&obd_zombie_impexp_lock);
- if (obd_zombie_impexp_notify != NULL)
- obd_zombie_impexp_notify();
+ obd_zombie_impexp_notify();