[kernel/ksyms.c kernel/sched.c],[
AC_DEFINE(HAVE_SHOW_TASK, 1, [show_task is exported])
],[
+ LB_CHECK_SYMBOL_EXPORT([sched_show_task],
+ [kernel/ksyms.c kernel/sched.c],[
+ AC_DEFINE(HAVE_SCHED_SHOW_TASK, 1, [sched_show_task is exported])
+ ],[])
])
])
#
AC_DEFUN([LIBCFS_FUNC_DUMP_TRACE],
[LB_CHECK_SYMBOL_EXPORT([dump_trace],
-[kernel/ksyms.c arch/${LINUX_ARCH%_64}/kernel/traps_64.c],[
+[kernel/ksyms.c arch/${LINUX_ARCH%_64}/kernel/traps_64.c arch/x86/kernel/dumpstack_32.c arch/x86/kernel/dumpstack_64.c],[
tmp_flags="$EXTRA_KCFLAGS"
EXTRA_KCFLAGS="-Werror"
AC_MSG_CHECKING([whether we can really use dump_trace])
])
])
-# 2.6.32 without DQUOT_INIT defined.
-AC_DEFUN([LC_DQUOT_INIT],
-[AC_MSG_CHECKING([if DQUOT_INIT is defined])
+# 2.6.32 adds a limits member in struct request_queue.
+AC_DEFUN([LC_REQUEST_QUEUE_LIMITS],
+[AC_MSG_CHECKING([if request_queue has a limits field])
LB_LINUX_TRY_COMPILE([
- #include <linux/quotaops.h>
+ #include <linux/blkdev.h>
],[
- DQUOT_INIT(NULL);
+ struct request_queue rq;
+ rq.limits.io_min = 0;
],[
AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_DQUOT_INIT, 1,
- [DQUOT_INIT is defined])
+ AC_DEFINE(HAVE_REQUEST_QUEUE_LIMITS, 1,
+ [request_queue has a limits field])
],[
AC_MSG_RESULT(no)
])
])
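+# When HAVE_REQUEST_QUEUE_LIMITS is set the max_* values live in
+# q->limits; on kernels without it the compat header maps the
+# queue_max_*() accessors back to the old request_queue fields.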
-# 2.6.32 add a limits member in struct request_queue.
-AC_DEFUN([LC_REQUEST_QUEUE_LIMITS],
-[AC_MSG_CHECKING([if request_queue has a limits field])
+# RHEL6 (backported from 2.6.34) removes the functions blk_queue_max_phys_segments
+# and blk_queue_max_hw_segments and adds blk_queue_max_segments.
+AC_DEFUN([LC_BLK_QUEUE_MAX_SEGMENTS],
+[AC_MSG_CHECKING([if blk_queue_max_segments is defined])
LB_LINUX_TRY_COMPILE([
#include <linux/blkdev.h>
],[
- struct request_queue rq;
- rq.limits.io_min = 0;
+ blk_queue_max_segments(NULL, 0);
],[
AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_REQUEST_QUEUE_LIMITS, 1,
- [request_queue has a limits field])
+ AC_DEFINE(HAVE_BLK_QUEUE_MAX_SEGMENTS, 1,
+ [blk_queue_max_segments is defined])
+],[
+ AC_MSG_RESULT(no)
+])
+])
+
+# RHEL6 (backported from 2.6.34) removes blk_queue_max_sectors and adds
+# blk_queue_max_hw_sectors; check for blk_queue_max_sectors and use it until it disappears.
+AC_DEFUN([LC_BLK_QUEUE_MAX_SECTORS],
+[AC_MSG_CHECKING([if blk_queue_max_sectors is defined])
+LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+],[
+ blk_queue_max_sectors(NULL, 0);
+],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_QUEUE_MAX_SECTORS, 1,
+ [blk_queue_max_sectors is defined])
],[
AC_MSG_RESULT(no)
])
LC_BLK_QUEUE_LOG_BLK_SIZE
# 2.6.32
- LC_DQUOT_INIT
LC_REQUEST_QUEUE_LIMITS
LC_NEW_BACKING_DEV_INFO
+ LC_BLK_QUEUE_MAX_SECTORS
+ LC_BLK_QUEUE_MAX_SEGMENTS
#
if test x$enable_server = xyes ; then
#define LTIME_S(time) (time.tv_sec)
#define ll_path_lookup path_lookup
+
+#ifdef HAVE_EXPORT_INODE_PERMISSION
+#define ll_permission(inode,mask,nd) inode_permission(inode,mask)
+#else
#define ll_permission(inode,mask,nd) permission(inode,mask,nd)
+#endif
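+/* The nameidata argument is accepted in both cases but ignored when
+ * inode_permission() is exported, so callers can pass it unconditionally. */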
#define ll_pgcache_lock(mapping) cfs_spin_lock(&mapping->page_lock)
#define ll_pgcache_unlock(mapping) cfs_spin_unlock(&mapping->page_lock)
#define blk_queue_logical_block_size(q, sz) blk_queue_hardsect_size(q, sz)
#endif
-#ifdef HAVE_DQUOT_INIT
-# define ll_vfs_dq_init DQUOT_INIT
-# define ll_vfs_dq_drop DQUOT_DROP
+#ifndef HAVE_VFS_DQ_OFF
+# define ll_vfs_dq_init DQUOT_INIT
+# define ll_vfs_dq_drop DQUOT_DROP
+# define ll_vfs_dq_transfer DQUOT_TRANSFER
+# define ll_vfs_dq_off(sb, remount) DQUOT_OFF(sb)
#else
-# define ll_vfs_dq_init vfs_dq_init
-# define ll_vfs_dq_drop vfs_dq_drop
+# define ll_vfs_dq_init vfs_dq_init
+# define ll_vfs_dq_drop vfs_dq_drop
+# define ll_vfs_dq_transfer vfs_dq_transfer
+# define ll_vfs_dq_off(sb, remount) vfs_dq_off(sb, remount)
#endif
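+/* Both variants give ll_vfs_dq_off() a remount argument; on kernels
+ * without vfs_dq_off() the flag is dropped, as DQUOT_OFF() only takes
+ * the superblock. */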
#ifdef HAVE_BDI_INIT
#endif
#ifdef HAVE_NEW_BACKING_DEV_INFO
-#define ll_bdi_wb_cnt(bdi) ((bdi).wb_cnt)
+# define ll_bdi_wb_cnt(bdi) ((bdi).wb_cnt)
#else
-#define ll_bdi_wb_cnt(bdi) 1
+# define ll_bdi_wb_cnt(bdi) 1
+#endif
+
+#ifdef HAVE_BLK_QUEUE_MAX_SECTORS /* removed in RHEL6 */
+#define blk_queue_max_hw_sectors(q, sect) blk_queue_max_sectors(q, sect)
+#endif
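+/* Code below calls the newer blk_queue_max_hw_sectors() name everywhere;
+ * on kernels that still have blk_queue_max_sectors() it maps back to it. */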
+
+#ifndef HAVE_REQUEST_QUEUE_LIMITS
+#define queue_max_sectors(rq) ((rq)->max_sectors)
+#define queue_max_hw_sectors(rq) ((rq)->max_hw_sectors)
+#define queue_max_phys_segments(rq) ((rq)->max_phys_segments)
+#define queue_max_hw_segments(rq) ((rq)->max_hw_segments)
+#endif
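+/* Without the 2.6.32 limits member these values are plain fields of
+ * struct request_queue, so the accessors read them directly. */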
+
+#ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS
+#define blk_queue_max_segments(rq, seg) \
+ do { blk_queue_max_phys_segments(rq, seg); \
+ blk_queue_max_hw_segments(rq, seg); } while (0)
+#else
+#define queue_max_phys_segments(rq) queue_max_segments(rq)
+#define queue_max_hw_segments(rq) queue_max_segments(rq)
+#endif
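+/* With these shims callers can use the 2.6.34-style API unconditionally,
+ * e.g.:
+ *	blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
+ * which expands to the max_phys/max_hw setter pair on older kernels. */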
+
+#ifndef HAVE_BI_HW_SEGMENTS
+#define bio_hw_segments(q, bio) 0
#endif
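+/* bio_hw_segments() only feeds a debug message in the direct I/O path,
+ * so a constant 0 is a harmless stub when bi_hw_segments is gone. */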
#endif /* __KERNEL__ */
typedef int (*dqacq_handler_t) (struct obd_device * obd, struct qunit_data * qd,
int opc);
-/*
-#ifdef HAVE_VFS_DQ_OFF
-#define LL_DQUOT_OFF(sb, remount) vfs_dq_off(sb, remount)
-#else
-#define LL_DQUOT_OFF(sb, remount) DQUOT_OFF(sb)
-#endif
-*/
-
-#define LL_DQUOT_OFF(sb) DQUOT_OFF(sb)
-
-
/* user quota is turned on on filter */
#define LQC_USRQUOTA_FLAG (1 << 0)
/* group quota is turned on on filter */
CLASSERT(CFS_PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
blk_queue_logical_block_size(lo->lo_queue,
(unsigned short)CFS_PAGE_SIZE);
- blk_queue_max_sectors(lo->lo_queue,
- LLOOP_MAX_SEGMENTS << (CFS_PAGE_SHIFT - 9));
- blk_queue_max_phys_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
- blk_queue_max_hw_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
+ blk_queue_max_hw_sectors(lo->lo_queue,
+ LLOOP_MAX_SEGMENTS << (CFS_PAGE_SHIFT - 9));
+ blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
set_capacity(disks[lo->lo_number], size);
bd_set_size(bdev, size << 9);
static int v3_write_dqheader(struct file *f, int type)
{
static const __u32 quota_magics[] = V2_INITQMAGICS;
- static const __u32 quota_versions[] = V2_INITQVERSIONS_R1;
+ static const __u32 quota_versions[] = LUSTRE_INITQVERSIONS_V2;
struct v2_disk_dqheader dqhead;
loff_t offset = 0;
/* we don't really need to take the group lock here,
* but it may be useful if one day we support online
* quotacheck */
+#ifdef HAVE_EXT4_LDISKFS
+ ext4_lock_group(sb, group);
+#else
spin_lock(sb_bgl_lock(sbi, group));
+#endif
if (desc->bg_flags & cpu_to_le16(EXT3_BG_INODE_UNINIT)) {
/* no inode in use in this group, just skip it */
+#ifdef HAVE_EXT4_LDISKFS
+ ext4_unlock_group(sb, group);
+#else
spin_unlock(sb_bgl_lock(sbi, group));
+#endif
continue;
}
+
used_count -= ext3_itable_unused_count(sb, desc);
+#ifdef HAVE_EXT4_LDISKFS
+ ext4_unlock_group(sb, group);
+#else
spin_unlock(sb_bgl_lock(sbi, group));
+#endif
}
ino = group * sbi->s_inodes_per_group + 1;
#ifndef _MDD_INTERNAL_H
#define _MDD_INTERNAL_H
-#include <asm/semaphore.h>
-
#include <lustre_acl.h>
#include <lustre_eacl.h>
#include <obd.h>
mds_init_ctxt(obd, mnt);
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- dentry = simple_mkdir(current->fs->pwd, mnt, "OBJECTS", 0777, 1);
+ dentry = simple_mkdir(cfs_fs_pwd(current->fs), mnt, "OBJECTS", 0777, 1);
if (IS_ERR(dentry)) {
rc = PTR_ERR(dentry);
CERROR("cannot create OBJECTS directory: rc = %d\n", rc);
}
mds->mds_objects_dir = dentry;
- dentry = ll_lookup_one_len("__iopen__", current->fs->pwd,
+ dentry = ll_lookup_one_len("__iopen__", cfs_fs_pwd(current->fs),
strlen("__iopen__"));
if (IS_ERR(dentry)) {
rc = PTR_ERR(dentry);
}
dput(mds->mds_fid_de);
- LL_DQUOT_OFF(obd->u.obt.obt_sb);
+ ll_vfs_dq_off(obd->u.obt.obt_sb, 0);
shrink_dcache_sb(mds->mds_obt.obt_sb);
fsfilt_put_ops(obd->obd_fsops);
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* Setup the configs dir */
- dentry = simple_mkdir(current->fs->pwd, mnt, MOUNT_CONFIGS_DIR, 0777, 1);
+ dentry = simple_mkdir(cfs_fs_pwd(current->fs), mnt, MOUNT_CONFIGS_DIR, 0777, 1);
if (IS_ERR(dentry)) {
rc = PTR_ERR(dentry);
CERROR("cannot create %s directory: rc = %d\n",
/* Need the iopen dir for fid2dentry, required by
LLOG_ORIGIN_HANDLE_READ_HEADER */
- dentry = ll_lookup_one_len("__iopen__", current->fs->pwd,
+ dentry = ll_lookup_one_len("__iopen__", cfs_fs_pwd(current->fs),
strlen("__iopen__"));
if (IS_ERR(dentry)) {
rc = PTR_ERR(dentry);
loff_t off = 0;
ENTRY;
- O_dentry = simple_mkdir(current->fs->pwd, obd->u.obt.obt_vfsmnt,
+ O_dentry = simple_mkdir(cfs_fs_pwd(current->fs), obd->u.obt.obt_vfsmnt,
"O", 0700, 1);
CDEBUG(D_INODE, "got/created O: %p\n", O_dentry);
if (IS_ERR(O_dentry)) {
__u8 *uuid_ptr;
char *str, *label;
char ns_name[48];
- request_queue_t *q;
+ struct request_queue *q;
int rc, i;
ENTRY;
GOTO(err_post, rc);
q = bdev_get_queue(mnt->mnt_sb->s_bdev);
- if (q->max_sectors < q->max_hw_sectors &&
- q->max_sectors < PTLRPC_MAX_BRW_SIZE >> 9)
+ if (queue_max_sectors(q) < queue_max_hw_sectors(q) &&
+ queue_max_sectors(q) < PTLRPC_MAX_BRW_SIZE >> 9)
LCONSOLE_INFO("%s: underlying device %s should be tuned "
"for larger I/O requests: max_sectors = %u "
"could be up to max_hw_sectors=%u\n",
obd->obd_name, mnt->mnt_sb->s_id,
- q->max_sectors, q->max_hw_sectors);
+ queue_max_sectors(q), queue_max_hw_sectors(q));
uuid_ptr = fsfilt_uuid(obd, obd->u.obt.obt_sb);
if (uuid_ptr != NULL) {
filter_post(obd);
- LL_DQUOT_OFF(obd->u.obt.obt_sb);
+ ll_vfs_dq_off(obd->u.obt.obt_sb, 0);
shrink_dcache_sb(obd->u.obt.obt_sb);
server_put_mount(obd->obd_name, obd->u.obt.obt_vfsmnt);
cfs_waitq_signal(&iobuf->dr_wait);
}
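+/* Around 2.6.24 the bio completion callback lost its "bytes done"
+ * argument and became void:
+ *	void (*bi_end_io)(struct bio *bio, int error);
+ * DIO_RETURN() hides the return-type difference so the same function
+ * body can serve both prototypes. */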
+#ifdef HAVE_BIO_ENDIO_2ARG
+#define DIO_RETURN(a)
+static void dio_complete_routine(struct bio *bio, int error)
+#else
+#define DIO_RETURN(a) return(a)
static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
+#endif
{
struct filter_iobuf *iobuf = bio->bi_private;
struct bio_vec *bvl;
bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
bio->bi_end_io, cfs_atomic_read(&bio->bi_cnt),
bio->bi_private);
- return 0;
+ DIO_RETURN(0);
}
/* the check is outside of the cycle for performance reason -bzzz */
* deadlocking the OST. The bios are now released as soon as complete
* so the pool cannot be exhausted while IOs are competing. bug 10076 */
bio_put(bio);
- return 0;
+ DIO_RETURN(0);
}
static int can_be_merged(struct bio *bio, sector_t sector)
continue; /* added this frag OK */
if (bio != NULL) {
- request_queue_t *q =
+ struct request_queue *q =
bdev_get_queue(bio->bi_bdev);
/* Dang! I have to fragment this I/O */
"sector %llu next %llu\n",
bio->bi_size,
bio->bi_vcnt, bio->bi_max_vecs,
- bio->bi_size >> 9, q->max_sectors,
+ bio->bi_size >> 9, queue_max_sectors(q),
bio_phys_segments(q, bio),
- q->max_phys_segments,
+ queue_max_phys_segments(q),
bio_hw_segments(q, bio),
- q->max_hw_segments,
+ queue_max_hw_segments(q),
(unsigned long long)bio->bi_sector,
(unsigned long long)sector);
if ((tc = prepare_creds())) {
tc->fsuid = uc->mu_fsuid;
tc->fsgid = uc->mu_fsgid;
- tc->cap_effective = uc->mu_cap;
commit_creds(tc);
}
+ /* XXX: suboptimal */
+ cfs_curproc_cap_unpack(uc->mu_cap);
}
static inline void
iattr.ia_uid = attr->la_uid;
iattr.ia_gid = attr->la_gid;
osd_push_ctxt(env, save);
- rc = DQUOT_TRANSFER(inode, &iattr) ? -EDQUOT : 0;
+ rc = ll_vfs_dq_transfer(inode, &iattr) ? -EDQUOT : 0;
osd_pop_ctxt(save);
if (rc != 0)
return rc;
struct osd_thandle *oh;
ssize_t result = 0;
#ifdef HAVE_QUOTA_SUPPORT
- cfs_cap_t save = current->cap_effective;
+ cfs_cap_t save = cfs_curproc_cap_pack();
#endif
LASSERT(handle != NULL);
LASSERT(oh->ot_handle->h_transaction != NULL);
#ifdef HAVE_QUOTA_SUPPORT
if (ignore_quota)
- current->cap_effective |= CFS_CAP_SYS_RESOURCE_MASK;
+ cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
else
- current->cap_effective &= ~CFS_CAP_SYS_RESOURCE_MASK;
+ cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
#endif
/* Write small symlink to inode body as we need to maintain correct
* on-disk symlinks for ldiskfs.
buf->lb_len, pos,
oh->ot_handle);
#ifdef HAVE_QUOTA_SUPPORT
- current->cap_effective = save;
+ cfs_curproc_cap_unpack(save);
#endif
if (result == 0)
result = buf->lb_len;
struct osd_thandle *oh;
struct iam_container *bag = &obj->oo_dir->od_container;
#ifdef HAVE_QUOTA_SUPPORT
- cfs_cap_t save = current->cap_effective;
+ cfs_cap_t save = cfs_curproc_cap_pack();
#endif
struct osd_thread_info *oti = osd_oti_get(env);
struct iam_rec *iam_rec = (struct iam_rec *)oti->oti_ldp;
LASSERT(oh->ot_handle->h_transaction != NULL);
#ifdef HAVE_QUOTA_SUPPORT
if (ignore_quota)
- current->cap_effective |= CFS_CAP_SYS_RESOURCE_MASK;
+ cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
else
- current->cap_effective &= ~CFS_CAP_SYS_RESOURCE_MASK;
+ cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
#endif
if (S_ISDIR(obj->oo_inode->i_mode))
osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
iam_rec, ipd);
#ifdef HAVE_QUOTA_SUPPORT
- current->cap_effective = save;
+ cfs_curproc_cap_unpack(save);
#endif
osd_ipd_put(env, bag, ipd);
LINVRNT(osd_invariant(obj));
const char *name = (const char *)key;
struct osd_object *child;
#ifdef HAVE_QUOTA_SUPPORT
- cfs_cap_t save = current->cap_effective;
+ cfs_cap_t save = cfs_curproc_cap_pack();
#endif
int rc;
*mtime = inode->i_mtime;
#ifdef HAVE_QUOTA_SUPPORT
if (ignore_quota)
- current->cap_effective |= CFS_CAP_SYS_RESOURCE_MASK;
+ cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
else
- current->cap_effective &= ~CFS_CAP_SYS_RESOURCE_MASK;
+ cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
#endif
cfs_down_write(&obj->oo_ext_idx_sem);
rc = osd_ea_add_rec(env, obj, child->oo_inode, name, rec, th);
cfs_up_write(&obj->oo_ext_idx_sem);
#ifdef HAVE_QUOTA_SUPPORT
- current->cap_effective = save;
+ cfs_curproc_cap_unpack(save);
#endif
osd_object_put(env, child);
/* xtime should not be updated with server-side time. */
struct osd_ctxt {
__u32 oc_uid;
__u32 oc_gid;
- __u32 oc_cap;
+ cfs_kernel_cap_t oc_cap;
};
#endif