-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Copyright (c) 2003 Cluster File Systems, Inc.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * This file is part of Lustre, http://www.lustre.org.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
*
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
*/
-#ifndef _COMPAT25_H
-#define _COMPAT25_H
+#ifndef _LINUX_COMPAT25_H
+#define _LINUX_COMPAT25_H
#ifdef __KERNEL__
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) && LINUX_VERSION_CODE < KERNEL_VERSION(2,5,69)
-#error sorry, lustre requires at least 2.5.69
-#endif
-
-#include <linux/portals_compat25.h>
+#include <linux/fs_struct.h>
+#include <linux/namei.h>
+#include <libcfs/linux/portals_compat25.h>
-/*
- * groups_info related staff
- */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
-
-#define NGROUPS_SMALL NGROUPS
-#define NGROUPS_PER_BLOCK ((int)(EXEC_PAGESIZE / sizeof(gid_t)))
-struct group_info {
- int ngroups;
- atomic_t usage;
- gid_t small_block[NGROUPS_SMALL];
- int nblocks;
- gid_t *blocks[0];
-};
-#define current_ngroups current->ngroups
-
-struct group_info *groups_alloc(int gidsetsize);
-void groups_free(struct group_info *ginfo);
-int groups_search(struct group_info *ginfo, gid_t grp);
-
-#define get_group_info(group_info) \
- do { \
- atomic_inc(&(group_info)->usage); \
- } while (0)
-
-#define put_group_info(group_info) \
- do { \
- if (atomic_dec_and_test(&(group_info)->usage)) \
- groups_free(group_info); \
- } while (0)
+#include <linux/lustre_patchless_compat.h>
-#define groups_sort(gi) do {} while (0)
-
-#define GROUP_AT(gi, i) ((gi)->small_block[(i)])
+#ifdef HAVE_FS_STRUCT_RWLOCK
+# define LOCK_FS_STRUCT(fs) cfs_write_lock(&(fs)->lock)
+# define UNLOCK_FS_STRUCT(fs) cfs_write_unlock(&(fs)->lock)
+#else
+# define LOCK_FS_STRUCT(fs) cfs_spin_lock(&(fs)->lock)
+# define UNLOCK_FS_STRUCT(fs) cfs_spin_unlock(&(fs)->lock)
+#endif
-static inline int cleanup_group_info(void)
+#ifdef HAVE_FS_STRUCT_USE_PATH
+static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
+ struct dentry *dentry)
{
- /* Get rid of unneeded supplementary groups */
- current->ngroups = 0;
- memset(current->groups, 0, sizeof(current->groups));
- return 0;
+ struct path path;
+ struct path old_pwd;
+
+ path.mnt = mnt;
+ path.dentry = dentry;
+ LOCK_FS_STRUCT(fs);
+ old_pwd = fs->pwd;
+ path_get(&path);
+ fs->pwd = path;
+ UNLOCK_FS_STRUCT(fs);
+
+ if (old_pwd.dentry)
+ path_put(&old_pwd);
}
-#else /* >= 2.6.4 */
-
-#define current_ngroups current->group_info->ngroups
-
-void groups_sort(struct group_info *ginfo);
-int groups_search(struct group_info *ginfo, gid_t grp);
+#else
-static inline int cleanup_group_info(void)
+static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
+ struct dentry *dentry)
{
- struct group_info *ginfo;
-
- ginfo = groups_alloc(0);
- if (!ginfo)
- return -ENOMEM;
-
- set_current_groups(ginfo);
- put_group_info(ginfo);
-
- return 0;
+ struct dentry *old_pwd;
+ struct vfsmount *old_pwdmnt;
+
+ LOCK_FS_STRUCT(fs);
+ old_pwd = fs->pwd;
+ old_pwdmnt = fs->pwdmnt;
+ fs->pwdmnt = mntget(mnt);
+ fs->pwd = dget(dentry);
+ UNLOCK_FS_STRUCT(fs);
+
+ if (old_pwd) {
+ dput(old_pwd);
+ mntput(old_pwdmnt);
+ }
}
-#endif /* end of groups_info stuff */
+#endif
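For reference, a minimal usage sketch of this helper (not part of the patch; it assumes the caller already holds a vfsmount and dentry for the directory it wants as the working directory):

	/* Illustrative only: repoint the current task's cwd at (mnt, dentry).
	 * ll_set_fs_pwd() takes its own references via path_get() or
	 * dget()/mntget(), so the caller's references remain untouched. */
	ll_set_fs_pwd(current->fs, mnt, dentry);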
+/*
+ * set ATTR_BLOCKS to a high value to avoid any risk of collision with other
+ * ATTR_* attributes (see bug 13828)
+ */
+#define ATTR_BLOCKS (1 << 27)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+#define current_ngroups current_cred()->group_info->ngroups
+#define current_groups current_cred()->group_info->small_block
/*
* OBD need working random driver, thus all our
#define module_init(a) late_initcall(a)
#endif
-/* XXX our code should be using the 2.6 calls, not the other way around */
-#define TryLockPage(page) TestSetPageLocked(page)
-#define filemap_fdatasync(mapping) filemap_fdatawrite(mapping)
-#define Page_Uptodate(page) PageUptodate(page)
-
-#define KDEVT_INIT(val) (val)
+#ifndef HAVE_TRYLOCK_PAGE
+#define trylock_page(page) (!TestSetPageLocked(page))
+#endif
#define LTIME_S(time) (time.tv_sec)
-#define ll_path_lookup path_lookup
+
+#ifdef HAVE_EXPORT_INODE_PERMISSION
+#define ll_permission(inode,mask,nd) inode_permission(inode,mask)
+#else
#define ll_permission(inode,mask,nd) permission(inode,mask,nd)
+#endif
-#define ll_pgcache_lock(mapping) spin_lock(&mapping->page_lock)
-#define ll_pgcache_unlock(mapping) spin_unlock(&mapping->page_lock)
+#ifdef HAVE_GENERIC_PERMISSION_2ARGS
+# define ll_generic_permission(inode, mask, flags, check_acl) \
+ generic_permission(inode, mask)
+#elif defined HAVE_GENERIC_PERMISSION_4ARGS
+# define ll_generic_permission(inode, mask, flags, check_acl) \
+ generic_permission(inode, mask, flags, check_acl)
+#else
+# define ll_generic_permission(inode, mask, flags, check_acl) \
+ generic_permission(inode, mask, check_acl)
+#endif
+
+#define ll_pgcache_lock(mapping) cfs_spin_lock(&mapping->page_lock)
+#define ll_pgcache_unlock(mapping) cfs_spin_unlock(&mapping->page_lock)
#define ll_call_writepage(inode, page) \
(inode)->i_mapping->a_ops->writepage(page, NULL)
#define ll_invalidate_inode_pages(inode) \
invalidate_inode_pages((inode)->i_mapping)
-#define ll_truncate_complete_page(page) \
- truncate_complete_page(page->mapping, page)
-
-#define ll_vfs_create(a,b,c,d) vfs_create(a,b,c,d)
+#define ll_vfs_create(a,b,c,d) vfs_create(a,b,c,d)
#define ll_dev_t dev_t
#define kdev_t dev_t
#define to_kdev_t(dev) (dev)
#define kdev_t_to_nr(dev) (dev)
#define val_to_kdev(dev) (dev)
-#define ILOOKUP(sb, ino, test, data) ilookup5(sb, ino, test, data);
+
+#ifdef HAVE_BLKDEV_PUT_2ARGS
+#define ll_blkdev_put(a, b) blkdev_put(a, b)
+#else
+#define ll_blkdev_put(a, b) blkdev_put(a)
+#endif
+
+#ifdef HAVE_DENTRY_OPEN_4ARGS
+#define ll_dentry_open(a, b, c, d) dentry_open(a, b, c, d)
+#else
+#define ll_dentry_open(a, b, c, d) dentry_open(a, b, c)
+#endif
#include <linux/writeback.h>
-static inline void lustre_daemonize_helper(void)
+static inline int cfs_cleanup_group_info(void)
{
- LASSERT(current->signal != NULL);
- current->signal->session = 1;
- if (current->group_leader)
- current->group_leader->signal->pgrp = 1;
- else
- CERROR("we aren't group leader\n");
- current->signal->tty = NULL;
+ struct group_info *ginfo;
+
+ ginfo = groups_alloc(0);
+ if (!ginfo)
+ return -ENOMEM;
+
+ set_current_groups(ginfo);
+ put_group_info(ginfo);
+
+ return 0;
}
#define __set_page_ll_data(page, llap) \
do { \
page_cache_get(page); \
SetPagePrivate(page); \
- page->private = (unsigned long)llap; \
+ set_page_private(page, (unsigned long)llap); \
} while (0)
#define __clear_page_ll_data(page) \
do { \
ClearPagePrivate(page); \
+ set_page_private(page, 0); \
page_cache_release(page); \
- page->private = 0; \
} while(0)
-#define smp_num_cpus NR_CPUS
-
#define kiobuf bio
#include <linux/proc_fs.h>
-#else /* 2.4.. */
+#define CheckWriteback(page, cmd) \
+ ((!PageWriteback(page) && (cmd & OBD_BRW_READ)) || \
+ (PageWriteback(page) && (cmd & OBD_BRW_WRITE)))
-#define ll_vfs_create(a,b,c,d) vfs_create(a,b,c)
-#define ll_permission(inode,mask,nd) permission(inode,mask)
-#define ILOOKUP(sb, ino, test, data) ilookup4(sb, ino, test, data);
-#define DCACHE_DISCONNECTED DCACHE_NFSD_DISCONNECTED
-#define ll_dev_t int
+#ifdef HAVE_PAGE_LIST
+static inline int mapping_has_pages(struct address_space *mapping)
+{
+ int rc = 1;
-static inline void clear_page_dirty(struct page *page)
+ ll_pgcache_lock(mapping);
+ if (cfs_list_empty(&mapping->dirty_pages) &&
+ cfs_list_empty(&mapping->clean_pages) &&
+ cfs_list_empty(&mapping->locked_pages)) {
+ rc = 0;
+ }
+ ll_pgcache_unlock(mapping);
+
+ return rc;
+}
+#else
+static inline int mapping_has_pages(struct address_space *mapping)
{
- if (PageDirty(page))
- ClearPageDirty(page);
+ return mapping->nrpages > 0;
}
+#endif
+
+#ifdef HAVE_KIOBUF_KIO_BLOCKS
+#define KIOBUF_GET_BLOCKS(k) ((k)->kio_blocks)
+#else
+#define KIOBUF_GET_BLOCKS(k) ((k)->blocks)
+#endif
-/* 2.5 uses hlists for some things, like the d_hash. we'll treat them
- * as 2.5 and let macros drop back.. */
-#ifndef HLIST_HEAD /* until we get a kernel newer than l28 */
-#define hlist_entry list_entry
-#define hlist_head list_head
-#define hlist_node list_head
-#define HLIST_HEAD LIST_HEAD
-#define INIT_HLIST_HEAD INIT_LIST_HEAD
-#define hlist_del_init list_del_init
-#define hlist_add_head list_add
-#define hlist_for_each_safe list_for_each_safe
-#endif
-#define KDEVT_INIT(val) (val)
-#define ext3_xattr_set_handle ext3_xattr_set
-#define extN_xattr_set_handle extN_xattr_set
-#define try_module_get __MOD_INC_USE_COUNT
-#define module_put __MOD_DEC_USE_COUNT
-#define LTIME_S(time) (time)
-#if !defined(CONFIG_RH_2_4_20) && !defined(cpu_online)
-#define cpu_online(cpu) (cpu_online_map & (1<<cpu))
-#endif
-
-static inline int ll_path_lookup(const char *path, unsigned flags,
- struct nameidata *nd)
+#ifdef HAVE_SECURITY_PLUG
+#define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
+ vfs_symlink(dir, dentry, mnt, path, mode)
+#else
+#ifdef HAVE_4ARGS_VFS_SYMLINK
+#define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
+ vfs_symlink(dir, dentry, path, mode)
+#else
+#define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
+ vfs_symlink(dir, dentry, path)
+#endif
+
+#endif
+
+#ifndef container_of
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
+#endif
+
+#define UP_WRITE_I_ALLOC_SEM(i) up_write(&(i)->i_alloc_sem)
+#define DOWN_WRITE_I_ALLOC_SEM(i) down_write(&(i)->i_alloc_sem)
+#define LASSERT_I_ALLOC_SEM_WRITE_LOCKED(i) LASSERT(down_read_trylock(&(i)->i_alloc_sem) == 0)
+
+#define UP_READ_I_ALLOC_SEM(i) up_read(&(i)->i_alloc_sem)
+#define DOWN_READ_I_ALLOC_SEM(i) down_read(&(i)->i_alloc_sem)
+#define LASSERT_I_ALLOC_SEM_READ_LOCKED(i) LASSERT(down_write_trylock(&(i)->i_alloc_sem) == 0)
+
+#include <linux/mpage.h> /* for generic_writepages */
+
+#ifdef HAVE_HIDE_VFSMOUNT_GUTS
+# include <../fs/mount.h>
+#else
+# define real_mount(mnt) (mnt)
+#endif
+
+static inline const char *mnt_get_devname(struct vfsmount *mnt)
{
- int error = 0;
- if (path_init(path, flags, nd))
- error = path_walk(path, nd);
- return error;
+ return real_mount(mnt)->mnt_devname;
}
-#define ll_permission(inode,mask,nd) permission(inode,mask)
-typedef long sector_t;
-#define ll_pgcache_lock(mapping) spin_lock(&pagecache_lock)
-#define ll_pgcache_unlock(mapping) spin_unlock(&pagecache_lock)
-#define ll_call_writepage(inode, page) \
- (inode)->i_mapping->a_ops->writepage(page)
-#define ll_invalidate_inode_pages(inode) invalidate_inode_pages(inode)
-#define ll_truncate_complete_page(page) truncate_complete_page(page)
+#ifndef HAVE_ATOMIC_MNT_COUNT
+static inline unsigned int mnt_get_count(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+ unsigned int count = 0;
+ int cpu;
-static inline void __d_drop(struct dentry *dentry)
+ for_each_possible_cpu(cpu) {
+ count += per_cpu_ptr(real_mount(mnt)->mnt_pcp, cpu)->mnt_count;
+ }
+
+ return count;
+#else
+ return real_mount(mnt)->mnt_count;
+#endif
+}
+#else
+# define mnt_get_count(mnt) cfs_atomic_read(&(real_mount(mnt)->mnt_count))
+#endif
+
+#ifdef HAVE_RW_TREE_LOCK
+#define TREE_READ_LOCK_IRQ(mapping) read_lock_irq(&(mapping)->tree_lock)
+#define TREE_READ_UNLOCK_IRQ(mapping) read_unlock_irq(&(mapping)->tree_lock)
+#else
+#define TREE_READ_LOCK_IRQ(mapping) cfs_spin_lock_irq(&(mapping)->tree_lock)
+#define TREE_READ_UNLOCK_IRQ(mapping) cfs_spin_unlock_irq(&(mapping)->tree_lock)
+#endif
+
+#ifdef HAVE_UNREGISTER_BLKDEV_RETURN_INT
+#define ll_unregister_blkdev(a,b) unregister_blkdev((a),(b))
+#else
+static inline
+int ll_unregister_blkdev(unsigned int dev, const char *name)
{
- list_del(&dentry->d_hash);
- INIT_LIST_HEAD(&dentry->d_hash);
+ unregister_blkdev(dev, name);
+ return 0;
}
+#endif
+
+#ifdef HAVE_INVALIDATE_BDEV_2ARG
+#define ll_invalidate_bdev(a,b) invalidate_bdev((a),(b))
+#else
+#define ll_invalidate_bdev(a,b) invalidate_bdev((a))
+#endif
+
+#define ll_inode_blksize(a) (1<<(a)->i_blkbits)
+
+#ifdef HAVE_FS_RENAME_DOES_D_MOVE
+#define LL_RENAME_DOES_D_MOVE FS_RENAME_DOES_D_MOVE
+#else
+#define LL_RENAME_DOES_D_MOVE FS_ODD_RENAME
+#endif
-static inline void lustre_daemonize_helper(void)
+#ifndef HAVE_D_OBTAIN_ALIAS
+/* The old d_alloc_anon() didn't free the inode reference on error
+ * like d_obtain_alias(). Hide that difference/inconvenience here. */
+static inline struct dentry *d_obtain_alias(struct inode *inode)
{
- current->session = 1;
- current->pgrp = 1;
- current->tty = NULL;
+ struct dentry *anon = d_alloc_anon(inode);
+
+ if (anon == NULL) {
+ iput(inode);
+ anon = ERR_PTR(-ENOMEM);
+ }
+
+ return anon;
}
+#endif
-#ifndef HAVE_COND_RESCHED
-static inline void cond_resched(void)
+/* add a lustre compatible layer for crypto API */
+#include <linux/crypto.h>
+#ifdef HAVE_ASYNC_BLOCK_CIPHER
+#define ll_crypto_hash crypto_hash
+#define ll_crypto_cipher crypto_blkcipher
+#define ll_crypto_alloc_hash(name, type, mask) crypto_alloc_hash(name, type, mask)
+#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_hash_setkey(tfm, key, keylen)
+#define ll_crypto_hash_init(desc) crypto_hash_init(desc)
+#define ll_crypto_hash_update(desc, sl, bytes) crypto_hash_update(desc, sl, bytes)
+#define ll_crypto_hash_final(desc, out) crypto_hash_final(desc, out)
+#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
+ crypto_blkcipher_setkey(tfm, key, keylen)
+#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
+ crypto_blkcipher_set_iv(tfm, src, len)
+#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
+ crypto_blkcipher_get_iv(tfm, dst, len)
+#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
+ crypto_blkcipher_encrypt(desc, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
+ crypto_blkcipher_decrypt(desc, dst, src, bytes)
+#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
+ crypto_blkcipher_encrypt_iv(desc, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
+ crypto_blkcipher_decrypt_iv(desc, dst, src, bytes)
+
+static inline
+struct ll_crypto_cipher *ll_crypto_alloc_blkcipher(const char *name,
+ u32 type, u32 mask)
{
- if (unlikely(need_resched())) {
- set_current_state(TASK_RUNNING);
- schedule();
+ struct ll_crypto_cipher *rtn = crypto_alloc_blkcipher(name, type, mask);
+
+ return (rtn == NULL ? ERR_PTR(-ENOMEM) : rtn);
+}
+
+static inline int ll_crypto_hmac(struct ll_crypto_hash *tfm,
+ u8 *key, unsigned int *keylen,
+ struct scatterlist *sg,
+ unsigned int size, u8 *result)
+{
+ struct hash_desc desc;
+ int rv;
+ desc.tfm = tfm;
+ desc.flags = 0;
+ rv = crypto_hash_setkey(desc.tfm, key, *keylen);
+ if (rv) {
+ CERROR("failed to hash setkey: %d\n", rv);
+ return rv;
}
+ return crypto_hash_digest(&desc, sg, size, result);
+}
+static inline
+unsigned int ll_crypto_tfm_alg_max_keysize(struct crypto_blkcipher *tfm)
+{
+ return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.max_keysize;
+}
+static inline
+unsigned int ll_crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
+{
+ return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.min_keysize;
}
+
+#define ll_crypto_hash_blocksize(tfm) crypto_hash_blocksize(tfm)
+#define ll_crypto_hash_digestsize(tfm) crypto_hash_digestsize(tfm)
+#define ll_crypto_blkcipher_ivsize(tfm) crypto_blkcipher_ivsize(tfm)
+#define ll_crypto_blkcipher_blocksize(tfm) crypto_blkcipher_blocksize(tfm)
+#define ll_crypto_free_hash(tfm) crypto_free_hash(tfm)
+#define ll_crypto_free_blkcipher(tfm) crypto_free_blkcipher(tfm)
+#else /* HAVE_ASYNC_BLOCK_CIPHER */
+#include <linux/scatterlist.h>
+#define ll_crypto_hash crypto_tfm
+#define ll_crypto_cipher crypto_tfm
+#ifndef HAVE_STRUCT_HASH_DESC
+struct hash_desc {
+ struct ll_crypto_hash *tfm;
+ u32 flags;
+};
+#endif
+#ifndef HAVE_STRUCT_BLKCIPHER_DESC
+struct blkcipher_desc {
+ struct ll_crypto_cipher *tfm;
+ void *info;
+ u32 flags;
+};
#endif
+#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
+ crypto_cipher_setkey(tfm, key, keylen)
+#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
+ crypto_cipher_set_iv(tfm, src, len)
+#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
+ crypto_cipher_get_iv(tfm, dst, len)
+#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
+ crypto_cipher_encrypt((desc)->tfm, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
+ crypto_cipher_decrypt((desc)->tfm, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
+ crypto_cipher_decrypt_iv((desc)->tfm, dst, src, bytes, (desc)->info)
+#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
+ crypto_cipher_encrypt_iv((desc)->tfm, dst, src, bytes, (desc)->info)
+
+static inline
+struct ll_crypto_cipher *ll_crypto_alloc_blkcipher(const char * algname,
+ u32 type, u32 mask)
+{
+ struct ll_crypto_cipher *rtn;
+ char buf[CRYPTO_MAX_ALG_NAME + 1];
+ const char *pan = algname;
+ u32 flag = 0;
+
+ if (strncmp("cbc(", algname, 4) == 0)
+ flag |= CRYPTO_TFM_MODE_CBC;
+ else if (strncmp("ecb(", algname, 4) == 0)
+ flag |= CRYPTO_TFM_MODE_ECB;
+ if (flag) {
+ char *vp = strnchr(algname, CRYPTO_MAX_ALG_NAME, ')');
+ if (vp) {
+ memcpy(buf, algname + 4, vp - algname - 4);
+ buf[vp - algname - 4] = '\0';
+ pan = buf;
+ } else {
+ flag = 0;
+ }
+ }
+ rtn = crypto_alloc_tfm(pan, flag);
+ return (rtn == NULL ? ERR_PTR(-ENOMEM) : rtn);
+}
-static inline int mapping_mapped(struct address_space *mapping)
+static inline
+struct ll_crypto_hash *ll_crypto_alloc_hash(const char *alg, u32 type, u32 mask)
{
- return mapping->i_mmap_shared ? 1 : 0;
+ char buf[CRYPTO_MAX_ALG_NAME + 1];
+ const char *pan = alg;
+
+ if (strncmp("hmac(", alg, 5) == 0) {
+ char *vp = strnchr(alg, CRYPTO_MAX_ALG_NAME, ')');
+ if (vp) {
+ memcpy(buf, alg + 5, vp - alg - 5);
+ buf[vp - alg - 5] = '\0';
+ pan = buf;
+ }
+ }
+ return crypto_alloc_tfm(pan, 0);
+}
+static inline int ll_crypto_hash_init(struct hash_desc *desc)
+{
+ crypto_digest_init(desc->tfm);
+ return 0;
+}
+static inline int ll_crypto_hash_update(struct hash_desc *desc,
+ struct scatterlist *sg,
+ unsigned int nbytes)
+{
+ struct scatterlist *sl = sg;
+ unsigned int count;
+ /*
+ * This approach is fragile: the caller must ensure that the sum of
+ * sg[0..i]->length does not exceed nbytes. The upstream
+ * crypto_hash_update() likewise derives the number of scatterlist
+ * entries from nbytes; the old interface was safer, but it is gone.
+ */
+ for (count = 0; nbytes > 0; count++, sl++) {
+ nbytes -= sl->length;
+ }
+ crypto_digest_update(desc->tfm, sg, count);
+ return 0;
+}
+static inline int ll_crypto_hash_final(struct hash_desc *desc, u8 *out)
+{
+ crypto_digest_final(desc->tfm, out);
+ return 0;
+}
+static inline int ll_crypto_hmac(struct crypto_tfm *tfm,
+ u8 *key, unsigned int *keylen,
+ struct scatterlist *sg,
+ unsigned int nbytes,
+ u8 *out)
+{
+ struct scatterlist *sl = sg;
+ int count;
+ for (count = 0; nbytes > 0; count++, sl++) {
+ nbytes -= sl->length;
+ }
+ crypto_hmac(tfm, key, keylen, sg, count, out);
+ return 0;
}
-/* to find proc_dir_entry from inode. 2.6 has native one -bzzz */
-#ifndef HAVE_PDE
-#define PDE(ii) ((ii)->u.generic_ip)
+#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_digest_setkey(tfm, key, keylen)
+#define ll_crypto_blkcipher_blocksize(tfm) crypto_tfm_alg_blocksize(tfm)
+#define ll_crypto_blkcipher_ivsize(tfm) crypto_tfm_alg_ivsize(tfm)
+#define ll_crypto_hash_digestsize(tfm) crypto_tfm_alg_digestsize(tfm)
+#define ll_crypto_hash_blocksize(tfm) crypto_tfm_alg_blocksize(tfm)
+#define ll_crypto_free_hash(tfm) crypto_free_tfm(tfm)
+#define ll_crypto_free_blkcipher(tfm) crypto_free_tfm(tfm)
+#define ll_crypto_tfm_alg_min_keysize crypto_tfm_alg_min_keysize
+#define ll_crypto_tfm_alg_max_keysize crypto_tfm_alg_max_keysize
+#endif /* HAVE_ASYNC_BLOCK_CIPHER */
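As a rough illustration of how these wrappers are intended to be used across both crypto API generations (a hedged sketch, not code from the patch; the algorithm name, buffers and key are invented, and sg_init_one() is assumed to be available):

static int example_hmac_sha1(u8 *key, unsigned int keylen,
			     void *buf, unsigned int len, u8 *digest)
{
	struct ll_crypto_hash *tfm;
	struct scatterlist sg;
	int rc;

	/* resolves to crypto_alloc_hash() or crypto_alloc_tfm() depending
	 * on HAVE_ASYNC_BLOCK_CIPHER */
	tfm = ll_crypto_alloc_hash("hmac(sha1)", 0, 0);
	if (tfm == NULL || IS_ERR(tfm))
		return -ENOMEM;

	sg_init_one(&sg, buf, len);
	rc = ll_crypto_hmac(tfm, key, &keylen, &sg, len, digest);
	ll_crypto_free_hash(tfm);

	return rc;
}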
+
+#ifdef HAVE_SECURITY_PLUG
+#define ll_vfs_rmdir(dir,entry,mnt) vfs_rmdir(dir,entry,mnt)
+#define ll_vfs_mkdir(inode,dir,mnt,mode) vfs_mkdir(inode,dir,mnt,mode)
+#define ll_vfs_link(old,mnt,dir,new,mnt1) vfs_link(old,mnt,dir,new,mnt1)
+#define ll_vfs_unlink(inode,entry,mnt) vfs_unlink(inode,entry,mnt)
+#define ll_vfs_mknod(dir,entry,mnt,mode,dev) \
+ vfs_mknod(dir,entry,mnt,mode,dev)
+#define ll_security_inode_unlink(dir,entry,mnt) \
+ security_inode_unlink(dir,entry,mnt)
+#define ll_vfs_rename(old,old_dir,mnt,new,new_dir,mnt1) \
+ vfs_rename(old,old_dir,mnt,new,new_dir,mnt1)
+#else
+#define ll_vfs_rmdir(dir,entry,mnt) vfs_rmdir(dir,entry)
+#define ll_vfs_mkdir(inode,dir,mnt,mode) vfs_mkdir(inode,dir,mode)
+#define ll_vfs_link(old,mnt,dir,new,mnt1) vfs_link(old,dir,new)
+#define ll_vfs_unlink(inode,entry,mnt) vfs_unlink(inode,entry)
+#define ll_vfs_mknod(dir,entry,mnt,mode,dev) vfs_mknod(dir,entry,mode,dev)
+#define ll_security_inode_unlink(dir,entry,mnt) security_inode_unlink(dir,entry)
+#define ll_vfs_rename(old,old_dir,mnt,new,new_dir,mnt1) \
+ vfs_rename(old,old_dir,new,new_dir)
+#endif /* HAVE_SECURITY_PLUG */
+
+#ifdef for_each_possible_cpu
+#define cfs_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
+#elif defined(for_each_cpu)
+#define cfs_for_each_possible_cpu(cpu) for_each_cpu(cpu)
#endif
-#define __set_page_ll_data(page, llap) page->private = (unsigned long)llap
-#define __clear_page_ll_data(page) page->private = 0
-#define PageWriteback(page) 0
-#define end_page_writeback(page)
+#ifdef HAVE_BIO_ENDIO_2ARG
+#define cfs_bio_io_error(a,b) bio_io_error((a))
+#define cfs_bio_endio(a,b,c) bio_endio((a),(c))
+#else
+#define cfs_bio_io_error(a,b) bio_io_error((a),(b))
+#define cfs_bio_endio(a,b,c) bio_endio((a),(b),(c))
+#endif
-#ifdef ZAP_PAGE_RANGE_VMA
-#define ll_zap_page_range(vma, addr, len) zap_page_range(vma, addr, len)
+#ifdef HAVE_FS_STRUCT_USE_PATH
+#define cfs_fs_pwd(fs) ((fs)->pwd.dentry)
+#define cfs_fs_mnt(fs) ((fs)->pwd.mnt)
+#define cfs_path_put(nd) path_put(&(nd)->path)
#else
-#define ll_zap_page_range(vma, addr, len) zap_page_range(vma->vm_mm, addr, len)
+#define cfs_fs_pwd(fs) ((fs)->pwd)
+#define cfs_fs_mnt(fs) ((fs)->pwdmnt)
+#define cfs_path_put(nd) path_release(nd)
#endif
-#endif /* end of 2.4 compat macros */
+#ifndef HAVE_SIMPLE_SETATTR
+#define simple_setattr(dentry, ops) inode_setattr((dentry)->d_inode, ops)
+#endif
-#ifdef HAVE_PAGE_LIST
-static inline int mapping_has_pages(struct address_space *mapping)
+#ifndef SLAB_DESTROY_BY_RCU
+#define SLAB_DESTROY_BY_RCU 0
+#endif
+
+#ifndef HAVE_SB_HAS_QUOTA_ACTIVE
+#define sb_has_quota_active(sb, type) sb_has_quota_enabled(sb, type)
+#endif
+
+#ifndef HAVE_SB_ANY_QUOTA_LOADED
+# ifdef HAVE_SB_ANY_QUOTA_ACTIVE
+# define sb_any_quota_loaded(sb) sb_any_quota_active(sb)
+# else
+# define sb_any_quota_loaded(sb) sb_any_quota_enabled(sb)
+# endif
+#endif
+
+static inline int
+ll_quota_on(struct super_block *sb, int off, int ver, char *name, int remount)
{
- int rc = 1;
+ int rc;
- ll_pgcache_lock(mapping);
- if (list_empty(&mapping->dirty_pages) &&
- list_empty(&mapping->clean_pages) &&
- list_empty(&mapping->locked_pages)) {
- rc = 0;
- }
- ll_pgcache_unlock(mapping);
+ if (sb->s_qcop->quota_on) {
+#ifdef HAVE_QUOTA_ON_USE_PATH
+ struct path path;
- return rc;
+ rc = kern_path(name, LOOKUP_FOLLOW, &path);
+ if (rc)
+ return rc;
+#endif
+ rc = sb->s_qcop->quota_on(sb, off, ver
+#ifdef HAVE_QUOTA_ON_USE_PATH
+ , &path
+#else
+ , name
+#endif
+#ifdef HAVE_QUOTA_ON_5ARGS
+ , remount
+#endif
+ );
+#ifdef HAVE_QUOTA_ON_USE_PATH
+ path_put(&path);
+#endif
+ return rc;
+ }
+ else
+ return -ENOSYS;
}
-static inline int clear_page_dirty_for_io(struct page *page)
+static inline int ll_quota_off(struct super_block *sb, int off, int remount)
{
- struct address_space *mapping = page->mapping;
-
- if (page->mapping && PageDirty(page)) {
- ClearPageDirty(page);
- ll_pgcache_lock(mapping);
- list_del(&page->list);
- list_add(&page->list, &mapping->locked_pages);
- ll_pgcache_unlock(mapping);
- return 1;
+ if (sb->s_qcop->quota_off) {
+ return sb->s_qcop->quota_off(sb, off
+#ifdef HAVE_QUOTA_OFF_3ARGS
+ , remount
+#endif
+ );
}
- return 0;
+ else
+ return -ENOSYS;
}
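For completeness, a short sketch of the intended call pattern for these quota helpers (illustrative only; the superblock and quota file name below are assumptions, not taken from the patch):

	int rc;

	/* Illustrative only: toggle user quota through the compat wrappers,
	 * which hide the per-kernel quota_on()/quota_off() signatures. */
	rc = ll_quota_on(sb, USRQUOTA, QFMT_VFS_V0, "lquota.user", 0);
	if (rc == 0)
		rc = ll_quota_off(sb, USRQUOTA, 0);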
+
+#ifndef HAVE_BLK_QUEUE_LOG_BLK_SIZE /* added in 2.6.31 */
+#define blk_queue_logical_block_size(q, sz) blk_queue_hardsect_size(q, sz)
+#endif
+
+#ifndef HAVE_VFS_DQ_OFF
+# define ll_vfs_dq_init DQUOT_INIT
+# define ll_vfs_dq_drop DQUOT_DROP
+# define ll_vfs_dq_transfer DQUOT_TRANSFER
+# define ll_vfs_dq_off(sb, remount) DQUOT_OFF(sb)
#else
-static inline int mapping_has_pages(struct address_space *mapping)
-{
- return mapping->nrpages > 0;
-}
+# define ll_vfs_dq_init vfs_dq_init
+# define ll_vfs_dq_drop vfs_dq_drop
+# define ll_vfs_dq_transfer vfs_dq_transfer
+# define ll_vfs_dq_off(sb, remount) vfs_dq_off(sb, remount)
#endif
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))
-#define ll_set_dflags(dentry, flags) do { dentry->d_vfs_flags |= flags; } while(0)
-#define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path)
+#ifdef HAVE_BDI_INIT
+#define ll_bdi_init(bdi) bdi_init(bdi)
+#define ll_bdi_destroy(bdi) bdi_destroy(bdi)
#else
-#define ll_set_dflags(dentry, flags) do { \
- spin_lock(&dentry->d_lock); \
- dentry->d_flags |= flags; \
- spin_unlock(&dentry->d_lock); \
- } while(0)
-#define ll_vfs_symlink(dir, dentry, path, mode) vfs_symlink(dir, dentry, path, mode)
+#define ll_bdi_init(bdi) 0
+#define ll_bdi_destroy(bdi) do { } while(0)
+#endif
+
+#ifdef HAVE_NEW_BACKING_DEV_INFO
+# define ll_bdi_wb_cnt(bdi) ((bdi).wb_cnt)
+#else
+# define ll_bdi_wb_cnt(bdi) 1
#endif
-#ifdef HAVE_I_ALLOC_SEM
-#define UP_WRITE_I_ALLOC_SEM(i) do { up_write(&(i)->i_alloc_sem); } while (0)
-#define DOWN_WRITE_I_ALLOC_SEM(i) do { down_write(&(i)->i_alloc_sem); } while(0)
-#define LASSERT_MDS_ORPHAN_WRITE_LOCKED(i) LASSERT(down_read_trylock(&(i)->i_alloc_sem) == 0)
+#ifdef HAVE_BLK_QUEUE_MAX_SECTORS /* removed in rhel6 */
+#define blk_queue_max_hw_sectors(q, sect) blk_queue_max_sectors(q, sect)
+#endif
-#define UP_READ_I_ALLOC_SEM(i) do { up_read(&(i)->i_alloc_sem); } while (0)
-#define DOWN_READ_I_ALLOC_SEM(i) do { down_read(&(i)->i_alloc_sem); } while (0)
-#define LASSERT_MDS_ORPHAN_READ_LOCKED(i) LASSERT(down_write_trylock(&(i)->i_alloc_sem) == 0)
-#define MDS_PACK_MD_LOCK 1
+#ifndef HAVE_BLKDEV_GET_BY_DEV
+# define blkdev_get_by_dev(dev, mode, holder) open_by_devnum(dev, mode)
+#endif
+
+#ifndef HAVE_REQUEST_QUEUE_LIMITS
+#define queue_max_sectors(rq) ((rq)->max_sectors)
+#define queue_max_hw_sectors(rq) ((rq)->max_hw_sectors)
+#define queue_max_phys_segments(rq) ((rq)->max_phys_segments)
+#define queue_max_hw_segments(rq) ((rq)->max_hw_segments)
+#endif
+
+#ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS
+#define blk_queue_max_segments(rq, seg) \
+ do { blk_queue_max_phys_segments(rq, seg); \
+ blk_queue_max_hw_segments(rq, seg); } while (0)
+#else
+#define queue_max_phys_segments(rq) queue_max_segments(rq)
+#define queue_max_hw_segments(rq) queue_max_segments(rq)
+#endif
+
+
+#ifndef HAVE_BI_HW_SEGMENTS
+#define bio_hw_segments(q, bio) 0
+#endif
+
+#ifndef HAVE_PAGEVEC_LRU_ADD_FILE
+#define pagevec_lru_add_file pagevec_lru_add
+#endif
+
+#ifdef HAVE_ADD_TO_PAGE_CACHE_LRU
+#define ll_add_to_page_cache_lru(pg, mapping, off, gfp) \
+ add_to_page_cache_lru(pg, mapping, off, gfp)
+#define ll_pagevec_init(pv, cold) do {} while (0)
+#define ll_pagevec_add(pv, pg) (0)
+#define ll_pagevec_lru_add_file(pv) do {} while (0)
+#else
+#define ll_add_to_page_cache_lru(pg, mapping, off, gfp) \
+ add_to_page_cache(pg, mapping, off, gfp)
+#define ll_pagevec_init(pv, cold) pagevec_init(pv, cold)
+#define ll_pagevec_add(pv, pg) \
+({ \
+ int __ret; \
+ \
+ page_cache_get(pg); \
+ __ret = pagevec_add(pv, pg); \
+})
+#define ll_pagevec_lru_add_file(pv) pagevec_lru_add_file(pv)
+#endif
+
+#if !defined(HAVE_NODE_TO_CPUMASK) && defined(HAVE_CPUMASK_OF_NODE)
+#define node_to_cpumask(i) (*(cpumask_of_node(i)))
+#define HAVE_NODE_TO_CPUMASK
+#endif
+
+#ifndef QUOTA_OK
+# define QUOTA_OK 0
+#endif
+#ifndef NO_QUOTA
+# define NO_QUOTA (-EDQUOT)
+#endif
+
+#if !defined(_ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_) && !defined(ext2_set_bit)
+# define ext2_set_bit __test_and_set_bit_le
+# define ext2_clear_bit __test_and_clear_bit_le
+# define ext2_test_bit test_bit_le
+# define ext2_find_first_zero_bit find_first_zero_bit_le
+# define ext2_find_next_zero_bit find_next_zero_bit_le
+#endif
+
+#ifdef ATTR_TIMES_SET
+# define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
#else
-#define UP_READ_I_ALLOC_SEM(i) do { up(&(i)->i_sem); } while (0)
-#define DOWN_READ_I_ALLOC_SEM(i) do { down(&(i)->i_sem); } while (0)
-#define LASSERT_MDS_ORPHAN_READ_LOCKED(i) LASSERT(down_trylock(&(i)->i_sem) != 0)
-
-#define UP_WRITE_I_ALLOC_SEM(i) do { up(&(i)->i_sem); } while (0)
-#define DOWN_WRITE_I_ALLOC_SEM(i) do { down(&(i)->i_sem); } while (0)
-#define LASSERT_MDS_ORPHAN_WRITE_LOCKED(i) LASSERT(down_trylock(&(i)->i_sem) != 0)
-#define MDS_PACK_MD_LOCK 0
+# define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET)
+#endif
+
+#ifndef HAVE_SELINUX_IS_ENABLED
+static inline bool selinux_is_enabled(void)
+{
+ return 0;
+}
#endif
#endif /* __KERNEL__ */