+#ifdef HAVE_INODE_BLKSIZE
+#define ll_inode_blksize(a) (a)->i_blksize
+#else
+#define ll_inode_blksize(a) (1<<(a)->i_blkbits)
+#endif
+
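+/* Newer kernels export FS_RENAME_DOES_D_MOVE so a filesystem can ask
+ * the VFS to d_move() on rename; fall back to the older FS_ODD_RENAME
+ * flag where it is missing. */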
+#ifdef HAVE_FS_RENAME_DOES_D_MOVE
+#define LL_RENAME_DOES_D_MOVE FS_RENAME_DOES_D_MOVE
+#else
+#define LL_RENAME_DOES_D_MOVE FS_ODD_RENAME
+#endif
+
+#ifndef HAVE_D_OBTAIN_ALIAS
+/* The old d_alloc_anon() didn't free the inode reference on error
+ * like d_obtain_alias(). Hide that difference/inconvenience here. */
+static inline struct dentry *d_obtain_alias(struct inode *inode)
+{
+ struct dentry *anon = d_alloc_anon(inode);
+
+ if (anon == NULL)
+ iput(inode);
+
+ return anon;
+}
+#endif
+
+/* Add a Lustre-compatible wrapper layer over the kernel crypto API. */
+#include <linux/crypto.h>
+#ifdef HAVE_ASYNC_BLOCK_CIPHER
+#define ll_crypto_hash crypto_hash
+#define ll_crypto_cipher crypto_blkcipher
+#define ll_crypto_alloc_hash(name, type, mask) crypto_alloc_hash(name, type, mask)
+#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_hash_setkey(tfm, key, keylen)
+#define ll_crypto_hash_init(desc) crypto_hash_init(desc)
+#define ll_crypto_hash_update(desc, sl, bytes) crypto_hash_update(desc, sl, bytes)
+#define ll_crypto_hash_final(desc, out) crypto_hash_final(desc, out)
+#define ll_crypto_alloc_blkcipher(name, type, mask) \
+ crypto_alloc_blkcipher(name, type, mask)
+#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
+ crypto_blkcipher_setkey(tfm, key, keylen)
+#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
+ crypto_blkcipher_set_iv(tfm, src, len)
+#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
+ crypto_blkcipher_get_iv(tfm, dst, len)
+#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
+ crypto_blkcipher_encrypt(desc, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
+ crypto_blkcipher_decrypt(desc, dst, src, bytes)
+#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
+ crypto_blkcipher_encrypt_iv(desc, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
+ crypto_blkcipher_decrypt_iv(desc, dst, src, bytes)
+
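+/* One-shot HMAC helper: key the hash transform, then digest @size
+ * bytes from @sg into @result. */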
+static inline int ll_crypto_hmac(struct ll_crypto_hash *tfm,
+ u8 *key, unsigned int *keylen,
+ struct scatterlist *sg,
+ unsigned int size, u8 *result)
+{
+ struct hash_desc desc;
+ int rv;
+ desc.tfm = tfm;
+ desc.flags = 0;
+ rv = crypto_hash_setkey(desc.tfm, key, *keylen);
+ if (rv) {
+ CERROR("failed to set hash key: %d\n", rv);
+ return rv;
+ }
+ return crypto_hash_digest(&desc, sg, size, result);
+}
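+/* The key-size limits live in the algorithm descriptor behind the tfm. */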
+static inline
+unsigned int ll_crypto_tfm_alg_max_keysize(struct crypto_blkcipher *tfm)
+{
+ return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.max_keysize;
+}
+static inline
+unsigned int ll_crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
+{
+ return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.min_keysize;
+}
+
+#define ll_crypto_hash_blocksize(tfm) crypto_hash_blocksize(tfm)
+#define ll_crypto_hash_digestsize(tfm) crypto_hash_digestsize(tfm)
+#define ll_crypto_blkcipher_ivsize(tfm) crypto_blkcipher_ivsize(tfm)
+#define ll_crypto_blkcipher_blocksize(tfm) crypto_blkcipher_blocksize(tfm)
+#define ll_crypto_free_hash(tfm) crypto_free_hash(tfm)
+#define ll_crypto_free_blkcipher(tfm) crypto_free_blkcipher(tfm)
+#else /* HAVE_ASYNC_BLOCK_CIPHER */
+#include <linux/scatterlist.h>
+#define ll_crypto_hash crypto_tfm
+#define ll_crypto_cipher crypto_tfm
+#ifndef HAVE_STRUCT_HASH_DESC
+struct hash_desc {
+ struct ll_crypto_hash *tfm;
+ u32 flags;
+};
+#endif
+#ifndef HAVE_STRUCT_BLKCIPHER_DESC
+struct blkcipher_desc {
+ struct ll_crypto_cipher *tfm;
+ void *info;
+ u32 flags;
+};
+#endif
+#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
+ crypto_cipher_setkey(tfm, key, keylen)
+#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
+ crypto_cipher_set_iv(tfm, src, len)
+#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
+ crypto_cipher_get_iv(tfm, dst, len)
+#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
+ crypto_cipher_encrypt((desc)->tfm, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
+ crypto_cipher_decrypt((desc)->tfm, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
+ crypto_cipher_decrypt_iv((desc)->tfm, dst, src, bytes, (desc)->info)
+#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
+ crypto_cipher_encrypt_iv((desc)->tfm, dst, src, bytes, (desc)->info)
+
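+/* The old crypto API has no "mode(alg)" template syntax, so translate
+ * names such as "cbc(aes)" into the bare algorithm name plus a
+ * CRYPTO_TFM_MODE_* flag before calling crypto_alloc_tfm(). */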
+static inline
+struct ll_crypto_cipher *ll_crypto_alloc_blkcipher(const char *algname,
+ u32 type, u32 mask)
+{
+ char buf[CRYPTO_MAX_ALG_NAME + 1];
+ const char *pan = algname;
+ u32 flag = 0;
+
+ if (strncmp("cbc(", algname, 4) == 0)
+ flag |= CRYPTO_TFM_MODE_CBC;
+ else if (strncmp("ecb(", algname, 4) == 0)
+ flag |= CRYPTO_TFM_MODE_ECB;
+ if (flag) {
+ char *vp = strnchr(algname, CRYPTO_MAX_ALG_NAME, ')');
+ if (vp) {
+ memcpy(buf, algname + 4, vp - algname - 4);
+ buf[vp - algname - 4] = '\0';
+ pan = buf;
+ } else {
+ flag = 0;
+ }
+ }
+ return crypto_alloc_tfm(pan, flag);
+}
+
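+/* Likewise strip an "hmac(" wrapper: the old API does HMAC via
+ * crypto_hmac() on the bare digest. @type and @mask are accepted
+ * only for interface compatibility and are ignored here. */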
+static inline
+struct ll_crypto_hash *ll_crypto_alloc_hash(const char *alg, u32 type, u32 mask)
+{
+ char buf[CRYPTO_MAX_ALG_NAME + 1];
+ const char *pan = alg;
+
+ if (strncmp("hmac(", alg, 5) == 0) {
+ char *vp = strnchr(alg, CRYPTO_MAX_ALG_NAME, ')');
+ if (vp) {
+ memcpy(buf, alg + 5, vp - alg - 5);
+ buf[vp - alg - 5] = '\0';
+ pan = buf;
+ }
+ }
+ return crypto_alloc_tfm(pan, 0);
+}
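+/* Wrap the old void-returning crypto_digest_*() calls in the newer
+ * hash_desc interface; these shims therefore always return 0. */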
+static inline int ll_crypto_hash_init(struct hash_desc *desc)
+{
+ crypto_digest_init(desc->tfm);
+ return 0;
+}
+static inline int ll_crypto_hash_update(struct hash_desc *desc,
+ struct scatterlist *sg,
+ unsigned int nbytes)
+{
+ struct scatterlist *sl = sg;
+ unsigned int count;
+ /*
+ * This is fragile: we walk the scatterlist summing entry lengths
+ * until they cover @nbytes, so the caller must ensure the list
+ * spans exactly that many bytes. Upstream crypto_hash_update()
+ * derives the entry count from @nbytes the same way; the old
+ * explicit-count interface was safer, but it is gone.
+ */
+ for (count = 0; nbytes > 0; count++, sl++)
+ nbytes -= sl->length;
+ crypto_digest_update(desc->tfm, sg, count);
+ return 0;
+}
+static inline int ll_crypto_hash_final(struct hash_desc *desc, u8 *out)
+{
+ crypto_digest_final(desc->tfm, out);
+ return 0;
+}
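+/* Old-API HMAC: count the scatterlist entries covering @nbytes (same
+ * caveat as in ll_crypto_hash_update()) and hand off to crypto_hmac(). */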
+static inline int ll_crypto_hmac(struct crypto_tfm *tfm,
+ u8 *key, unsigned int *keylen,
+ struct scatterlist *sg,
+ unsigned int nbytes,
+ u8 *out)
+{
+ struct scatterlist *sl = sg;
+ int count;
+ for (count = 0; nbytes > 0; count++, sl++)
+ nbytes -= sl->length;
+ crypto_hmac(tfm, key, keylen, sg, count, out);
+ return 0;
+}
+
+#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_digest_setkey(tfm, key, keylen)
+#define ll_crypto_blkcipher_blocksize(tfm) crypto_tfm_alg_blocksize(tfm)
+#define ll_crypto_blkcipher_ivsize(tfm) crypto_tfm_alg_ivsize(tfm)
+#define ll_crypto_hash_digestsize(tfm) crypto_tfm_alg_digestsize(tfm)
+#define ll_crypto_hash_blocksize(tfm) crypto_tfm_alg_blocksize(tfm)
+#define ll_crypto_free_hash(tfm) crypto_free_tfm(tfm)
+#define ll_crypto_free_blkcipher(tfm) crypto_free_tfm(tfm)
+#define ll_crypto_tfm_alg_min_keysize crypto_tfm_alg_min_keysize
+#define ll_crypto_tfm_alg_max_keysize crypto_tfm_alg_max_keysize
+#endif /* HAVE_ASYNC_BLOCK_CIPHER */
+
+#ifndef HAVE_SYNCHRONIZE_RCU
+/* Linux 2.6.32 defines synchronize_rcu() as a macro when !CONFIG_TREE_PREEMPT_RCU */
+#ifndef synchronize_rcu
+#define synchronize_rcu() synchronize_kernel()
+#endif
+#endif
+
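+/* Newer kernels take the struct file directly (file_remove_suid());
+ * older ones want the dentry, and security-plug kernels also want
+ * the vfsmount. */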
+#ifdef HAVE_FILE_REMOVE_SUID
+# define ll_remove_suid(file, mnt) file_remove_suid(file)
+#else
+# ifdef HAVE_SECURITY_PLUG
+# define ll_remove_suid(file,mnt) remove_suid(file->f_dentry,mnt)
+# else
+# define ll_remove_suid(file,mnt) remove_suid(file->f_dentry)
+# endif
+#endif
+
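+/* Some vendor kernels (the "security plug" patch) add a vfsmount
+ * argument to the VFS helpers; these wrappers hide the difference. */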
+#ifdef HAVE_SECURITY_PLUG
+#define ll_vfs_rmdir(dir,entry,mnt) vfs_rmdir(dir,entry,mnt)
+#define ll_vfs_mkdir(inode,dir,mnt,mode) vfs_mkdir(inode,dir,mnt,mode)
+#define ll_vfs_link(old,mnt,dir,new,mnt1) vfs_link(old,mnt,dir,new,mnt1)
+#define ll_vfs_unlink(inode,entry,mnt) vfs_unlink(inode,entry,mnt)
+#define ll_vfs_mknod(dir,entry,mnt,mode,dev) \
+ vfs_mknod(dir,entry,mnt,mode,dev)
+#define ll_security_inode_unlink(dir,entry,mnt) \
+ security_inode_unlink(dir,entry,mnt)
+#define ll_vfs_rename(old,old_dir,mnt,new,new_dir,mnt1) \
+ vfs_rename(old,old_dir,mnt,new,new_dir,mnt1)
+#else
+#define ll_vfs_rmdir(dir,entry,mnt) vfs_rmdir(dir,entry)
+#define ll_vfs_mkdir(inode,dir,mnt,mode) vfs_mkdir(inode,dir,mode)
+#define ll_vfs_link(old,mnt,dir,new,mnt1) vfs_link(old,dir,new)
+#define ll_vfs_unlink(inode,entry,mnt) vfs_unlink(inode,entry)
+#define ll_vfs_mknod(dir,entry,mnt,mode,dev) vfs_mknod(dir,entry,mode,dev)
+#define ll_security_inode_unlink(dir,entry,mnt) security_inode_unlink(dir,entry)
+#define ll_vfs_rename(old,old_dir,mnt,new,new_dir,mnt1) \
+ vfs_rename(old,old_dir,new,new_dir)
+#endif /* HAVE_SECURITY_PLUG */
+
+#ifdef for_each_possible_cpu
+#define cfs_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
+#elif defined(for_each_cpu)
+#define cfs_for_each_possible_cpu(cpu) for_each_cpu(cpu)
+#endif
+
+#ifndef cpu_to_node
+#define cpu_to_node(cpu) 0
+#endif
+
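+/* register_shrinker() kernels take a struct shrinker, so allocate and
+ * register one here to keep the old set/remove-style interface. */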
+#ifdef HAVE_REGISTER_SHRINKER
+typedef int (*cfs_shrinker_t)(SHRINKER_FIRST_ARG int nr_to_scan, gfp_t gfp_mask);
+
+static inline
+struct shrinker *cfs_set_shrinker(int seek, cfs_shrinker_t func)
+{
+ struct shrinker *s;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (s == NULL)
+ return (NULL);
+
+ s->shrink = func;
+ s->seeks = seek;
+
+ register_shrinker(s);
+
+ return s;
+}
+
+static inline
+void cfs_remove_shrinker(struct shrinker *shrinker)
+{
+ if (shrinker == NULL)
+ return;
+
+ unregister_shrinker(shrinker);
+ kfree(shrinker);
+}
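+/* Usage sketch (my_shrink is a hypothetical callback):
+ *
+ * static int my_shrink(SHRINKER_FIRST_ARG int nr_to_scan, gfp_t gfp_mask);
+ *
+ * struct shrinker *s = cfs_set_shrinker(DEFAULT_SEEKS, my_shrink);
+ * ...
+ * cfs_remove_shrinker(s);
+ */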
+#endif
+
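+/* Newer kernels dropped the bytes_done argument from bio_endio() and
+ * bio_io_error(); discard it on the two-argument variants. */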
+#ifdef HAVE_BIO_ENDIO_2ARG
+#define cfs_bio_io_error(a,b) bio_io_error((a))
+#define cfs_bio_endio(a,b,c) bio_endio((a),(c))
+#else
+#define cfs_bio_io_error(a,b) bio_io_error((a),(b))
+#define cfs_bio_endio(a,b,c) bio_endio((a),(b),(c))
+#endif
+
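+/* Kernels that embed a struct path in fs_struct keep pwd.dentry/pwd.mnt
+ * and use path_put(); older ones have separate pwd/pwdmnt fields and
+ * path_release(). */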
+#ifdef HAVE_FS_STRUCT_USE_PATH
+#define cfs_fs_pwd(fs) ((fs)->pwd.dentry)
+#define cfs_fs_mnt(fs) ((fs)->pwd.mnt)
+#define cfs_path_put(nd) path_put(&(nd)->path)
+#else
+#define cfs_fs_pwd(fs) ((fs)->pwd)
+#define cfs_fs_mnt(fs) ((fs)->pwdmnt)
+#define cfs_path_put(nd) path_release(nd)
+#endif
+
+#ifndef abs
+static inline int abs(int x)
+{
+ return (x < 0) ? -x : x;
+}
+#endif
+
+#ifndef labs
+static inline long labs(long x)
+{
+ return (x < 0) ? -x : x;
+}
+#endif /* labs */
+
+#ifdef HAVE_INVALIDATE_INODE_PAGES
+#define invalidate_mapping_pages(mapping,s,e) invalidate_inode_pages(mapping)
+#endif
+
+#ifdef HAVE_INODE_IPRIVATE
+#define INODE_PRIVATE_DATA(inode) ((inode)->i_private)
+#else
+#define INODE_PRIVATE_DATA(inode) ((inode)->u.generic_ip)
+#endif
+
+#ifndef SLAB_DESTROY_BY_RCU
+#define CFS_SLAB_DESTROY_BY_RCU 0
+#else
+#define CFS_SLAB_DESTROY_BY_RCU SLAB_DESTROY_BY_RCU
+#endif
+
+#ifdef HAVE_SB_HAS_QUOTA_ACTIVE
+#define ll_sb_has_quota_active(sb, type) sb_has_quota_active(sb, type)
+#else
+#define ll_sb_has_quota_active(sb, type) sb_has_quota_enabled(sb, type)
+#endif
+
+#ifdef HAVE_SB_ANY_QUOTA_ACTIVE
+#define ll_sb_any_quota_active(sb) sb_any_quota_active(sb)
+#else
+#define ll_sb_any_quota_active(sb) sb_any_quota_enabled(sb)
+#endif
+
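+/* Invoke the filesystem's ->quota_on()/->quota_off() methods; kernels
+ * with the five-argument (resp. three-argument) form also take the
+ * remount flag. */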
+static inline int
+ll_quota_on(struct super_block *sb, int off, int ver, char *name, int remount)
+{
+ if (sb->s_qcop->quota_on) {
+ return sb->s_qcop->quota_on(sb, off, ver, name
+#ifdef HAVE_QUOTA_ON_5ARGS
+ , remount
+#endif
+ );
+ } else {
+ return -ENOSYS;
+ }
+}
+
+static inline int ll_quota_off(struct super_block *sb, int off, int remount)
+{
+ if (sb->s_qcop->quota_off) {
+ return sb->s_qcop->quota_off(sb, off
+#ifdef HAVE_QUOTA_OFF_3ARGS
+ , remount
+#endif
+ );
+ } else {
+ return -ENOSYS;
+ }
+}
+
+#ifndef HAVE_BLK_QUEUE_LOG_BLK_SIZE /* added in 2.6.31 */
+#define blk_queue_logical_block_size(q, sz) blk_queue_hardsect_size(q, sz)
+#endif
+
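+/* Quota hooks were uppercase DQUOT_*() macros before they became
+ * vfs_dq_*() functions; vfs_dq_off() also gained a remount flag. */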
+#ifndef HAVE_VFS_DQ_OFF
+# define ll_vfs_dq_init DQUOT_INIT
+# define ll_vfs_dq_drop DQUOT_DROP
+# define ll_vfs_dq_transfer DQUOT_TRANSFER
+# define ll_vfs_dq_off(sb, remount) DQUOT_OFF(sb)
+#else
+# define ll_vfs_dq_init vfs_dq_init
+# define ll_vfs_dq_drop vfs_dq_drop
+# define ll_vfs_dq_transfer vfs_dq_transfer
+# define ll_vfs_dq_off(sb, remount) vfs_dq_off(sb, remount)
+#endif
+
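+/* bdi_init()/bdi_destroy() do not exist on older kernels, where
+ * backing_dev_info setup is a no-op. */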
+#ifdef HAVE_BDI_INIT
+#define ll_bdi_init(bdi) bdi_init(bdi)
+#define ll_bdi_destroy(bdi) bdi_destroy(bdi)
+#else
+#define ll_bdi_init(bdi) 0
+#define ll_bdi_destroy(bdi) do { } while (0)
+#endif
+
+#ifdef HAVE_NEW_BACKING_DEV_INFO
+# define ll_bdi_wb_cnt(bdi) ((bdi).wb_cnt)
+#else
+# define ll_bdi_wb_cnt(bdi) 1
+#endif
+
+#ifdef HAVE_BLK_QUEUE_MAX_SECTORS /* removed in rhel6 */
+#define blk_queue_max_hw_sectors(q, sect) blk_queue_max_sectors(q, sect)
+#endif
+
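+/* Without the request_queue limits accessors, read the fields
+ * straight out of the queue. */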
+#ifndef HAVE_REQUEST_QUEUE_LIMITS
+#define queue_max_sectors(rq) ((rq)->max_sectors)
+#define queue_max_hw_sectors(rq) ((rq)->max_hw_sectors)
+#define queue_max_phys_segments(rq) ((rq)->max_phys_segments)
+#define queue_max_hw_segments(rq) ((rq)->max_hw_segments)
+#endif
+
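+/* blk_queue_max_segments() merged the separate phys/hw segment limits;
+ * emulate whichever side this kernel lacks. */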
+#ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS
+#define blk_queue_max_segments(rq, seg) \
+ do { blk_queue_max_phys_segments(rq, seg); \
+ blk_queue_max_hw_segments(rq, seg); } while (0)
+#else
+#define queue_max_phys_segments(rq) queue_max_segments(rq)
+#define queue_max_hw_segments(rq) queue_max_segments(rq)
+#endif
+
+#ifndef HAVE_BI_HW_SEGMENTS
+#define bio_hw_segments(q, bio) 0
+#endif
+