X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Finclude%2Flinux%2Flustre_compat25.h;h=2007a74fb632e9e16ed8171da6a47e2fb8454034;hb=6a5b0b1ce3bbf5be2a8babba8619b95346edb24f;hp=e294e3f6e25cb068d35abf643997678fdb54c328;hpb=0ff1bfc429c3e9d4fced60212e37a8a996586175;p=fs%2Flustre-release.git

diff --git a/lustre/include/linux/lustre_compat25.h b/lustre/include/linux/lustre_compat25.h
index e294e3f..2007a74 100644
--- a/lustre/include/linux/lustre_compat25.h
+++ b/lustre/include/linux/lustre_compat25.h
@@ -182,8 +182,14 @@ static inline int cleanup_group_info(void)
 
 #include 
 
-#ifndef HAVE___D_REHASH
-#define __d_rehash(dentry, lock) d_rehash_cond(dentry, lock)
+#if !defined(HAVE_D_REHASH_COND) && defined(HAVE___D_REHASH)
+#define d_rehash_cond(dentry, lock) __d_rehash(dentry, lock)
+extern void __d_rehash(struct dentry *dentry, int lock);
+#endif
+
+#if !defined(HAVE_D_MOVE_LOCKED) && defined(HAVE___D_MOVE)
+#define d_move_locked(dentry, target) __d_move(dentry, target)
+extern void __d_move(struct dentry *dentry, struct dentry *target);
 #endif
 
 #ifdef HAVE_CAN_SLEEP_ARG
@@ -336,5 +342,207 @@ generic_file_write(struct file *filp, const char __user *buf, size_t len, loff_t
 #define p_pptr parent
 #endif
 
+#ifndef HAVE_SB_TIME_GRAN
+#ifndef HAVE_S_TIME_GRAN
+#error Need s_time_gran patch!
+#endif
+static inline u32 get_sb_time_gran(struct super_block *sb)
+{
+        return sb->s_time_gran;
+}
+#endif
+
+#ifdef HAVE_RW_TREE_LOCK
+#define TREE_READ_LOCK_IRQ(mapping)     read_lock_irq(&(mapping)->tree_lock)
+#define TREE_READ_UNLOCK_IRQ(mapping)   read_unlock_irq(&(mapping)->tree_lock)
+#else
+#define TREE_READ_LOCK_IRQ(mapping)     spin_lock_irq(&(mapping)->tree_lock)
+#define TREE_READ_UNLOCK_IRQ(mapping)   spin_unlock_irq(&(mapping)->tree_lock)
+#endif
+
+#ifdef HAVE_UNREGISTER_BLKDEV_RETURN_INT
+#define ll_unregister_blkdev(a, b)      unregister_blkdev((a), (b))
+#else
+static inline
+int ll_unregister_blkdev(unsigned int dev, const char *name)
+{
+        unregister_blkdev(dev, name);
+        return 0;
+}
+#endif
+
+#ifdef HAVE_INVALIDATE_BDEV_2ARG
+#define ll_invalidate_bdev(a, b)        invalidate_bdev((a), (b))
+#else
+#define ll_invalidate_bdev(a, b)        invalidate_bdev((a))
+#endif
+
+#ifdef HAVE_INODE_BLKSIZE
+#define ll_inode_blksize(a)             (a)->i_blksize
+#else
+#define ll_inode_blksize(a)             (1 << (a)->i_blkbits)
+#endif
+
+
+#ifdef FS_ODD_RENAME
+#define FS_RENAME_DOES_D_MOVE   FS_ODD_RENAME
+#endif
+
+/* add a Lustre-compatible layer for the crypto API */
+#include <linux/crypto.h>
+#ifdef HAVE_ASYNC_BLOCK_CIPHER
+#define ll_crypto_hash                          crypto_hash
+#define ll_crypto_cipher                        crypto_blkcipher
+#define ll_crypto_alloc_hash(name, type, mask)  crypto_alloc_hash(name, type, mask)
+#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_hash_setkey(tfm, key, keylen)
+#define ll_crypto_hash_init(desc)               crypto_hash_init(desc)
+#define ll_crypto_hash_update(desc, sl, bytes)  crypto_hash_update(desc, sl, bytes)
+#define ll_crypto_hash_final(desc, out)         crypto_hash_final(desc, out)
+#define ll_crypto_alloc_blkcipher(name, type, mask) \
+        crypto_alloc_blkcipher(name, type, mask)
+#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
+        crypto_blkcipher_setkey(tfm, key, keylen)
+#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
+        crypto_blkcipher_set_iv(tfm, src, len)
+#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
+        crypto_blkcipher_get_iv(tfm, dst, len)
+#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
+        crypto_blkcipher_encrypt(desc, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
+        crypto_blkcipher_decrypt(desc, dst, src, bytes)
+#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
+        crypto_blkcipher_encrypt_iv(desc, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
+        crypto_blkcipher_decrypt_iv(desc, dst, src, bytes)
+
+static inline int ll_crypto_hmac(struct ll_crypto_hash *tfm,
+                                 u8 *key, unsigned int *keylen,
+                                 struct scatterlist *sg,
+                                 unsigned int size, u8 *result)
+{
+        struct hash_desc desc;
+        int              rv;
+        desc.tfm   = tfm;
+        desc.flags = 0;
+        rv = crypto_hash_setkey(desc.tfm, key, *keylen);
+        if (rv) {
+                CERROR("failed to set hash key: %d\n", rv);
+                return rv;
+        }
+        return crypto_hash_digest(&desc, sg, size, result);
+}
+static inline
+unsigned int crypto_tfm_alg_max_keysize(struct crypto_blkcipher *tfm)
+{
+        return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.max_keysize;
+}
+static inline
+unsigned int crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
+{
+        return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.min_keysize;
+}
+
+#define ll_crypto_hash_blocksize(tfm)       crypto_hash_blocksize(tfm)
+#define ll_crypto_hash_digestsize(tfm)      crypto_hash_digestsize(tfm)
+#define ll_crypto_blkcipher_ivsize(tfm)     crypto_blkcipher_ivsize(tfm)
+#define ll_crypto_blkcipher_blocksize(tfm)  crypto_blkcipher_blocksize(tfm)
+#define ll_crypto_free_hash(tfm)            crypto_free_hash(tfm)
+#define ll_crypto_free_blkcipher(tfm)       crypto_free_blkcipher(tfm)
+#else /* HAVE_ASYNC_BLOCK_CIPHER */
+#include <linux/scatterlist.h>
+#define ll_crypto_hash          crypto_tfm
+#define ll_crypto_cipher        crypto_tfm
+struct hash_desc {
+        struct ll_crypto_hash *tfm;
+        u32                    flags;
+};
+struct blkcipher_desc {
+        struct ll_crypto_cipher *tfm;
+        void                    *info;
+        u32                      flags;
+};
+#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
+        crypto_cipher_setkey(tfm, key, keylen)
+#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
+        crypto_cipher_set_iv(tfm, src, len)
+#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
+        crypto_cipher_get_iv(tfm, dst, len)
+#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
+        crypto_cipher_encrypt((desc)->tfm, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
+        crypto_cipher_decrypt((desc)->tfm, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
+        crypto_cipher_decrypt_iv((desc)->tfm, dst, src, bytes, (desc)->info)
+#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
+        crypto_cipher_encrypt_iv((desc)->tfm, dst, src, bytes, (desc)->info)
+
+extern struct ll_crypto_cipher *ll_crypto_alloc_blkcipher(
+                            const char *algname, u32 type, u32 mask);
+static inline
+struct ll_crypto_hash *ll_crypto_alloc_hash(const char *alg, u32 type, u32 mask)
+{
+        char        buf[CRYPTO_MAX_ALG_NAME + 1];
+        const char *pan = alg;
+
+        if (strncmp("hmac(", alg, 5) == 0) {
+                char *vp = strnchr(alg, CRYPTO_MAX_ALG_NAME, ')');
+                if (vp) {
+                        /* strip the "hmac(...)" wrapper for the old API */
+                        memcpy(buf, alg + 5, vp - alg - 5);
+                        buf[vp - alg - 5] = 0x00;
+                        pan = buf;
+                }
+        }
+        return crypto_alloc_tfm(pan, 0);
+}
+static inline int ll_crypto_hash_init(struct hash_desc *desc)
+{
+        crypto_digest_init(desc->tfm);
+        return 0;
+}
+static inline int ll_crypto_hash_update(struct hash_desc *desc,
+                                        struct scatterlist *sg,
+                                        unsigned int nbytes)
+{
+        struct scatterlist *sl = sg;
+        unsigned int        count;
+        /*
+         * This approach is fragile: the caller must ensure that the
+         * lengths of sg[0..i] sum to exactly nbytes.  The upstream
+         * crypto_hash_update() likewise derives the scatterlist entry
+         * count from nbytes.  The old interface, which took the entry
+         * count directly, was safer, but it is gone.
+         */
+ */ + for (count = 0; nbytes > 0; count ++, sl ++) { + nbytes -= sl->length; + } + crypto_digest_update(desc->tfm, sg, count); return 0; +} +static inline int ll_crypto_hash_final(struct hash_desc *desc, u8 *out) +{ + crypto_digest_final(desc->tfm, out); return 0; +} +static inline int ll_crypto_hmac(struct crypto_tfm *tfm, + u8 *key, unsigned int *keylen, + struct scatterlist *sg, + unsigned int nbytes, + u8 *out) +{ + struct scatterlist *sl = sg; + int count; + for (count = 0; nbytes > 0; count ++, sl ++) { + nbytes -= sl->length; + } + crypto_hmac(tfm, key, keylen, sg, count, out); + return 0; +} + +#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_digest_setkey(tfm, key, keylen) +#define ll_crypto_blkcipher_blocksize(tfm) crypto_tfm_alg_blocksize(tfm) +#define ll_crypto_blkcipher_ivsize(tfm) crypto_tfm_alg_ivsize(tfm) +#define ll_crypto_hash_digestsize(tfm) crypto_tfm_alg_digestsize(tfm) +#define ll_crypto_hash_blocksize(tfm) crypto_tfm_alg_blocksize(tfm) +#define ll_crypto_free_hash(tfm) crypto_free_tfm(tfm) +#define ll_crypto_free_blkcipher(tfm) crypto_free_tfm(tfm) +#endif /* HAVE_ASYNC_BLOCK_CIPHER */ + #endif /* __KERNEL__ */ #endif /* _COMPAT25_H */