Whamcloud - gitweb
Branch HEAD
author yangsheng <yangsheng>
Thu, 27 Mar 2008 07:47:06 +0000 (07:47 +0000)
committer yangsheng <yangsheng>
Thu, 27 Mar 2008 07:47:06 +0000 (07:47 +0000)
b=14576
i=eric.mei
i=adilger

Wrap the crypto API to adapt to upstream kernel changes while remaining compatible with the old interface.

lustre/autoconf/lustre-core.m4
lustre/include/linux/lustre_compat25.h
lustre/obdclass/capa.c
lustre/ptlrpc/gss/gss_bulk.c
lustre/ptlrpc/gss/gss_krb5.h
lustre/ptlrpc/gss/gss_krb5_mech.c
lustre/ptlrpc/sec.c
lustre/ptlrpc/sec_bulk.c

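The change is an ifdef-selected shim: one set of ll_crypto_* names that expand to the new crypto_hash/crypto_blkcipher interface on kernels that provide it (HAVE_ASYNC_BLOCK_CIPHER), and to the legacy crypto_tfm calls otherwise, so callers are written once. The standalone sketch below only illustrates that pattern with userspace stubs; every struct and function in it is a stand-in rather than the kernel API, and the real mapping is the lustre_compat25.h hunk further down.

/* compat_shim_demo.c - minimal userspace sketch of the wrapper pattern;
 * all types and functions here are stand-ins, not the kernel crypto API. */
#include <stdio.h>

#define HAVE_ASYNC_BLOCK_CIPHER 1       /* flip to 0 to take the legacy path */

#if HAVE_ASYNC_BLOCK_CIPHER
/* "new" kernel: hash operations take a descriptor wrapping the transform */
struct crypto_hash { int id; };
struct hash_desc   { struct crypto_hash *tfm; unsigned int flags; };

static int crypto_hash_digest(struct hash_desc *desc, const char *msg)
{
        printf("new API: tfm %d digests \"%s\"\n", desc->tfm->id, msg);
        return 0;
}

#define ll_crypto_hash                    crypto_hash
#define ll_crypto_hash_digest(desc, msg)  crypto_hash_digest(desc, msg)
#else
/* "old" kernel: operations take the transform directly; the shim supplies
 * struct hash_desc itself so callers can be written one way. */
struct crypto_tfm { int id; };
struct hash_desc  { struct crypto_tfm *tfm; unsigned int flags; };

static int crypto_digest(struct crypto_tfm *tfm, const char *msg)
{
        printf("old API: tfm %d digests \"%s\"\n", tfm->id, msg);
        return 0;
}

#define ll_crypto_hash                    crypto_tfm
#define ll_crypto_hash_digest(desc, msg)  crypto_digest((desc)->tfm, msg)
#endif

/* callers see only the ll_* names and struct hash_desc */
int main(void)
{
        struct ll_crypto_hash tfm  = { .id = 42 };
        struct hash_desc      desc = { .tfm = &tfm, .flags = 0 };

        return ll_crypto_hash_digest(&desc, "hello");
}

Mapping through macros rather than inline wrappers means the new-API path compiles to exactly the upstream calls, with no extra indirection on modern kernels.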
diff --git a/lustre/autoconf/lustre-core.m4 b/lustre/autoconf/lustre-core.m4
index 1fd2674..772ba1f 100644
@@ -725,7 +725,9 @@ AC_DEFUN([LC_CONFIG_GSS],
 
        AC_CHECK_LIB([gssapi], [gss_init_sec_context],
                      [GSSAPI_LIBS="$GSSAPI_LDFLAGS -lgssapi"],
-                     [AC_MSG_ERROR([libgssapi is not found, which is required by GSS.])],)
+                     [AC_CHECK_LIB([gssglue], [gss_init_sec_context],
+                                   [GSSAPI_LIBS="$GSSAPI_LDFLAGS -lgssglue"],
+                                   [AC_MSG_ERROR([libgssapi or libgssglue is not found, which is required by GSS.])])],)
 
        AC_SUBST(GSSAPI_LIBS)
 
@@ -1412,6 +1414,23 @@ int main(void)
 CFLAGS="$tmp_flags"
 ])
 
+#
+# check for the kernel's new block cipher crypto API
+#
+AC_DEFUN([LC_ASYNC_BLOCK_CIPHER],
+[AC_MSG_CHECKING([if the kernel has the new block cipher interface])
+LB_LINUX_TRY_COMPILE([
+        #include <linux/crypto.h>
+],[
+        int v = CRYPTO_ALG_TYPE_BLKCIPHER;
+],[
+        AC_MSG_RESULT([yes])
+        AC_DEFINE(HAVE_ASYNC_BLOCK_CIPHER, 1, [kernel has the new block cipher interface])
+],[
+        AC_MSG_RESULT([no])
+])
+])
+
 
 #
 # LC_PROG_LINUX
@@ -1518,6 +1537,7 @@ AC_DEFUN([LC_PROG_LINUX],
                  
         # 2.6.22
          LC_INVALIDATE_BDEV_2ARG
+         LC_ASYNC_BLOCK_CIPHER
          # 2.6.23
          LC_UNREGISTER_BLKDEV_RETURN_INT
          LC_KERNEL_SPLICE_READ
diff --git a/lustre/include/linux/lustre_compat25.h b/lustre/include/linux/lustre_compat25.h
index 1d832a5..2007a74 100644
@@ -388,5 +388,161 @@ int ll_unregister_blkdev(unsigned int dev, const char *name)
 #define FS_RENAME_DOES_D_MOVE FS_ODD_RENAME
 #endif
 
+/* add a lustre compatible layer for crypto API */
+#include <linux/crypto.h>
+#ifdef HAVE_ASYNC_BLOCK_CIPHER
+#define ll_crypto_hash          crypto_hash
+#define ll_crypto_cipher        crypto_blkcipher
+#define ll_crypto_alloc_hash(name, type, mask)  crypto_alloc_hash(name, type, mask)
+#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_hash_setkey(tfm, key, keylen)
+#define ll_crypto_hash_init(desc)               crypto_hash_init(desc)
+#define ll_crypto_hash_update(desc, sl, bytes)  crypto_hash_update(desc, sl, bytes)
+#define ll_crypto_hash_final(desc, out)         crypto_hash_final(desc, out)
+#define ll_crypto_alloc_blkcipher(name, type, mask) \
+                crypto_alloc_blkcipher(name ,type, mask)
+#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
+                crypto_blkcipher_setkey(tfm, key, keylen)
+#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
+                crypto_blkcipher_set_iv(tfm, src, len)
+#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
+                crypto_blkcipher_get_iv(tfm, dst, len)
+#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
+                crypto_blkcipher_encrypt(desc, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
+                crypto_blkcipher_decrypt(desc, dst, src, bytes)
+#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
+                crypto_blkcipher_encrypt_iv(desc, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
+                crypto_blkcipher_decrypt_iv(desc, dst, src, bytes)
+
+static inline int ll_crypto_hmac(struct ll_crypto_hash *tfm,
+                                 u8 *key, unsigned int *keylen,
+                                 struct scatterlist *sg,
+                                 unsigned int size, u8 *result)
+{
+        struct hash_desc desc;
+        int              rv;
+        desc.tfm   = tfm;
+        desc.flags = 0;
+        rv = crypto_hash_setkey(desc.tfm, key, *keylen);
+        if (rv) {
+                CERROR("failed to set hash key: %d\n", rv);
+                return rv;
+        }
+        return crypto_hash_digest(&desc, sg, size, result);
+}
+static inline
+unsigned int crypto_tfm_alg_max_keysize(struct crypto_blkcipher *tfm)
+{
+        return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.max_keysize;
+}
+static inline
+unsigned int crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
+{
+        return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.min_keysize;
+}
+
+#define ll_crypto_hash_blocksize(tfm)       crypto_hash_blocksize(tfm)
+#define ll_crypto_hash_digestsize(tfm)      crypto_hash_digestsize(tfm)
+#define ll_crypto_blkcipher_ivsize(tfm)     crypto_blkcipher_ivsize(tfm)
+#define ll_crypto_blkcipher_blocksize(tfm)  crypto_blkcipher_blocksize(tfm)
+#define ll_crypto_free_hash(tfm)            crypto_free_hash(tfm)
+#define ll_crypto_free_blkcipher(tfm)       crypto_free_blkcipher(tfm)
+#else /* HAVE_ASYNC_BLOCK_CIPHER */
+#include <linux/scatterlist.h>
+#define ll_crypto_hash          crypto_tfm
+#define ll_crypto_cipher        crypto_tfm
+struct hash_desc {
+        struct ll_crypto_hash *tfm;
+        u32                    flags;
+};
+struct blkcipher_desc {
+        struct ll_crypto_cipher *tfm;
+        void                    *info;
+        u32                      flags;
+};
+#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
+        crypto_cipher_setkey(tfm, key, keylen)
+#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
+        crypto_cipher_set_iv(tfm, src, len)
+#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
+        crypto_cipher_get_iv(tfm, dst, len)
+#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
+        crypto_cipher_encrypt((desc)->tfm, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
+        crypto_cipher_decrypt((desc)->tfm, dst, src, bytes)
+#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
+        crypto_cipher_decrypt_iv((desc)->tfm, dst, src, bytes, (desc)->info)
+#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
+        crypto_cipher_encrypt_iv((desc)->tfm, dst, src, bytes, (desc)->info)
+
+extern struct ll_crypto_cipher *ll_crypto_alloc_blkcipher(
+                            const char * algname, u32 type, u32 mask);
+static inline 
+struct ll_crypto_hash *ll_crypto_alloc_hash(const char *alg, u32 type, u32 mask)
+{
+        char        buf[CRYPTO_MAX_ALG_NAME + 1];
+        const char *pan = alg;
+
+        if (strncmp("hmac(", alg, 5) == 0) {
+                char *vp = strnchr(alg, CRYPTO_MAX_ALG_NAME, ')');
+                if (vp) {
+                        memcpy(buf, alg + 5, vp - alg - 5);
+                        buf[vp - alg - 5] = '\0';
+                        pan = buf;
+                }
+        }
+        return crypto_alloc_tfm(pan, 0);
+}
+static inline int ll_crypto_hash_init(struct hash_desc *desc)
+{
+       crypto_digest_init(desc->tfm); return 0;
+}
+static inline int ll_crypto_hash_update(struct hash_desc *desc,
+                                        struct scatterlist *sg,
+                                        unsigned int nbytes)
+{
+        struct scatterlist *sl = sg;
+        unsigned int        count;
+                /*
+                 * This approach is fragile: we must ensure that the sum of
+                 * sg[0..i]->length never exceeds nbytes.
+                 * The upstream crypto_hash_update() likewise derives the
+                 * scatterlist entry count from nbytes.
+                 * The old interface was safer, but it is gone.
+                 */
+        for (count = 0; nbytes > 0; count ++, sl ++) {
+                nbytes -= sl->length;
+        }
+        crypto_digest_update(desc->tfm, sg, count); return 0;
+}
+static inline int ll_crypto_hash_final(struct hash_desc *desc, u8 *out)
+{
+        crypto_digest_final(desc->tfm, out); return 0;
+}
+static inline int ll_crypto_hmac(struct crypto_tfm *tfm,
+                                 u8 *key, unsigned int *keylen,
+                                 struct scatterlist *sg,
+                                 unsigned int nbytes,
+                                 u8 *out)
+{
+        struct scatterlist *sl = sg;
+        int                 count;
+        for (count = 0; nbytes > 0; count ++, sl ++) {
+                nbytes -= sl->length;
+        }
+        crypto_hmac(tfm, key, keylen, sg, count, out);
+        return 0;
+}
+
+#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_digest_setkey(tfm, key, keylen)
+#define ll_crypto_blkcipher_blocksize(tfm)      crypto_tfm_alg_blocksize(tfm)
+#define ll_crypto_blkcipher_ivsize(tfm) crypto_tfm_alg_ivsize(tfm)
+#define ll_crypto_hash_digestsize(tfm)  crypto_tfm_alg_digestsize(tfm)
+#define ll_crypto_hash_blocksize(tfm)   crypto_tfm_alg_blocksize(tfm)
+#define ll_crypto_free_hash(tfm)        crypto_free_tfm(tfm)
+#define ll_crypto_free_blkcipher(tfm)   crypto_free_tfm(tfm)
+#endif /* HAVE_ASYNC_BLOCK_CIPHER */
+
 #endif /* __KERNEL__ */
 #endif /* _COMPAT25_H */
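
The legacy fallback above has to turn a byte count back into a scatterlist entry count, because the old crypto_digest_update() takes the number of sg entries while the new crypto_hash_update() takes nbytes; the comment in ll_crypto_hash_update() warns that this walk is only safe when the entries really cover nbytes. Below is a small userspace sketch of that conversion; toy_sg and sg_count() are hypothetical, and unlike the shim it bounds the walk and tolerates a partially used last entry.

/* sg_count_demo.c - illustrative only; toy_sg stands in for the kernel's
 * struct scatterlist. */
#include <stdio.h>

struct toy_sg { unsigned int length; };

/* Walk the array until nbytes is consumed and return how many entries were
 * used.  nmax bounds the walk here; the compat shim has no such bound, which
 * is exactly the weakness its comment warns about. */
static unsigned int sg_count(const struct toy_sg *sg, unsigned int nmax,
                             unsigned int nbytes)
{
        unsigned int count = 0;

        while (nbytes > 0 && count < nmax) {
                if (sg[count].length >= nbytes)
                        nbytes = 0;           /* last (possibly partial) entry */
                else
                        nbytes -= sg[count].length;
                count++;
        }
        return count;
}

int main(void)
{
        struct toy_sg sg[] = { { 4096 }, { 4096 }, { 512 } };

        printf("%u entries cover 8704 bytes\n", sg_count(sg, 3, 8704));  /* 3 */
        printf("%u entries cover 4096 bytes\n", sg_count(sg, 3, 4096));  /* 1 */
        return 0;
}
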
diff --git a/lustre/obdclass/capa.c b/lustre/obdclass/capa.c
index 3943d55..747ac69 100644
@@ -231,8 +231,8 @@ struct obd_capa *capa_lookup(struct hlist_head *hash, struct lustre_capa *capa,
 
 int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
 {
-        struct crypto_tfm *tfm;
-        struct capa_hmac_alg *alg;
+        struct ll_crypto_hash *tfm;
+        struct capa_hmac_alg  *alg;
         int keylen;
         struct scatterlist sl = {
                 .page   = virt_to_page(capa),
@@ -247,7 +247,7 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
 
         alg = &capa_hmac_algs[capa_alg(capa)];
 
-        tfm = crypto_alloc_tfm(alg->ha_name, 0);
+        tfm = ll_crypto_alloc_hash(alg->ha_name, 0, 0);
         if (!tfm) {
                 CERROR("crypto_alloc_tfm failed, check whether your kernel"
                        "has crypto support!\n");
@@ -255,8 +255,8 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
         }
         keylen = alg->ha_keylen;
 
-        crypto_hmac(tfm, key, &keylen, &sl, 1, hmac);
-        crypto_free_tfm(tfm);
+        ll_crypto_hmac(tfm, key, &keylen, &sl, sl.length, hmac);
+        ll_crypto_free_hash(tfm);
 
         return 0;
 }
diff --git a/lustre/ptlrpc/gss/gss_bulk.c b/lustre/ptlrpc/gss/gss_bulk.c
index e8ede29..7766a06 100644
@@ -76,18 +76,19 @@ static void buf_to_sl(struct scatterlist *sl,
  * 3. swap the last two ciphertext blocks.
  * 4. truncate to original plaintext size.
  */
-static int cbc_cts_encrypt(struct crypto_tfm *tfm,
-                           struct scatterlist *sld,
-                           struct scatterlist *sls)
+static int cbc_cts_encrypt(struct ll_crypto_cipher *tfm,
+                           struct scatterlist      *sld,
+                           struct scatterlist      *sls)
 {
         struct scatterlist      slst, sldt;
+        struct blkcipher_desc   desc;
         void                   *data;
         __u8                    sbuf[CIPHER_MAX_BLKSIZE];
         __u8                    dbuf[CIPHER_MAX_BLKSIZE];
         unsigned int            blksize, blks, tail;
         int                     rc;
 
-        blksize = crypto_tfm_alg_blocksize(tfm);
+        blksize = ll_crypto_blkcipher_blocksize(tfm);
         blks = sls->length / blksize;
         tail = sls->length % blksize;
         LASSERT(blks > 0 && tail > 0);
@@ -100,15 +101,17 @@ static int cbc_cts_encrypt(struct crypto_tfm *tfm,
 
         buf_to_sl(&slst, sbuf, blksize);
         buf_to_sl(&sldt, dbuf, blksize);
+        desc.tfm   = tfm;
+        desc.flags = 0;
 
         /* encrypt head */
-        rc = crypto_cipher_encrypt(tfm, sld, sls, sls->length - tail);
+        rc = ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length - tail);
         if (unlikely(rc)) {
                 CERROR("encrypt head (%u) data: %d\n", sls->length - tail, rc);
                 return rc;
         }
         /* encrypt tail */
-        rc = crypto_cipher_encrypt(tfm, &sldt, &slst, blksize);
+        rc = ll_crypto_blkcipher_encrypt(&desc, &sldt, &slst, blksize);
         if (unlikely(rc)) {
                 CERROR("encrypt tail (%u) data: %d\n", slst.length, rc);
                 return rc;
@@ -142,10 +145,11 @@ static int cbc_cts_encrypt(struct crypto_tfm *tfm,
  * 4. do CBC decryption.
  * 5. truncate to original ciphertext size.
  */
-static int cbc_cts_decrypt(struct crypto_tfm *tfm,
+static int cbc_cts_decrypt(struct ll_crypto_cipher *tfm,
                            struct scatterlist *sld,
                            struct scatterlist *sls)
 {
+        struct blkcipher_desc   desc;
         struct scatterlist      slst, sldt;
         void                   *data;
         __u8                    sbuf[CIPHER_MAX_BLKSIZE];
@@ -153,14 +157,14 @@ static int cbc_cts_decrypt(struct crypto_tfm *tfm,
         unsigned int            blksize, blks, tail;
         int                     rc;
 
-        blksize = crypto_tfm_alg_blocksize(tfm);
+        blksize = ll_crypto_blkcipher_blocksize(tfm);
         blks = sls->length / blksize;
         tail = sls->length % blksize;
         LASSERT(blks > 0 && tail > 0);
 
         /* save current IV, and set IV to zero */
-        crypto_cipher_get_iv(tfm, sbuf, blksize);
-        crypto_cipher_set_iv(tfm, zero_iv, blksize);
+        ll_crypto_blkcipher_get_iv(tfm, sbuf, blksize);
+        ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);
 
         /* D(n) = Decrypt(K, C(n-1)) */
         slst = *sls;
@@ -168,15 +172,17 @@ static int cbc_cts_decrypt(struct crypto_tfm *tfm,
         slst.length = blksize;
 
         buf_to_sl(&sldt, dbuf, blksize);
+        desc.tfm   = tfm;
+        desc.flags = 0;
 
-        rc = crypto_cipher_decrypt(tfm, &sldt, &slst, blksize);
+        rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
         if (unlikely(rc)) {
                 CERROR("decrypt C(n-1) (%u): %d\n", slst.length, rc);
                 return rc;
         }
 
         /* restore IV */
-        crypto_cipher_set_iv(tfm, sbuf, blksize);
+        ll_crypto_blkcipher_set_iv(tfm, sbuf, blksize);
 
         data = cfs_kmap(sls->page);
         /* C(n) = C(n) | TAIL(D(n)) */
@@ -191,13 +197,13 @@ static int cbc_cts_decrypt(struct crypto_tfm *tfm,
         buf_to_sl(&sldt, dbuf, blksize);
 
         /* decrypt head */
-        rc = crypto_cipher_decrypt(tfm, sld, sls, sls->length - tail);
+        rc = ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length - tail);
         if (unlikely(rc)) {
                 CERROR("decrypt head (%u) data: %d\n", sls->length - tail, rc);
                 return rc;
         }
         /* decrypt tail */
-        rc = crypto_cipher_decrypt(tfm, &sldt, &slst, blksize);
+        rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
         if (unlikely(rc)) {
                 CERROR("decrypt tail (%u) data: %d\n", slst.length, rc);
                 return rc;
@@ -211,12 +217,14 @@ static int cbc_cts_decrypt(struct crypto_tfm *tfm,
         return 0;
 }
 
-static inline int do_cts_tfm(struct crypto_tfm *tfm,
+static inline int do_cts_tfm(struct ll_crypto_cipher *tfm,
                              int encrypt,
                              struct scatterlist *sld,
                              struct scatterlist *sls)
 {
+#ifndef HAVE_ASYNC_BLOCK_CIPHER
         LASSERT(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC);
+#endif
 
         if (encrypt)
                 return cbc_cts_encrypt(tfm, sld, sls);
@@ -227,33 +235,36 @@ static inline int do_cts_tfm(struct crypto_tfm *tfm,
 /*
  * normal encrypt/decrypt of data of even blocksize
  */
-static inline int do_cipher_tfm(struct crypto_tfm *tfm,
+static inline int do_cipher_tfm(struct ll_crypto_cipher *tfm,
                                 int encrypt,
                                 struct scatterlist *sld,
                                 struct scatterlist *sls)
 {
+        struct blkcipher_desc desc;
+        desc.tfm   = tfm;
+        desc.flags = 0;
         if (encrypt)
-                return crypto_cipher_encrypt(tfm, sld, sls, sls->length);
+                return ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length);
         else
-                return crypto_cipher_decrypt(tfm, sld, sls, sls->length);
+                return ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length);
 }
 
-static struct crypto_tfm *get_stream_cipher(__u8 *key, unsigned int keylen)
+static struct ll_crypto_cipher *get_stream_cipher(__u8 *key, unsigned int keylen)
 {
         const struct sptlrpc_ciph_type *ct;
-        struct crypto_tfm              *tfm;
+        struct ll_crypto_cipher        *tfm;
         int                             rc;
 
         /* using ARC4, the only stream cipher in linux for now */
         ct = sptlrpc_get_ciph_type(BULK_CIPH_ALG_ARC4);
         LASSERT(ct);
 
-        tfm = crypto_alloc_tfm(ct->sct_tfm_name, ct->sct_tfm_flags);
+        tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0);
         if (tfm == NULL) {
                 CERROR("Failed to allocate stream TFM %s\n", ct->sct_name);
                 return NULL;
         }
-        LASSERT(crypto_tfm_alg_blocksize(tfm));
+        LASSERT(ll_crypto_blkcipher_blocksize(tfm));
 
         if (keylen > ct->sct_keysize)
                 keylen = ct->sct_keysize;
@@ -261,10 +272,10 @@ static struct crypto_tfm *get_stream_cipher(__u8 *key, unsigned int keylen)
         LASSERT(keylen >= crypto_tfm_alg_min_keysize(tfm));
         LASSERT(keylen <= crypto_tfm_alg_max_keysize(tfm));
 
-        rc = crypto_cipher_setkey(tfm, key, keylen);
+        rc = ll_crypto_blkcipher_setkey(tfm, key, keylen);
         if (rc) {
                 CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
-                crypto_free_tfm(tfm);
+                ll_crypto_free_blkcipher(tfm);
                 return NULL;
         }
 
@@ -277,12 +288,12 @@ static int do_bulk_privacy(struct gss_ctx *gctx,
                            struct ptlrpc_bulk_sec_desc *bsd)
 {
         const struct sptlrpc_ciph_type *ct = sptlrpc_get_ciph_type(alg);
-        struct crypto_tfm  *tfm;
-        struct crypto_tfm  *stfm = NULL; /* backup stream cipher */
-        struct scatterlist  sls, sld, *sldp;
-        unsigned int        blksize, keygen_size;
-        int                 i, rc;
-        __u8                key[CIPHER_MAX_KEYSIZE];
+        struct ll_crypto_cipher  *tfm;
+        struct ll_crypto_cipher  *stfm = NULL; /* backup stream cipher */
+        struct scatterlist        sls, sld, *sldp;
+        unsigned int              blksize, keygen_size;
+        int                       i, rc;
+        __u8                      key[CIPHER_MAX_KEYSIZE];
 
         LASSERT(ct);
 
@@ -298,17 +309,17 @@ static int do_bulk_privacy(struct gss_ctx *gctx,
                 return 0;
         }
 
-        tfm = crypto_alloc_tfm(ct->sct_tfm_name, ct->sct_tfm_flags);
+        tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0);
         if (tfm == NULL) {
                 CERROR("Failed to allocate TFM %s\n", ct->sct_name);
                 return -ENOMEM;
         }
-        blksize = crypto_tfm_alg_blocksize(tfm);
+        blksize = ll_crypto_blkcipher_blocksize(tfm);
 
         LASSERT(crypto_tfm_alg_max_keysize(tfm) >= ct->sct_keysize);
         LASSERT(crypto_tfm_alg_min_keysize(tfm) <= ct->sct_keysize);
         LASSERT(ct->sct_ivsize == 0 ||
-                crypto_tfm_alg_ivsize(tfm) == ct->sct_ivsize);
+                ll_crypto_blkcipher_ivsize(tfm) == ct->sct_ivsize);
         LASSERT(ct->sct_keysize <= CIPHER_MAX_KEYSIZE);
         LASSERT(blksize <= CIPHER_MAX_BLKSIZE);
 
@@ -331,7 +342,7 @@ static int do_bulk_privacy(struct gss_ctx *gctx,
                 goto out;
         }
 
-        rc = crypto_cipher_setkey(tfm, key, ct->sct_keysize);
+        rc = ll_crypto_blkcipher_setkey(tfm, key, ct->sct_keysize);
         if (rc) {
                 CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
                 goto out;
@@ -339,7 +350,7 @@ static int do_bulk_privacy(struct gss_ctx *gctx,
 
         /* stream cipher doesn't need iv */
         if (blksize > 1)
-                crypto_cipher_set_iv(tfm, zero_iv, blksize);
+                ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);
 
         for (i = 0; i < desc->bd_iov_count; i++) {
                 sls.page = desc->bd_iov[i].kiov_page;
@@ -405,9 +416,9 @@ static int do_bulk_privacy(struct gss_ctx *gctx,
 
 out:
         if (stfm)
-                crypto_free_tfm(stfm);
+                ll_crypto_free_blkcipher(stfm);
 
-        crypto_free_tfm(tfm);
+        ll_crypto_free_blkcipher(tfm);
         return rc;
 }
 
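cbc_cts_encrypt()/cbc_cts_decrypt() above implement CBC with ciphertext stealing so the output stays exactly as long as the input: the zero-padded tail is encrypted as one extra block, the last two ciphertext blocks are swapped, and the result is truncated. The sketch below shows only that final layout step, with a toy block size and no real cipher; cts_finish() and its inputs are hypothetical names used for illustration.

/* cts_layout_demo.c - layout step only, no encryption; illustrative sketch. */
#include <stdio.h>
#include <string.h>

#define BLK 4

/* c   : head ciphertext, the (n / BLK) full blocks from plain CBC
 * e   : one extra block, the CBC encryption of the zero-padded tail
 * out : n bytes of CTS output - all head blocks but the last, then e,
 *       then the first (n % BLK) bytes of the last head block */
static void cts_finish(const unsigned char *c, unsigned int n,
                       const unsigned char *e, unsigned char *out)
{
        unsigned int tail = n % BLK;
        unsigned int head = n - tail;                 /* bytes in full blocks */

        memcpy(out, c, head - BLK);                   /* untouched leading blocks */
        memcpy(out + head - BLK, e, BLK);             /* stolen block moves up */
        memcpy(out + head, c + head - BLK, tail);     /* truncated last head block */
}

int main(void)
{
        unsigned char c[8]   = "ABCDEFGH";   /* two full "ciphertext" blocks */
        unsigned char e[BLK] = "WXYZ";       /* block made from the padded tail */
        unsigned char out[10];

        cts_finish(c, sizeof(out), e, out);
        printf("%.10s\n", (char *)out);      /* ABCDWXYZEF - still 10 bytes */
        return 0;
}

Running it prints ABCDWXYZEF: the stolen block WXYZ moves up, and only the first two bytes of the former last block survive the truncation.
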
diff --git a/lustre/ptlrpc/gss/gss_krb5.h b/lustre/ptlrpc/gss/gss_krb5.h
index 8cc4d44..cbdd57e 100644
@@ -77,8 +77,8 @@ struct krb5_header {
 };
 
 struct krb5_keyblock {
-        rawobj_t                kb_key;
-        struct crypto_tfm      *kb_tfm;
+        rawobj_t                 kb_key;
+        struct ll_crypto_cipher *kb_tfm;
 };
 
 struct krb5_ctx {
diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index c2e068a..3b7da5c 100644
@@ -96,45 +96,45 @@ struct krb5_enctype {
 static struct krb5_enctype enctypes[] = {
         [ENCTYPE_DES_CBC_RAW] = {               /* des-cbc-md5 */
                 "des-cbc-md5",
-                "des",
+                "cbc(des)",
                 "md5",
-                CRYPTO_TFM_MODE_CBC,
+                0,
                 16,
                 8,
                 0,
         },
         [ENCTYPE_DES3_CBC_RAW] = {              /* des3-hmac-sha1 */
                 "des3-hmac-sha1",
-                "des3_ede",
-                "sha1",
-                CRYPTO_TFM_MODE_CBC,
+                "cbc(des3_ede)",
+                "hmac(sha1)",
+                0,
                 20,
                 8,
                 1,
         },
         [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
                 "aes128-cts-hmac-sha1-96",
-                "aes",
-                "sha1",
-                CRYPTO_TFM_MODE_CBC,
+                "cbc(aes)",
+                "hmac(sha1)",
+                0,
                 12,
                 16,
                 1,
         },
         [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
                 "aes256-cts-hmac-sha1-96",
-                "aes",
-                "sha1",
-                CRYPTO_TFM_MODE_CBC,
+                "cbc(aes)",
+                "hmac(sha1)",
+                0,
                 12,
                 16,
                 1,
         },
         [ENCTYPE_ARCFOUR_HMAC] = {              /* arcfour-hmac-md5 */
                 "arcfour-hmac-md5",
-                "arc4",
-                "md5",
-                CRYPTO_TFM_MODE_ECB,
+                "ecb(arc4)",
+                "hmac(md5)",
+                0,
                 16,
                 8,
                 1,
@@ -154,14 +154,14 @@ static const char * enctype2str(__u32 enctype)
 static
 int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
 {
-        kb->kb_tfm = crypto_alloc_tfm(alg_name, alg_mode);
+        kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
         if (kb->kb_tfm == NULL) {
                 CERROR("failed to alloc tfm: %s, mode %d\n",
                        alg_name, alg_mode);
                 return -1;
         }
 
-        if (crypto_cipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
+        if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
                 CERROR("failed to set %s key, len %d\n",
                        alg_name, kb->kb_key.len);
                 return -1;
@@ -204,7 +204,7 @@ void keyblock_free(struct krb5_keyblock *kb)
 {
         rawobj_free(&kb->kb_key);
         if (kb->kb_tfm)
-                crypto_free_tfm(kb->kb_tfm);
+                ll_crypto_free_blkcipher(kb->kb_tfm);
 }
 
 static
@@ -538,51 +538,82 @@ void buf_to_sg(struct scatterlist *sg, char *ptr, int len)
 }
 
 static
-__u32 krb5_encrypt(struct crypto_tfm *tfm,
+__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
                    int decrypt,
                    void * iv,
                    void * in,
                    void * out,
                    int length)
 {
-        struct scatterlist sg;
+        struct blkcipher_desc desc;
+        struct scatterlist    sg;
         __u8 local_iv[16] = {0};
         __u32 ret = -EINVAL;
 
         LASSERT(tfm);
+        desc.tfm  = tfm;
+        desc.info = local_iv;
+        desc.flags = 0;
 
-        if (length % crypto_tfm_alg_blocksize(tfm) != 0) {
+        if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
                 CERROR("output length %d mismatch blocksize %d\n",
-                       length, crypto_tfm_alg_blocksize(tfm));
+                       length, ll_crypto_blkcipher_blocksize(tfm));
                 goto out;
         }
 
-        if (crypto_tfm_alg_ivsize(tfm) > 16) {
-                CERROR("iv size too large %d\n", crypto_tfm_alg_ivsize(tfm));
+        if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
+                CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
                 goto out;
         }
 
         if (iv)
-                memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm));
+                memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
 
         memcpy(out, in, length);
         buf_to_sg(&sg, out, length);
 
         if (decrypt)
-                ret = crypto_cipher_decrypt_iv(tfm, &sg, &sg, length, local_iv);
+                ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
         else
-                ret = crypto_cipher_encrypt_iv(tfm, &sg, &sg, length, local_iv);
+                ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
 
 out:
         return(ret);
 }
 
 static inline
-int krb5_digest_hmac(struct crypto_tfm *tfm,
+int krb5_digest_hmac(struct ll_crypto_hash *tfm,
                      rawobj_t *key,
                      struct krb5_header *khdr,
                      int msgcnt, rawobj_t *msgs,
                      rawobj_t *cksum)
+#ifdef HAVE_ASYNC_BLOCK_CIPHER
+{
+        struct hash_desc   desc;
+        struct scatterlist sg[1];
+        int                i;
+
+        ll_crypto_hash_setkey(tfm, key->data, key->len);
+        desc.tfm  = tfm;
+        desc.flags = 0;
+
+        ll_crypto_hash_init(&desc);
+
+        for (i = 0; i < msgcnt; i++) {
+                if (msgs[i].len == 0)
+                        continue;
+                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
+                ll_crypto_hash_update(&desc, sg, msgs[i].len);
+        }
+
+        if (khdr) {
+                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
+                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+        }
+
+        return ll_crypto_hash_final(&desc, cksum->data);
+}
+#else /* HAVE_ASYNC_BLOCK_CIPHER */
 {
         struct scatterlist sg[1];
         __u32              keylen = key->len, i;
@@ -604,34 +635,38 @@ int krb5_digest_hmac(struct crypto_tfm *tfm,
         crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
         return 0;
 }
+#endif /* HAVE_ASYNC_BLOCK_CIPHER */
 
 static inline
-int krb5_digest_norm(struct crypto_tfm *tfm,
+int krb5_digest_norm(struct ll_crypto_hash *tfm,
                      struct krb5_keyblock *kb,
                      struct krb5_header *khdr,
                      int msgcnt, rawobj_t *msgs,
                      rawobj_t *cksum)
 {
+        struct hash_desc   desc;
         struct scatterlist sg[1];
         int                i;
 
         LASSERT(kb->kb_tfm);
+        desc.tfm  = tfm;
+        desc.flags = 0;
 
-        crypto_digest_init(tfm);
+        ll_crypto_hash_init(&desc);
 
         for (i = 0; i < msgcnt; i++) {
                 if (msgs[i].len == 0)
                         continue;
                 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-                crypto_digest_update(tfm, sg, 1);
+                ll_crypto_hash_update(&desc, sg, msgs[i].len);
         }
 
         if (khdr) {
                 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-                crypto_digest_update(tfm, sg, 1);
+                ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
         }
 
-        crypto_digest_final(tfm, cksum->data);
+        ll_crypto_hash_final(&desc, cksum->data);
 
         return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
                             cksum->data, cksum->len);
@@ -648,17 +683,17 @@ __s32 krb5_make_checksum(__u32 enctype,
                          int msgcnt, rawobj_t *msgs,
                          rawobj_t *cksum)
 {
-        struct krb5_enctype *ke = &enctypes[enctype];
-        struct crypto_tfm   *tfm;
-        __u32                code = GSS_S_FAILURE;
-        int                  rc;
+        struct krb5_enctype   *ke = &enctypes[enctype];
+        struct ll_crypto_hash *tfm;
+        __u32                  code = GSS_S_FAILURE;
+        int                    rc;
 
-        if (!(tfm = crypto_alloc_tfm(ke->ke_hash_name, 0))) {
+        if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
                 CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
                 return GSS_S_FAILURE;
         }
 
-        cksum->len = crypto_tfm_alg_digestsize(tfm);
+        cksum->len = ll_crypto_hash_digestsize(tfm);
         OBD_ALLOC(cksum->data, cksum->len);
         if (!cksum->data) {
                 cksum->len = 0;
@@ -675,7 +710,7 @@ __s32 krb5_make_checksum(__u32 enctype,
         if (rc == 0)
                 code = GSS_S_COMPLETE;
 out_tfm:
-        crypto_free_tfm(tfm);
+        ll_crypto_free_hash(tfm);
         return code;
 }
 
@@ -811,20 +846,24 @@ int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
 }
 
 static
-int krb5_encrypt_rawobjs(struct crypto_tfm *tfm,
+int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
                          int mode_ecb,
                          int inobj_cnt,
                          rawobj_t *inobjs,
                          rawobj_t *outobj,
                          int enc)
 {
-        struct scatterlist src, dst;
-        __u8               local_iv[16] = {0}, *buf;
-        __u32              datalen = 0;
-        int                i, rc;
+        struct blkcipher_desc desc;
+        struct scatterlist    src, dst;
+        __u8                  local_iv[16] = {0}, *buf;
+        __u32                 datalen = 0;
+        int                   i, rc;
         ENTRY;
 
         buf = outobj->data;
+        desc.tfm  = tfm;
+        desc.info = local_iv;
+        desc.flags = 0;
 
         for (i = 0; i < inobj_cnt; i++) {
                 LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
@@ -834,18 +873,18 @@ int krb5_encrypt_rawobjs(struct crypto_tfm *tfm,
 
                 if (mode_ecb) {
                         if (enc)
-                                rc = crypto_cipher_encrypt(
-                                        tfm, &dst, &src, src.length);
+                                rc = ll_crypto_blkcipher_encrypt(
+                                        &desc, &dst, &src, src.length);
                         else
-                                rc = crypto_cipher_decrypt(
-                                        tfm, &dst, &src, src.length);
+                                rc = ll_crypto_blkcipher_decrypt(
+                                        &desc, &dst, &src, src.length);
                 } else {
                         if (enc)
-                                rc = crypto_cipher_encrypt_iv(
-                                        tfm, &dst, &src, src.length, local_iv);
+                                rc = ll_crypto_blkcipher_encrypt_iv(
+                                        &desc, &dst, &src, src.length);
                         else
-                                rc = crypto_cipher_decrypt_iv(
-                                        tfm, &dst, &src, src.length, local_iv);
+                                rc = ll_crypto_blkcipher_decrypt_iv(
+                                        &desc, &dst, &src, src.length);
                 }
 
                 if (rc) {
@@ -881,7 +920,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
         LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                 ke->ke_conf_size >=
-                crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm));
+                ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
 
         acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
 
@@ -908,7 +947,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                 blocksize = 1;
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm);
+                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
         LASSERT(blocksize <= ke->ke_conf_size);
 
@@ -941,8 +980,8 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
 
         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                rawobj_t                arc4_keye;
-                struct crypto_tfm      *arc4_tfm;
+                rawobj_t                 arc4_keye;
+                struct ll_crypto_cipher *arc4_tfm;
 
                 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                        NULL, 1, &cksum, &arc4_keye)) {
@@ -950,14 +989,14 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                         GOTO(arc4_out, enc_rc = -EACCES);
                 }
 
-                arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB);
+                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                 if (arc4_tfm == NULL) {
                         CERROR("failed to alloc tfm arc4 in ECB mode\n");
                         GOTO(arc4_out_key, enc_rc = -EACCES);
                 }
 
-                if (crypto_cipher_setkey(arc4_tfm,
-                                         arc4_keye.data, arc4_keye.len)) {
+                if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
+                                               arc4_keye.len)) {
                         CERROR("failed to set arc4 key, len %d\n",
                                arc4_keye.len);
                         GOTO(arc4_out_tfm, enc_rc = -EACCES);
@@ -966,7 +1005,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                 enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                               3, data_desc, &cipher, 1);
 arc4_out_tfm:
-                crypto_free_tfm(arc4_tfm);
+                ll_crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
                 rawobj_free(&arc4_keye);
 arc4_out:
@@ -1048,7 +1087,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                 blocksize = 1;
         } else {
                 LASSERT(kctx->kc_keye.kb_tfm);
-                blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm);
+                blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
         }
 
         /* expected token layout:
@@ -1085,8 +1124,8 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
         plain_out.len = bodysize;
 
         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
-                rawobj_t                arc4_keye;
-                struct crypto_tfm      *arc4_tfm;
+                rawobj_t                 arc4_keye;
+                struct ll_crypto_cipher *arc4_tfm;
 
                 cksum.data = token->data + token->len - ke->ke_hash_size;
                 cksum.len = ke->ke_hash_size;
@@ -1097,13 +1136,13 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                         GOTO(arc4_out, enc_rc = -EACCES);
                 }
 
-                arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB);
+                arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                 if (arc4_tfm == NULL) {
                         CERROR("failed to alloc tfm arc4 in ECB mode\n");
                         GOTO(arc4_out_key, enc_rc = -EACCES);
                 }
 
-                if (crypto_cipher_setkey(arc4_tfm,
+                if (ll_crypto_blkcipher_setkey(arc4_tfm,
                                          arc4_keye.data, arc4_keye.len)) {
                         CERROR("failed to set arc4 key, len %d\n",
                                arc4_keye.len);
@@ -1113,7 +1152,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
                 enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                               1, &cipher_in, &plain_out, 0);
 arc4_out_tfm:
-                crypto_free_tfm(arc4_tfm);
+                ll_crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
                 rawobj_free(&arc4_keye);
 arc4_out:
diff --git a/lustre/ptlrpc/sec.c b/lustre/ptlrpc/sec.c
index 5bd1768..7f49fd1 100644
@@ -2144,6 +2144,39 @@ const char * sec2target_str(struct ptlrpc_sec *sec)
 EXPORT_SYMBOL(sec2target_str);
 
 /****************************************
+ * crypto API helper/alloc blkcipher    *
+ ****************************************/
+
+#ifdef __KERNEL__
+#ifndef HAVE_ASYNC_BLOCK_CIPHER
+struct ll_crypto_cipher *ll_crypto_alloc_blkcipher(const char * algname,
+                                                   u32 type, u32 mask)
+{
+        char        buf[CRYPTO_MAX_ALG_NAME + 1];
+        const char *pan = algname;
+        u32         flag = 0; 
+
+        if (strncmp("cbc(", algname, 4) == 0)
+                flag |= CRYPTO_TFM_MODE_CBC;
+        else if (strncmp("ecb(", algname, 4) == 0)
+                flag |= CRYPTO_TFM_MODE_ECB;
+        if (flag) {
+                char *vp = strnchr(algname, CRYPTO_MAX_ALG_NAME, ')');
+                if (vp) {
+                        memcpy(buf, algname + 4, vp - algname - 4);
+                        buf[vp - algname - 4] = '\0';
+                        pan = buf;
+                } else {
+                        flag = 0;
+                }
+        }
+        return crypto_alloc_tfm(pan, flag);
+}
+EXPORT_SYMBOL(ll_crypto_alloc_blkcipher);
+#endif
+#endif
+
+/****************************************
  * initialize/finalize                  *
  ****************************************/
 
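On kernels without the new API, ll_crypto_alloc_blkcipher() above maps a new-style algorithm string such as "cbc(aes)" back to the legacy (name, mode flag) pair expected by crypto_alloc_tfm(). The userspace sketch below mirrors that translation but is not the Lustre code; old_style_name() and the MODE_* constants are hypothetical stand-ins for CRYPTO_TFM_MODE_CBC/ECB.

/* algname_demo.c - illustrative only; MODE_* stand in for the legacy
 * CRYPTO_TFM_MODE_* flags. */
#include <stdio.h>
#include <string.h>

#define MODE_ECB 0x1
#define MODE_CBC 0x2
#define MAX_ALG_NAME 64

/* Translate "cbc(aes)" into name "aes" plus MODE_CBC; strings without a
 * recognised mode prefix pass through unchanged with flag 0. */
static unsigned int old_style_name(const char *algname, char *buf, size_t buflen)
{
        const char *lp, *rp;
        unsigned int flag = 0;

        if (strncmp(algname, "cbc(", 4) == 0)
                flag = MODE_CBC;
        else if (strncmp(algname, "ecb(", 4) == 0)
                flag = MODE_ECB;

        lp = strchr(algname, '(');
        rp = strchr(algname, ')');
        if (flag && lp && rp && rp > lp + 1 && (size_t)(rp - lp) <= buflen) {
                memcpy(buf, lp + 1, rp - lp - 1);
                buf[rp - lp - 1] = '\0';
        } else {
                flag = 0;
                snprintf(buf, buflen, "%s", algname);
        }
        return flag;
}

int main(void)
{
        char         name[MAX_ALG_NAME];
        unsigned int flag;

        flag = old_style_name("cbc(aes)", name, sizeof(name));
        printf("%s, flag 0x%x\n", name, flag);     /* aes, flag 0x2 */

        flag = old_style_name("ecb(arc4)", name, sizeof(name));
        printf("%s, flag 0x%x\n", name, flag);     /* arc4, flag 0x1 */
        return 0;
}
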
diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
index f09663e..e49d19b 100644
@@ -904,9 +904,9 @@ static int do_bulk_checksum_crc32(struct ptlrpc_bulk_desc *desc, void *buf)
 
 static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
 {
-        struct crypto_tfm *tfm;
+        struct hash_desc    hdesc;
         struct scatterlist *sl;
-        int i, rc = 0;
+        int i, rc = 0, bytes = 0;
 
         LASSERT(alg > BULK_HASH_ALG_NULL &&
                 alg < BULK_HASH_ALG_MAX);
@@ -923,11 +923,12 @@ static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
                 return do_bulk_checksum_crc32(desc, buf);
         }
 
-        tfm = crypto_alloc_tfm(hash_types[alg].sht_tfm_name, 0);
-        if (tfm == NULL) {
+        hdesc.tfm = ll_crypto_alloc_hash(hash_types[alg].sht_tfm_name, 0, 0);
+        if (hdesc.tfm == NULL) {
                 CERROR("Unable to allocate TFM %s\n", hash_types[alg].sht_name);
                 return -ENOMEM;
         }
+        hdesc.flags = 0;
 
         OBD_ALLOC(sl, sizeof(*sl) * desc->bd_iov_count);
         if (sl == NULL) {
@@ -939,16 +940,17 @@ static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
                 sl[i].page = desc->bd_iov[i].kiov_page;
                 sl[i].offset = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                 sl[i].length = desc->bd_iov[i].kiov_len;
+                bytes += desc->bd_iov[i].kiov_len;
         }
 
-        crypto_digest_init(tfm);
-        crypto_digest_update(tfm, sl, desc->bd_iov_count);
-        crypto_digest_final(tfm, buf);
+        ll_crypto_hash_init(&hdesc);
+        ll_crypto_hash_update(&hdesc, sl, bytes);
+        ll_crypto_hash_final(&hdesc, buf);
 
         OBD_FREE(sl, sizeof(*sl) * desc->bd_iov_count);
 
 out_tfm:
-        crypto_free_tfm(tfm);
+        ll_crypto_free_hash(hdesc.tfm);
         return rc;
 }
 
@@ -1267,28 +1269,28 @@ static struct sptlrpc_ciph_type cipher_types[] = {
                 "null",         "null",       0,                   0,  0
         },
         [BULK_CIPH_ALG_ARC4]    = {
-                "arc4",         "arc4",       CRYPTO_TFM_MODE_ECB, 0,  16
+                "arc4",         "ecb(arc4)",       0, 0,  16
         },
         [BULK_CIPH_ALG_AES128]  = {
-                "aes128",       "aes",        CRYPTO_TFM_MODE_CBC, 16, 16
+                "aes128",       "cbc(aes)",        0, 16, 16
         },
         [BULK_CIPH_ALG_AES192]  = {
-                "aes192",       "aes",        CRYPTO_TFM_MODE_CBC, 16, 24
+                "aes192",       "cbc(aes)",        0, 16, 24
         },
         [BULK_CIPH_ALG_AES256]  = {
-                "aes256",       "aes",        CRYPTO_TFM_MODE_CBC, 16, 32
+                "aes256",       "cbc(aes)",        0, 16, 32
         },
         [BULK_CIPH_ALG_CAST128] = {
-                "cast128",      "cast5",      CRYPTO_TFM_MODE_CBC, 8,  16
+                "cast128",      "cbc(cast5)",      0, 8,  16
         },
         [BULK_CIPH_ALG_CAST256] = {
-                "cast256",      "cast6",      CRYPTO_TFM_MODE_CBC, 16, 32
+                "cast256",      "cbc(cast6)",      0, 16, 32
         },
         [BULK_CIPH_ALG_TWOFISH128] = {
-                "twofish128",   "twofish",    CRYPTO_TFM_MODE_CBC, 16, 16
+                "twofish128",   "cbc(twofish)",    0, 16, 16
         },
         [BULK_CIPH_ALG_TWOFISH256] = {
-                "twofish256",   "twofish",    CRYPTO_TFM_MODE_CBC, 16, 32
+                "twofish256",   "cbc(twofish)",    0, 16, 32
         },
 };