Whamcloud - gitweb
b=16098
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_bulk.c
index e8ede29..c057983 100644 (file)
@@ -1,25 +1,41 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- * Copyright (C) 2008 Sun Microsystems. Inc.
- *   Author: Eric Mei <eric.mei@sun.com>
- * Copyright (C) 2006,2007 Cluster File Systems, Inc.
- *   Author: Eric Mei <ericm@clusterfs.com>
+ * GPL HEADER START
  *
- *   This file is part of Lustre, http://www.lustre.org.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- *   Lustre is free software; you can redistribute it and/or
- *   modify it under the terms of version 2 of the GNU General Public
- *   License as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- *   Lustre is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Lustre; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/gss/gss_bulk.c
+ *
+ * Author: Eric Mei <eric.mei@sun.com>
  */
 
 #ifndef EXPORT_SYMTAB
@@ -76,18 +92,19 @@ static void buf_to_sl(struct scatterlist *sl,
  * 3. swap the last two ciphertext blocks.
  * 4. truncate to original plaintext size.
  */
-static int cbc_cts_encrypt(struct crypto_tfm *tfm,
-                           struct scatterlist *sld,
-                           struct scatterlist *sls)
+static int cbc_cts_encrypt(struct ll_crypto_cipher *tfm,
+                           struct scatterlist      *sld,
+                           struct scatterlist      *sls)
 {
         struct scatterlist      slst, sldt;
+        struct blkcipher_desc   desc;
         void                   *data;
         __u8                    sbuf[CIPHER_MAX_BLKSIZE];
         __u8                    dbuf[CIPHER_MAX_BLKSIZE];
         unsigned int            blksize, blks, tail;
         int                     rc;
 
-        blksize = crypto_tfm_alg_blocksize(tfm);
+        blksize = ll_crypto_blkcipher_blocksize(tfm);
         blks = sls->length / blksize;
         tail = sls->length % blksize;
         LASSERT(blks > 0 && tail > 0);
@@ -100,15 +117,17 @@ static int cbc_cts_encrypt(struct crypto_tfm *tfm,
 
         buf_to_sl(&slst, sbuf, blksize);
         buf_to_sl(&sldt, dbuf, blksize);
+        desc.tfm   = tfm;
+        desc.flags = 0;
 
         /* encrypt head */
-        rc = crypto_cipher_encrypt(tfm, sld, sls, sls->length - tail);
+        rc = ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length - tail);
         if (unlikely(rc)) {
                 CERROR("encrypt head (%u) data: %d\n", sls->length - tail, rc);
                 return rc;
         }
         /* encrypt tail */
-        rc = crypto_cipher_encrypt(tfm, &sldt, &slst, blksize);
+        rc = ll_crypto_blkcipher_encrypt(&desc, &sldt, &slst, blksize);
         if (unlikely(rc)) {
                 CERROR("encrypt tail (%u) data: %d\n", slst.length, rc);
                 return rc;
@@ -142,10 +161,11 @@ static int cbc_cts_encrypt(struct crypto_tfm *tfm,
  * 4. do CBC decryption.
  * 5. truncate to original ciphertext size.
  */
-static int cbc_cts_decrypt(struct crypto_tfm *tfm,
+static int cbc_cts_decrypt(struct ll_crypto_cipher *tfm,
                            struct scatterlist *sld,
                            struct scatterlist *sls)
 {
+        struct blkcipher_desc   desc;
         struct scatterlist      slst, sldt;
         void                   *data;
         __u8                    sbuf[CIPHER_MAX_BLKSIZE];
@@ -153,14 +173,14 @@ static int cbc_cts_decrypt(struct crypto_tfm *tfm,
         unsigned int            blksize, blks, tail;
         int                     rc;
 
-        blksize = crypto_tfm_alg_blocksize(tfm);
+        blksize = ll_crypto_blkcipher_blocksize(tfm);
         blks = sls->length / blksize;
         tail = sls->length % blksize;
         LASSERT(blks > 0 && tail > 0);
 
         /* save current IV, and set IV to zero */
-        crypto_cipher_get_iv(tfm, sbuf, blksize);
-        crypto_cipher_set_iv(tfm, zero_iv, blksize);
+        ll_crypto_blkcipher_get_iv(tfm, sbuf, blksize);
+        ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);
 
         /* D(n) = Decrypt(K, C(n-1)) */
         slst = *sls;
@@ -168,15 +188,17 @@ static int cbc_cts_decrypt(struct crypto_tfm *tfm,
         slst.length = blksize;
 
         buf_to_sl(&sldt, dbuf, blksize);
+        desc.tfm   = tfm;
+        desc.flags = 0;
 
-        rc = crypto_cipher_decrypt(tfm, &sldt, &slst, blksize);
+        rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
         if (unlikely(rc)) {
                 CERROR("decrypt C(n-1) (%u): %d\n", slst.length, rc);
                 return rc;
         }
 
         /* restore IV */
-        crypto_cipher_set_iv(tfm, sbuf, blksize);
+        ll_crypto_blkcipher_set_iv(tfm, sbuf, blksize);
 
         data = cfs_kmap(sls->page);
         /* C(n) = C(n) | TAIL(D(n)) */
@@ -191,13 +213,13 @@ static int cbc_cts_decrypt(struct crypto_tfm *tfm,
         buf_to_sl(&sldt, dbuf, blksize);
 
         /* decrypt head */
-        rc = crypto_cipher_decrypt(tfm, sld, sls, sls->length - tail);
+        rc = ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length - tail);
         if (unlikely(rc)) {
                 CERROR("decrypt head (%u) data: %d\n", sls->length - tail, rc);
                 return rc;
         }
         /* decrypt tail */
-        rc = crypto_cipher_decrypt(tfm, &sldt, &slst, blksize);
+        rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
         if (unlikely(rc)) {
                 CERROR("decrypt tail (%u) data: %d\n", slst.length, rc);
                 return rc;
@@ -211,12 +233,14 @@ static int cbc_cts_decrypt(struct crypto_tfm *tfm,
         return 0;
 }
 
-static inline int do_cts_tfm(struct crypto_tfm *tfm,
+static inline int do_cts_tfm(struct ll_crypto_cipher *tfm,
                              int encrypt,
                              struct scatterlist *sld,
                              struct scatterlist *sls)
 {
+#ifndef HAVE_ASYNC_BLOCK_CIPHER
         LASSERT(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC);
+#endif
 
         if (encrypt)
                 return cbc_cts_encrypt(tfm, sld, sls);
@@ -227,33 +251,36 @@ static inline int do_cts_tfm(struct crypto_tfm *tfm,
 /*
  * normal encrypt/decrypt of data of even blocksize
  */
-static inline int do_cipher_tfm(struct crypto_tfm *tfm,
+static inline int do_cipher_tfm(struct ll_crypto_cipher *tfm,
                                 int encrypt,
                                 struct scatterlist *sld,
                                 struct scatterlist *sls)
 {
+        struct blkcipher_desc desc;
+        desc.tfm   = tfm;
+        desc.flags = 0;
         if (encrypt)
-                return crypto_cipher_encrypt(tfm, sld, sls, sls->length);
+                return ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length);
         else
-                return crypto_cipher_decrypt(tfm, sld, sls, sls->length);
+                return ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length);
 }
 
-static struct crypto_tfm *get_stream_cipher(__u8 *key, unsigned int keylen)
+static struct ll_crypto_cipher *get_stream_cipher(__u8 *key, unsigned int keylen)
 {
         const struct sptlrpc_ciph_type *ct;
-        struct crypto_tfm              *tfm;
+        struct ll_crypto_cipher        *tfm;
         int                             rc;
 
         /* using ARC4, the only stream cipher in linux for now */
         ct = sptlrpc_get_ciph_type(BULK_CIPH_ALG_ARC4);
         LASSERT(ct);
 
-        tfm = crypto_alloc_tfm(ct->sct_tfm_name, ct->sct_tfm_flags);
+        tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0);
         if (tfm == NULL) {
                 CERROR("Failed to allocate stream TFM %s\n", ct->sct_name);
                 return NULL;
         }
-        LASSERT(crypto_tfm_alg_blocksize(tfm));
+        LASSERT(ll_crypto_blkcipher_blocksize(tfm));
 
         if (keylen > ct->sct_keysize)
                 keylen = ct->sct_keysize;
@@ -261,10 +288,10 @@ static struct crypto_tfm *get_stream_cipher(__u8 *key, unsigned int keylen)
         LASSERT(keylen >= crypto_tfm_alg_min_keysize(tfm));
         LASSERT(keylen <= crypto_tfm_alg_max_keysize(tfm));
 
-        rc = crypto_cipher_setkey(tfm, key, keylen);
+        rc = ll_crypto_blkcipher_setkey(tfm, key, keylen);
         if (rc) {
                 CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
-                crypto_free_tfm(tfm);
+                ll_crypto_free_blkcipher(tfm);
                 return NULL;
         }
 
@@ -277,12 +304,12 @@ static int do_bulk_privacy(struct gss_ctx *gctx,
                            struct ptlrpc_bulk_sec_desc *bsd)
 {
         const struct sptlrpc_ciph_type *ct = sptlrpc_get_ciph_type(alg);
-        struct crypto_tfm  *tfm;
-        struct crypto_tfm  *stfm = NULL; /* backup stream cipher */
-        struct scatterlist  sls, sld, *sldp;
-        unsigned int        blksize, keygen_size;
-        int                 i, rc;
-        __u8                key[CIPHER_MAX_KEYSIZE];
+        struct ll_crypto_cipher  *tfm;
+        struct ll_crypto_cipher  *stfm = NULL; /* backup stream cipher */
+        struct scatterlist        sls, sld, *sldp;
+        unsigned int              blksize, keygen_size;
+        int                       i, rc;
+        __u8                      key[CIPHER_MAX_KEYSIZE];
 
         LASSERT(ct);
 
@@ -298,17 +325,17 @@ static int do_bulk_privacy(struct gss_ctx *gctx,
                 return 0;
         }
 
-        tfm = crypto_alloc_tfm(ct->sct_tfm_name, ct->sct_tfm_flags);
+        tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0 );
         if (tfm == NULL) {
                 CERROR("Failed to allocate TFM %s\n", ct->sct_name);
                 return -ENOMEM;
         }
-        blksize = crypto_tfm_alg_blocksize(tfm);
+        blksize = ll_crypto_blkcipher_blocksize(tfm);
 
         LASSERT(crypto_tfm_alg_max_keysize(tfm) >= ct->sct_keysize);
         LASSERT(crypto_tfm_alg_min_keysize(tfm) <= ct->sct_keysize);
         LASSERT(ct->sct_ivsize == 0 ||
-                crypto_tfm_alg_ivsize(tfm) == ct->sct_ivsize);
+                ll_crypto_blkcipher_ivsize(tfm) == ct->sct_ivsize);
         LASSERT(ct->sct_keysize <= CIPHER_MAX_KEYSIZE);
         LASSERT(blksize <= CIPHER_MAX_BLKSIZE);
 
@@ -331,7 +358,7 @@ static int do_bulk_privacy(struct gss_ctx *gctx,
                 goto out;
         }
 
-        rc = crypto_cipher_setkey(tfm, key, ct->sct_keysize);
+        rc = ll_crypto_blkcipher_setkey(tfm, key, ct->sct_keysize);
         if (rc) {
                 CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
                 goto out;
@@ -339,7 +366,7 @@ static int do_bulk_privacy(struct gss_ctx *gctx,
 
         /* stream cipher doesn't need iv */
         if (blksize > 1)
-                crypto_cipher_set_iv(tfm, zero_iv, blksize);
+                ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);
 
         for (i = 0; i < desc->bd_iov_count; i++) {
                 sls.page = desc->bd_iov[i].kiov_page;
@@ -405,9 +432,9 @@ static int do_bulk_privacy(struct gss_ctx *gctx,
 
 out:
         if (stfm)
-                crypto_free_tfm(stfm);
+                ll_crypto_free_blkcipher(stfm);
 
-        crypto_free_tfm(tfm);
+        ll_crypto_free_blkcipher(tfm);
         return rc;
 }
 
@@ -498,7 +525,7 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 
         switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
         case SPTLRPC_SVC_NULL:
-                vmsg = req->rq_repbuf;
+                vmsg = req->rq_repdata;
                 voff = vmsg->lm_bufcount - 1;
                 LASSERT(vmsg && vmsg->lm_bufcount >= 3);
 
@@ -508,7 +535,7 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                 break;
         case SPTLRPC_SVC_AUTH:
         case SPTLRPC_SVC_INTG:
-                vmsg = req->rq_repbuf;
+                vmsg = req->rq_repdata;
                 voff = vmsg->lm_bufcount - 2;
                 LASSERT(vmsg && vmsg->lm_bufcount >= 4);
 
@@ -517,7 +544,7 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                 LASSERT(rmsg && rmsg->lm_bufcount >= 4);
                 break;
         case SPTLRPC_SVC_PRIV:
-                vmsg = req->rq_repbuf;
+                vmsg = req->rq_repdata;
                 voff = vmsg->lm_bufcount - 1;
                 LASSERT(vmsg && vmsg->lm_bufcount >= 2);