.ke_hash_size = 16,
.ke_conf_size = 8,
},
+#ifdef HAVE_DES3_SUPPORT
[ENCTYPE_DES3_CBC_RAW] = { /* des3-hmac-sha1 */
.ke_dispname = "des3-hmac-sha1",
.ke_enc_name = "cbc(des3_ede)",
.ke_conf_size = 8,
.ke_hash_hmac = 1,
},
+#endif
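
Each entry maps a Kerberos enctype to the Linux crypto API template named by ke_enc_name, which the keyblock setup code hands to the allocator once the session key arrives. A minimal sketch of that consumption, assuming the gss_keyblock fields used elsewhere in this file (illustrative, not the exact call site):

	/* sketch: turning ke_enc_name into a sync skcipher handle */
	kb->kb_tfm = crypto_alloc_sync_skcipher(ke->ke_enc_name, 0, 0);
	if (IS_ERR(kb->kb_tfm))
		return PTR_ERR(kb->kb_tfm);

	if (crypto_sync_skcipher_setkey(kb->kb_tfm, kb->kb_key.data,
					kb->kb_key.len)) {
		crypto_free_sync_skcipher(kb->kb_tfm);
		return -EACCES;
	}
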
[ENCTYPE_AES128_CTS_HMAC_SHA1_96] = { /* aes128-cts */
.ke_dispname = "aes128-cts-hmac-sha1-96",
.ke_enc_name = "cbc(aes)",
struct gss_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
- int iovcnt, lnet_kiov_t *iovs,
+ int iovcnt, struct bio_vec *iovs,
rawobj_t *cksum,
digest_hash hash_func)
{
int msgcnt,
rawobj_t *msgs,
int iovcnt,
- lnet_kiov_t *iovs,
+ struct bio_vec *iovs,
rawobj_t *token)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
int msgcnt,
rawobj_t *msgs,
int iovcnt,
- lnet_kiov_t *iovs,
+ struct bio_vec *iovs,
rawobj_t *token)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
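
By this point in the tree lnet_kiov_t is already a typedef of struct bio_vec, so the signature changes above are mechanical; the real churn is the field renames, kiov_page/kiov_offset/kiov_len becoming bv_page/bv_offset/bv_len. A sketch of how a checksum routine walks the vector under the new names (digest_update() is a stand-in for the hashing call, not a function in this file):

	int i;

	for (i = 0; i < iovcnt; i++) {
		char *ptr = page_address(iovs[i].bv_page) +
			    iovs[i].bv_offset;

		/* feed this page's payload into the running digest */
		digest_update(hash, ptr, iovs[i].bv_len);
	}
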
* if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
*/
static
-int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
- struct krb5_header *khdr,
- char *confounder,
- struct ptlrpc_bulk_desc *desc,
- rawobj_t *cipher,
- int adj_nob)
+int krb5_encrypt_bulk(struct crypto_sync_skcipher *tfm,
+ struct krb5_header *khdr,
+ char *confounder,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *cipher,
+ int adj_nob)
{
- struct blkcipher_desc ciph_desc;
- __u8 local_iv[16] = {0};
- struct scatterlist src, dst;
- struct sg_table sg_src, sg_dst;
- int blocksize, i, rc, nob = 0;
+ __u8 local_iv[16] = {0};
+ struct scatterlist src, dst;
+ struct sg_table sg_src, sg_dst;
+ int blocksize, i, rc, nob = 0;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
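
SYNC_SKCIPHER_REQUEST_ON_STACK replaces the old on-stack struct blkcipher_desc: it declares a request sized for any synchronous skcipher, so the I/O path needs no allocation. The lifecycle the rest of the function follows, in outline (src/dst/nbytes/iv are placeholders):

	skcipher_request_set_sync_tfm(req, tfm);	/* bind the cipher handle */
	skcipher_request_set_callback(req, 0, NULL, NULL); /* synchronous, no callback */
	skcipher_request_set_crypt(req, src, dst, nbytes, iv); /* describe one operation */
	rc = crypto_skcipher_encrypt(req);		/* or crypto_skcipher_decrypt() */
	skcipher_request_zero(req);			/* scrub the on-stack request */
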
- LASSERT(desc->bd_iov_count);
- LASSERT(GET_ENC_KIOV(desc));
-
- blocksize = crypto_blkcipher_blocksize(tfm);
- LASSERT(blocksize > 1);
- LASSERT(cipher->len == blocksize + sizeof(*khdr));
+ LASSERT(desc->bd_iov_count);
+ LASSERT(desc->bd_enc_vec);
- ciph_desc.tfm = tfm;
- ciph_desc.info = local_iv;
- ciph_desc.flags = 0;
+ blocksize = crypto_sync_skcipher_blocksize(tfm);
+ LASSERT(blocksize > 1);
+ LASSERT(cipher->len == blocksize + sizeof(*khdr));
- /* encrypt confounder */
+ /* encrypt confounder */
rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
if (rc != 0)
return rc;
gss_teardown_sgtable(&sg_src);
return rc;
}
+ skcipher_request_set_sync_tfm(req, tfm);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+ blocksize, local_iv);
- rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
- sg_src.sgl, blocksize);
+ rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);
gss_teardown_sgtable(&sg_dst);
gss_teardown_sgtable(&sg_src);
- if (rc) {
- CERROR("error to encrypt confounder: %d\n", rc);
- return rc;
- }
+ if (rc) {
+ CERROR("error to encrypt confounder: %d\n", rc);
+ skcipher_request_zero(req);
+ return rc;
+ }
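
crypto_skcipher_encrypt_iv() and crypto_skcipher_decrypt_iv() are not kernel APIs; they are small GSS-local wrappers kept with the other crypto helpers outside this excerpt. A plausible sketch, assuming they simply re-point the request at the given scatterlists and run it with the IV already held in req->iv:

	static inline
	int crypto_skcipher_encrypt_iv(struct skcipher_request *req,
				       struct scatterlist *dst,
				       struct scatterlist *src,
				       unsigned int len)
	{
		skcipher_request_set_crypt(req, src, dst, len, req->iv);
		return crypto_skcipher_encrypt(req);
	}
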
- /* encrypt clear pages */
- for (i = 0; i < desc->bd_iov_count; i++) {
+ /* encrypt clear pages */
+ for (i = 0; i < desc->bd_iov_count; i++) {
sg_init_table(&src, 1);
- sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
- (BD_GET_KIOV(desc, i).kiov_len +
+ sg_set_page(&src, desc->bd_vec[i].bv_page,
+ (desc->bd_vec[i].bv_len +
blocksize - 1) &
(~(blocksize - 1)),
- BD_GET_KIOV(desc, i).kiov_offset);
+ desc->bd_vec[i].bv_offset);
if (adj_nob)
nob += src.length;
sg_init_table(&dst, 1);
- sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
+ sg_set_page(&dst, desc->bd_enc_vec[i].bv_page,
src.length, src.offset);
- BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
- BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;
+ desc->bd_enc_vec[i].bv_offset = dst.offset;
+ desc->bd_enc_vec[i].bv_len = dst.length;
- rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
- src.length);
- if (rc) {
- CERROR("error to encrypt page: %d\n", rc);
- return rc;
- }
- }
+ skcipher_request_set_crypt(req, &src, &dst,
+ src.length, local_iv);
+ rc = crypto_skcipher_encrypt_iv(req, &dst, &src, src.length);
+ if (rc) {
+ CERROR("error to encrypt page: %d\n", rc);
+ skcipher_request_zero(req);
+ return rc;
+ }
+ }
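
Two details in the loop above are easy to miss. First, src.length is rounded up so every page is fed to the cipher in whole blocks. Second, local_iv is reused across all the calls, and CBC writes the last ciphertext block back into the request's IV buffer, so the confounder, the pages, and the header form one continuous CBC chain. The rounding idiom, for reference (blocksize is a power of two):

	/* round len up to the next multiple of blocksize */
	sg_len = (len + blocksize - 1) & ~(blocksize - 1);
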
- /* encrypt krb5 header */
+ /* encrypt krb5 header */
rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
- if (rc != 0)
+ if (rc != 0) {
+ skcipher_request_zero(req);
return rc;
+ }
rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
sizeof(*khdr));
if (rc != 0) {
gss_teardown_sgtable(&sg_src);
+ skcipher_request_zero(req);
return rc;
}
- rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
- sizeof(*khdr));
+ skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+ sizeof(*khdr), local_iv);
+ rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl,
+ sizeof(*khdr));
+ skcipher_request_zero(req);
gss_teardown_sgtable(&sg_dst);
gss_teardown_sgtable(&sg_src);
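
At this point the ciphertext is laid out the way the unwrap side expects: the small cipher buffer carries the encrypted confounder and the encrypted krb5 header, while the bulk payload sits in the encrypted pages. Schematically:

	/*
	 * cipher->data:  [ E(confounder) | E(krb5 header) ]
	 *                   blocksize      sizeof(*khdr)
	 * bd_enc_vec[i]: [ E(page i payload, rounded up to blocksize) ]
	 */
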
* desc->bd_nob_transferred is the size of cipher text received.
* desc->bd_nob is the target size of plain text supposed to be.
*
- * if adj_nob != 0, we adjust each page's kiov_len to the actual
+ * if adj_nob != 0, we adjust each page's bv_len to the actual
* plain text size.
* - for client read: we don't know data size for each page, so
- * bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
+ * bd_vec[]->bv_len is set to PAGE_SIZE, but actual data received might
* be smaller, so we need to adjust it according to
- * bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
+ * bd_enc_vec[]->bv_len.
* this means we DO NOT support the case where the server sends an odd-size
* chunk of data in a page that is not the last one.
* - for server write: we know the exact data size expected for each page,
- * thus kiov_len is accurate already, so we should not adjust it at all.
- * and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
- * round_up(bd_iov[]->kiov_len) which
+ * thus bv_len is accurate already, so we should not adjust it at all.
+ * and bd_enc_vec[]->bv_len should be
+ * round_up(bd_vec[]->bv_len) which
* should have been done by prep_bulk().
*/
static
-int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
- struct krb5_header *khdr,
- struct ptlrpc_bulk_desc *desc,
- rawobj_t *cipher,
- rawobj_t *plain,
- int adj_nob)
+int krb5_decrypt_bulk(struct crypto_sync_skcipher *tfm,
+ struct krb5_header *khdr,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *cipher,
+ rawobj_t *plain,
+ int adj_nob)
{
- struct blkcipher_desc ciph_desc;
- __u8 local_iv[16] = {0};
- struct scatterlist src, dst;
- struct sg_table sg_src, sg_dst;
- int ct_nob = 0, pt_nob = 0;
- int blocksize, i, rc;
-
- LASSERT(desc->bd_iov_count);
- LASSERT(GET_ENC_KIOV(desc));
- LASSERT(desc->bd_nob_transferred);
-
- blocksize = crypto_blkcipher_blocksize(tfm);
- LASSERT(blocksize > 1);
- LASSERT(cipher->len == blocksize + sizeof(*khdr));
-
- ciph_desc.tfm = tfm;
- ciph_desc.info = local_iv;
- ciph_desc.flags = 0;
-
- if (desc->bd_nob_transferred % blocksize) {
- CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
- return -EPROTO;
- }
+ __u8 local_iv[16] = {0};
+ struct scatterlist src, dst;
+ struct sg_table sg_src, sg_dst;
+ int ct_nob = 0, pt_nob = 0;
+ int blocksize, i, rc;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+
+ LASSERT(desc->bd_iov_count);
+ LASSERT(desc->bd_enc_vec);
+ LASSERT(desc->bd_nob_transferred);
+
+ blocksize = crypto_sync_skcipher_blocksize(tfm);
+ LASSERT(blocksize > 1);
+ LASSERT(cipher->len == blocksize + sizeof(*khdr));
- /* decrypt head (confounder) */
+ if (desc->bd_nob_transferred % blocksize) {
+ CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
+ return -EPROTO;
+ }
+
+ /* decrypt head (confounder) */
rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
if (rc != 0)
return rc;
return rc;
}
- rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
- sg_src.sgl, blocksize);
+ skcipher_request_set_sync_tfm(req, tfm);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+ blocksize, local_iv);
+
+	rc = crypto_skcipher_decrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);
gss_teardown_sgtable(&sg_dst);
gss_teardown_sgtable(&sg_src);
- if (rc) {
- CERROR("error to decrypt confounder: %d\n", rc);
- return rc;
- }
+ if (rc) {
+ CERROR("error to decrypt confounder: %d\n", rc);
+ skcipher_request_zero(req);
+ return rc;
+ }
for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
i++) {
- if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
- != 0 ||
- BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
- != 0) {
+ if (desc->bd_enc_vec[i].bv_offset % blocksize != 0 ||
+ desc->bd_enc_vec[i].bv_len % blocksize != 0) {
CERROR("page %d: odd offset %u len %u, blocksize %d\n",
- i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
- BD_GET_ENC_KIOV(desc, i).kiov_len,
+ i, desc->bd_enc_vec[i].bv_offset,
+ desc->bd_enc_vec[i].bv_len,
blocksize);
+ skcipher_request_zero(req);
return -EFAULT;
}
if (adj_nob) {
- if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+ if (ct_nob + desc->bd_enc_vec[i].bv_len >
desc->bd_nob_transferred)
- BD_GET_ENC_KIOV(desc, i).kiov_len =
+ desc->bd_enc_vec[i].bv_len =
desc->bd_nob_transferred - ct_nob;
- BD_GET_KIOV(desc, i).kiov_len =
- BD_GET_ENC_KIOV(desc, i).kiov_len;
- if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+ desc->bd_vec[i].bv_len =
+ desc->bd_enc_vec[i].bv_len;
+ if (pt_nob + desc->bd_enc_vec[i].bv_len >
desc->bd_nob)
- BD_GET_KIOV(desc, i).kiov_len =
+ desc->bd_vec[i].bv_len =
desc->bd_nob - pt_nob;
} else {
/* this should be guaranteed by LNET */
- LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
- kiov_len <=
+			LASSERT(ct_nob + desc->bd_enc_vec[i].bv_len <=
desc->bd_nob_transferred);
- LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
- BD_GET_ENC_KIOV(desc, i).kiov_len);
+ LASSERT(desc->bd_vec[i].bv_len <=
+ desc->bd_enc_vec[i].bv_len);
}
- if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
+ if (desc->bd_enc_vec[i].bv_len == 0)
continue;
sg_init_table(&src, 1);
- sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
- BD_GET_ENC_KIOV(desc, i).kiov_len,
- BD_GET_ENC_KIOV(desc, i).kiov_offset);
+ sg_set_page(&src, desc->bd_enc_vec[i].bv_page,
+ desc->bd_enc_vec[i].bv_len,
+ desc->bd_enc_vec[i].bv_offset);
dst = src;
- if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
+ if (desc->bd_vec[i].bv_len % blocksize == 0)
sg_assign_page(&dst,
- BD_GET_KIOV(desc, i).kiov_page);
-
- rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
- src.length);
- if (rc) {
- CERROR("error to decrypt page: %d\n", rc);
- return rc;
- }
-
- if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
- memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
- BD_GET_KIOV(desc, i).kiov_offset,
- page_address(BD_GET_ENC_KIOV(desc, i).
- kiov_page) +
- BD_GET_KIOV(desc, i).kiov_offset,
- BD_GET_KIOV(desc, i).kiov_len);
+ desc->bd_vec[i].bv_page);
+
+		skcipher_request_set_crypt(req, &src, &dst,
+					   src.length, local_iv);
+ rc = crypto_skcipher_decrypt_iv(req, &dst, &src, src.length);
+ if (rc) {
+ CERROR("error to decrypt page: %d\n", rc);
+ skcipher_request_zero(req);
+ return rc;
+ }
+
+ if (desc->bd_vec[i].bv_len % blocksize != 0) {
+ memcpy(page_address(desc->bd_vec[i].bv_page) +
+ desc->bd_vec[i].bv_offset,
+			       page_address(desc->bd_enc_vec[i].bv_page) +
+ desc->bd_vec[i].bv_offset,
+ desc->bd_vec[i].bv_len);
}
- ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
- pt_nob += BD_GET_KIOV(desc, i).kiov_len;
+ ct_nob += desc->bd_enc_vec[i].bv_len;
+ pt_nob += desc->bd_vec[i].bv_len;
}
- if (unlikely(ct_nob != desc->bd_nob_transferred)) {
- CERROR("%d cipher text transferred but only %d decrypted\n",
- desc->bd_nob_transferred, ct_nob);
- return -EFAULT;
- }
+ if (unlikely(ct_nob != desc->bd_nob_transferred)) {
+ CERROR("%d cipher text transferred but only %d decrypted\n",
+ desc->bd_nob_transferred, ct_nob);
+ skcipher_request_zero(req);
+ return -EFAULT;
+ }
- if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
- CERROR("%d plain text expected but only %d received\n",
- desc->bd_nob, pt_nob);
- return -EFAULT;
- }
+ if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
+ CERROR("%d plain text expected but only %d received\n",
+ desc->bd_nob, pt_nob);
+ skcipher_request_zero(req);
+ return -EFAULT;
+ }
/* if needed, clear up the rest unused iovs */
if (adj_nob)
while (i < desc->bd_iov_count)
- BD_GET_KIOV(desc, i++).kiov_len = 0;
+ desc->bd_vec[i++].bv_len = 0;
- /* decrypt tail (krb5 header) */
+ /* decrypt tail (krb5 header) */
rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
sizeof(*khdr));
-	if (rc != 0)
+	if (rc != 0) {
+		skcipher_request_zero(req);
		return rc;
+	}
}
- rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
- sizeof(*khdr));
-
+	skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+				   sizeof(*khdr), local_iv);
+ rc = crypto_skcipher_decrypt_iv(req, sg_dst.sgl, sg_src.sgl,
+ sizeof(*khdr));
gss_teardown_sgtable(&sg_src);
gss_teardown_sgtable(&sg_dst);
- if (rc) {
- CERROR("error to decrypt tail: %d\n", rc);
- return rc;
- }
+ skcipher_request_zero(req);
+ if (rc) {
+ CERROR("error to decrypt tail: %d\n", rc);
+ return rc;
+ }
- if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
- CERROR("krb5 header doesn't match\n");
- return -EACCES;
- }
+ if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
+ CERROR("krb5 header doesn't match\n");
+ return -EACCES;
+ }
- return 0;
+ return 0;
}
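
To make the adj_nob bookkeeping concrete, take a hypothetical client read of bd_nob = 1000 bytes into a single PAGE_SIZE page with a 16-byte blocksize. prep_bulk left bd_enc_vec[0].bv_len at the rounded-up PAGE_SIZE, and the sender actually transferred 1008 bytes of page ciphertext, so bd_nob_transferred = 1008. In the loop, bv_len of the encrypted vector is first clipped to 1008 - 0 = 1008 and copied into bd_vec[0].bv_len, which is then clamped against bd_nob to 1000. Since 1000 is not block-aligned, the page is decrypted in place in the encrypted page and only the first 1000 bytes are memcpy'd out. The loop exits with ct_nob = 1008 = bd_nob_transferred and pt_nob = 1000 = bd_nob, so both sanity checks pass.
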
static
LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
LASSERT(kctx->kc_keye.kb_tfm == NULL ||
ke->ke_conf_size >=
- crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+ crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm));
/*
* final token format:
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_sync_skcipher_blocksize(
+ kctx->kc_keye.kb_tfm);
}
LASSERT(blocksize <= ke->ke_conf_size);
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
rawobj_t arc4_keye = RAWOBJ_EMPTY;
- struct crypto_blkcipher *arc4_tfm;
+ struct crypto_sync_skcipher *arc4_tfm;
if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
NULL, 1, &cksum, 0, NULL, &arc4_keye,
GOTO(arc4_out_key, rc = -EACCES);
}
- arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
}
- if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
- arc4_keye.len)) {
+ if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
+ arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
GOTO(arc4_out_tfm, rc = -EACCES);
rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
&cipher, 1);
arc4_out_tfm:
- crypto_free_blkcipher(arc4_tfm);
+ crypto_free_sync_skcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
} else {
int blocksize, i;
LASSERT(desc->bd_iov_count);
- LASSERT(GET_ENC_KIOV(desc));
+ LASSERT(desc->bd_enc_vec);
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
+ LASSERT(desc->bd_enc_vec[i].bv_page);
/*
* offset should always start at page boundary of either
* client or server side.
*/
- if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
+ if (desc->bd_vec[i].bv_offset & blocksize) {
CERROR("odd offset %d in page %d\n",
- BD_GET_KIOV(desc, i).kiov_offset, i);
+ desc->bd_vec[i].bv_offset, i);
return GSS_S_FAILURE;
}
- BD_GET_ENC_KIOV(desc, i).kiov_offset =
- BD_GET_KIOV(desc, i).kiov_offset;
- BD_GET_ENC_KIOV(desc, i).kiov_len =
- (BD_GET_KIOV(desc, i).kiov_len +
+ desc->bd_enc_vec[i].bv_offset =
+ desc->bd_vec[i].bv_offset;
+ desc->bd_enc_vec[i].bv_len =
+ (desc->bd_vec[i].bv_len +
blocksize - 1) & (~(blocksize - 1));
}
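
This is the producer side of the contract krb5_decrypt_bulk() relies on: every encrypted segment starts at the same offset as its plain counterpart, and its length is rounded up to whole cipher blocks, which is exactly what the unwrap path asserts when it rejects odd offsets and lengths. With a hypothetical bv_len = 1000 and a 16-byte block, bd_enc_vec[i].bv_len becomes (1000 + 15) & ~15 = 1008.
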
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
- int blocksize;
+ int blocksz;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t data_desc[1], cipher;
__u8 conf[GSS_MAX_CIPHER_BLOCK];
* a tfm, currently only for arcfour-hmac */
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LASSERT(kctx->kc_keye.kb_tfm == NULL);
- blocksize = 1;
+ blocksz = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/*
* the bulk token size would be exactly (sizeof(krb5_header) +
* blocksize + sizeof(krb5_header) + hashsize)
*/
- LASSERT(blocksize <= ke->ke_conf_size);
- LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
- LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
+ LASSERT(blocksz <= ke->ke_conf_size);
+ LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);
+ LASSERT(token->len >= sizeof(*khdr) + blocksz + sizeof(*khdr) + 16);
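
The last assertion is the token layout spelled out in bytes: a krb5 header, one cipher block of encrypted confounder, an encrypted copy of the header, and room for the trailing checksum (the literal 16 covers the largest ke_hash_size in the table). For aes128-cts, assuming the 16-byte wrap header this code uses, that works out to 16 + 16 + 16 + 16 = 64 bytes minimum.
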
/*
* clear text layout for checksum:
/* compute checksum */
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
khdr, 1, data_desc,
- desc->bd_iov_count, GET_KIOV(desc),
+ desc->bd_iov_count, desc->bd_vec,
&cksum, gctx->hash_func))
GOTO(out_free_cksum, major = GSS_S_FAILURE);
LASSERT(cksum.len >= ke->ke_hash_size);
data_desc[0].len = ke->ke_conf_size;
cipher.data = (__u8 *)(khdr + 1);
- cipher.len = blocksize + sizeof(*khdr);
+ cipher.len = blocksz + sizeof(*khdr);
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LBUG();
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
unsigned char *tmpbuf;
- int blocksize, bodysize;
+ int blocksz, bodysize;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t cipher_in, plain_out;
rawobj_t hash_objs[3];
/* block size */
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LASSERT(kctx->kc_keye.kb_tfm == NULL);
- blocksize = 1;
+ blocksz = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/* expected token layout:
*/
bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
- if (bodysize % blocksize) {
+ if (bodysize % blocksz) {
CERROR("odd bodysize %d\n", bodysize);
return GSS_S_DEFECTIVE_TOKEN;
}
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
rawobj_t arc4_keye;
- struct crypto_blkcipher *arc4_tfm;
+ struct crypto_sync_skcipher *arc4_tfm;
cksum.data = token->data + token->len - ke->ke_hash_size;
cksum.len = ke->ke_hash_size;
GOTO(arc4_out, rc = -EACCES);
}
- arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
}
- if (crypto_blkcipher_setkey(arc4_tfm,
- arc4_keye.data, arc4_keye.len)) {
+ if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
+ arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
GOTO(arc4_out_tfm, rc = -EACCES);
rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
&plain_out, 0);
arc4_out_tfm:
- crypto_free_blkcipher(arc4_tfm);
+ crypto_free_sync_skcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
- int blocksize;
+ int blocksz;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t cipher, plain;
rawobj_t data_desc[1];
/* block size */
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LASSERT(kctx->kc_keye.kb_tfm == NULL);
- blocksize = 1;
+ blocksz = 1;
LBUG();
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
}
- LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+ LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);
/*
* token format is expected as:
* | krb5 header | head/tail cipher text | cksum |
* -----------------------------------------------
*/
- if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
+ if (token->len < sizeof(*khdr) + blocksz + sizeof(*khdr) +
ke->ke_hash_size) {
CERROR("short token size: %u\n", token->len);
return GSS_S_DEFECTIVE_TOKEN;
}
cipher.data = (__u8 *) (khdr + 1);
- cipher.len = blocksize + sizeof(*khdr);
+ cipher.len = blocksz + sizeof(*khdr);
plain.data = cipher.data;
plain.len = cipher.len;
* ------------------------------------------
*/
data_desc[0].data = plain.data;
- data_desc[0].len = blocksize;
+ data_desc[0].len = blocksz;
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
khdr, 1, data_desc,
desc->bd_iov_count,
- GET_KIOV(desc),
+ desc->bd_vec,
&cksum, gctx->hash_func))
return GSS_S_FAILURE;
LASSERT(cksum.len >= ke->ke_hash_size);
- if (memcmp(plain.data + blocksize + sizeof(*khdr),
+ if (memcmp(plain.data + blocksz + sizeof(*khdr),
cksum.data + cksum.len - ke->ke_hash_size,
ke->ke_hash_size)) {
CERROR("checksum mismatch\n");