* if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
*/
static
-int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
- struct krb5_header *khdr,
- char *confounder,
- struct ptlrpc_bulk_desc *desc,
- rawobj_t *cipher,
- int adj_nob)
+int krb5_encrypt_bulk(struct crypto_sync_skcipher *tfm,
+ struct krb5_header *khdr,
+ char *confounder,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *cipher,
+ int adj_nob)
{
- struct blkcipher_desc ciph_desc;
- __u8 local_iv[16] = {0};
- struct scatterlist src, dst;
- struct sg_table sg_src, sg_dst;
- int blocksize, i, rc, nob = 0;
+ __u8 local_iv[16] = {0};
+ struct scatterlist src, dst;
+ struct sg_table sg_src, sg_dst;
+ int blocksize, i, rc, nob = 0;
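+ /*
+ * The request lives on the stack: a sync skcipher is guaranteed to
+ * complete synchronously, so no allocation or completion callback
+ * is needed.
+ */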
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- LASSERT(desc->bd_iov_count);
+ LASSERT(desc->bd_iov_count);
LASSERT(desc->bd_enc_vec);
- blocksize = crypto_blkcipher_blocksize(tfm);
- LASSERT(blocksize > 1);
- LASSERT(cipher->len == blocksize + sizeof(*khdr));
-
- ciph_desc.tfm = tfm;
- ciph_desc.info = local_iv;
- ciph_desc.flags = 0;
+ blocksize = crypto_sync_skcipher_blocksize(tfm);
+ LASSERT(blocksize > 1);
+ LASSERT(cipher->len == blocksize + sizeof(*khdr));
- /* encrypt confounder */
+ /* encrypt confounder */
rc = gss_setup_sgtable(&sg_src, &src, confounder, blocksize);
if (rc != 0)
return rc;

rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data, blocksize);
if (rc != 0) {
gss_teardown_sgtable(&sg_src);
return rc;
}
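+ /*
+ * One-time request setup. The crypto_skcipher_{en,de}crypt_iv()
+ * helpers are assumed to reuse the IV registered here, so local_iv
+ * carries the CBC chaining value across the confounder, the data
+ * pages and the krb5 header.
+ */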
+ skcipher_request_set_sync_tfm(req, tfm);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+ blocksize, local_iv);
- rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl,
- sg_src.sgl, blocksize);
+ rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);
gss_teardown_sgtable(&sg_dst);
gss_teardown_sgtable(&sg_src);
- if (rc) {
- CERROR("error to encrypt confounder: %d\n", rc);
- return rc;
- }
+ if (rc) {
+ CERROR("error to encrypt confounder: %d\n", rc);
+ skcipher_request_zero(req);
+ return rc;
+ }
- /* encrypt clear pages */
- for (i = 0; i < desc->bd_iov_count; i++) {
+ /* encrypt clear pages */
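+ /*
+ * Each page is encrypted with its length rounded up to a multiple
+ * of the cipher block size; the decrypt side checks this alignment.
+ */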
+ for (i = 0; i < desc->bd_iov_count; i++) {
sg_init_table(&src, 1);
sg_set_page(&src, desc->bd_vec[i].bv_page,
(desc->bd_vec[i].bv_len +
blocksize - 1) & (~(blocksize - 1)),
desc->bd_vec[i].bv_offset);
if (adj_nob)
nob += src.length;
sg_init_table(&dst, 1);
sg_set_page(&dst, desc->bd_enc_vec[i].bv_page,
src.length, src.offset);

desc->bd_enc_vec[i].bv_offset = dst.offset;
desc->bd_enc_vec[i].bv_len = dst.length;
- rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
- src.length);
- if (rc) {
- CERROR("error to encrypt page: %d\n", rc);
- return rc;
- }
- }
+ skcipher_request_set_crypt(req, &src, &dst,
+ src.length, local_iv);
+ rc = crypto_skcipher_encrypt_iv(req, &dst, &src, src.length);
+ if (rc) {
+ CERROR("error to encrypt page: %d\n", rc);
+ skcipher_request_zero(req);
+ return rc;
+ }
+ }
- /* encrypt krb5 header */
+ /* encrypt krb5 header */
rc = gss_setup_sgtable(&sg_src, &src, khdr, sizeof(*khdr));
- if (rc != 0)
+ if (rc != 0) {
+ skcipher_request_zero(req);
return rc;
+ }
rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
sizeof(*khdr));
if (rc != 0) {
gss_teardown_sgtable(&sg_src);
+ skcipher_request_zero(req);
return rc;
}
- rc = crypto_blkcipher_encrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
- sizeof(*khdr));
+ skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+ sizeof(*khdr), local_iv);
+ rc = crypto_skcipher_encrypt_iv(req, sg_dst.sgl, sg_src.sgl,
+ sizeof(*khdr));
+ skcipher_request_zero(req);
gss_teardown_sgtable(&sg_dst);
gss_teardown_sgtable(&sg_src);
* should have been done by prep_bulk().
*/
static
-int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
- struct krb5_header *khdr,
- struct ptlrpc_bulk_desc *desc,
- rawobj_t *cipher,
- rawobj_t *plain,
- int adj_nob)
+int krb5_decrypt_bulk(struct crypto_sync_skcipher *tfm,
+ struct krb5_header *khdr,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *cipher,
+ rawobj_t *plain,
+ int adj_nob)
{
- struct blkcipher_desc ciph_desc;
- __u8 local_iv[16] = {0};
- struct scatterlist src, dst;
- struct sg_table sg_src, sg_dst;
- int ct_nob = 0, pt_nob = 0;
- int blocksize, i, rc;
-
- LASSERT(desc->bd_iov_count);
- LASSERT(desc->bd_enc_vec);
- LASSERT(desc->bd_nob_transferred);
+ __u8 local_iv[16] = {0};
+ struct scatterlist src, dst;
+ struct sg_table sg_src, sg_dst;
+ int ct_nob = 0, pt_nob = 0;
+ int blocksize, i, rc;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- blocksize = crypto_blkcipher_blocksize(tfm);
- LASSERT(blocksize > 1);
- LASSERT(cipher->len == blocksize + sizeof(*khdr));
+ LASSERT(desc->bd_iov_count);
+ LASSERT(desc->bd_enc_vec);
+ LASSERT(desc->bd_nob_transferred);
- ciph_desc.tfm = tfm;
- ciph_desc.info = local_iv;
- ciph_desc.flags = 0;
+ blocksize = crypto_sync_skcipher_blocksize(tfm);
+ LASSERT(blocksize > 1);
+ LASSERT(cipher->len == blocksize + sizeof(*khdr));
- if (desc->bd_nob_transferred % blocksize) {
- CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
- return -EPROTO;
- }
+ if (desc->bd_nob_transferred % blocksize) {
+ CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
+ return -EPROTO;
+ }
- /* decrypt head (confounder) */
+ /* decrypt head (confounder) */
rc = gss_setup_sgtable(&sg_src, &src, cipher->data, blocksize);
if (rc != 0)
return rc;

rc = gss_setup_sgtable(&sg_dst, &dst, plain->data, blocksize);
if (rc != 0) {
gss_teardown_sgtable(&sg_src);
return rc;
}
- rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl,
- sg_src.sgl, blocksize);
+ skcipher_request_set_sync_tfm(req, tfm);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+ blocksize, local_iv);
+
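+ /* as on the encrypt side, local_iv carries the CBC chaining value */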
+ rc = crypto_skcipher_decrypt_iv(req, sg_dst.sgl, sg_src.sgl, blocksize);
gss_teardown_sgtable(&sg_dst);
gss_teardown_sgtable(&sg_src);
- if (rc) {
- CERROR("error to decrypt confounder: %d\n", rc);
- return rc;
- }
+ if (rc) {
+ CERROR("error to decrypt confounder: %d\n", rc);
+ skcipher_request_zero(req);
+ return rc;
+ }
for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
i++) {
- if (desc->bd_enc_vec[i].bv_offset % blocksize
- != 0 ||
- desc->bd_enc_vec[i].bv_len % blocksize
- != 0) {
+ if (desc->bd_enc_vec[i].bv_offset % blocksize != 0 ||
+ desc->bd_enc_vec[i].bv_len % blocksize != 0) {
CERROR("page %d: odd offset %u len %u, blocksize %d\n",
i, desc->bd_enc_vec[i].bv_offset,
desc->bd_enc_vec[i].bv_len,
blocksize);
+ skcipher_request_zero(req);
return -EFAULT;
}
if (desc->bd_enc_vec[i].bv_len == 0)
continue;

sg_init_table(&src, 1);
sg_set_page(&src, desc->bd_enc_vec[i].bv_page,
desc->bd_enc_vec[i].bv_len,
desc->bd_enc_vec[i].bv_offset);
dst = src;
if (desc->bd_vec[i].bv_len % blocksize == 0)
sg_assign_page(&dst,
desc->bd_vec[i].bv_page);
- rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
- src.length);
- if (rc) {
- CERROR("error to decrypt page: %d\n", rc);
- return rc;
- }
+ skcipher_request_set_crypt(req, &src, &dst,
+ src.length, local_iv);
+ rc = crypto_skcipher_decrypt_iv(req, &dst, &src, src.length);
+ if (rc) {
+ CERROR("error to decrypt page: %d\n", rc);
+ skcipher_request_zero(req);
+ return rc;
+ }
if (desc->bd_vec[i].bv_len % blocksize != 0) {
memcpy(page_address(desc->bd_vec[i].bv_page) +
desc->bd_vec[i].bv_offset,
page_address(desc->bd_enc_vec[i].bv_page) +
desc->bd_vec[i].bv_offset,
desc->bd_vec[i].bv_len);
}

ct_nob += desc->bd_enc_vec[i].bv_len;
pt_nob += desc->bd_vec[i].bv_len;
}
- if (unlikely(ct_nob != desc->bd_nob_transferred)) {
- CERROR("%d cipher text transferred but only %d decrypted\n",
- desc->bd_nob_transferred, ct_nob);
- return -EFAULT;
- }
+ if (unlikely(ct_nob != desc->bd_nob_transferred)) {
+ CERROR("%d cipher text transferred but only %d decrypted\n",
+ desc->bd_nob_transferred, ct_nob);
+ skcipher_request_zero(req);
+ return -EFAULT;
+ }
- if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
- CERROR("%d plain text expected but only %d received\n",
- desc->bd_nob, pt_nob);
- return -EFAULT;
- }
+ if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
+ CERROR("%d plain text expected but only %d received\n",
+ desc->bd_nob, pt_nob);
+ skcipher_request_zero(req);
+ return -EFAULT;
+ }
/* if needed, clear up the rest unused iovs */
if (adj_nob)
while (i < desc->bd_iov_count)
desc->bd_vec[i++].bv_len = 0;
- /* decrypt tail (krb5 header) */
+ /* decrypt tail (krb5 header) */
rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
sizeof(*khdr));
- if (rc != 0)
+ if (rc != 0) {
+ skcipher_request_zero(req);
return rc;
+ }

rc = gss_setup_sgtable(&sg_dst, &dst, cipher->data + blocksize,
sizeof(*khdr));
if (rc != 0) {
gss_teardown_sgtable(&sg_src);
+ skcipher_request_zero(req);
return rc;
}
- rc = crypto_blkcipher_decrypt_iv(&ciph_desc, sg_dst.sgl, sg_src.sgl,
- sizeof(*khdr));
-
+ skcipher_request_set_crypt(req, sg_src.sgl, sg_dst.sgl,
+ sizeof(*khdr), local_iv);
+ rc = crypto_skcipher_decrypt_iv(req, sg_dst.sgl, sg_src.sgl,
+ sizeof(*khdr));
gss_teardown_sgtable(&sg_src);
gss_teardown_sgtable(&sg_dst);
- if (rc) {
- CERROR("error to decrypt tail: %d\n", rc);
- return rc;
- }
+ skcipher_request_zero(req);
+ if (rc) {
+ CERROR("error to decrypt tail: %d\n", rc);
+ return rc;
+ }
- if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
- CERROR("krb5 header doesn't match\n");
- return -EACCES;
- }
+ if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
+ CERROR("krb5 header doesn't match\n");
+ return -EACCES;
+ }
- return 0;
+ return 0;
}
static
LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
LASSERT(kctx->kc_keye.kb_tfm == NULL ||
ke->ke_conf_size >=
- crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+ crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm));
/*
* final token format:
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_sync_skcipher_blocksize(
+ kctx->kc_keye.kb_tfm);
}
LASSERT(blocksize <= ke->ke_conf_size);
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
rawobj_t arc4_keye = RAWOBJ_EMPTY;
- struct crypto_blkcipher *arc4_tfm;
+ struct crypto_sync_skcipher *arc4_tfm;
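+ /* RC4 is exposed through the skcipher API as "ecb(arc4)" */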
if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
NULL, 1, &cksum, 0, NULL, &arc4_keye,
GOTO(arc4_out_key, rc = -EACCES);
}
- arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
}
- if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
- arc4_keye.len)) {
+ if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
+ arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
GOTO(arc4_out_tfm, rc = -EACCES);
rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc,
&cipher, 1);
arc4_out_tfm:
- crypto_free_blkcipher(arc4_tfm);
+ crypto_free_sync_skcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
} else {
LASSERT(desc->bd_enc_vec);
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
for (i = 0; i < desc->bd_iov_count; i++) {
LASSERT(desc->bd_enc_vec[i].bv_page);
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
- int blocksize;
+ int blocksz;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t data_desc[1], cipher;
__u8 conf[GSS_MAX_CIPHER_BLOCK];
* a tfm, currently only for arcfour-hmac */
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LASSERT(kctx->kc_keye.kb_tfm == NULL);
- blocksize = 1;
+ blocksz = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/*
* the bulk token size would be exactly (sizeof(krb5_header) +
* blocksize + sizeof(krb5_header) + hashsize)
*/
- LASSERT(blocksize <= ke->ke_conf_size);
- LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
- LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
+ LASSERT(blocksz <= ke->ke_conf_size);
+ LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);
+ LASSERT(token->len >= sizeof(*khdr) + blocksz + sizeof(*khdr) + 16);
/*
* clear text layout for checksum:
data_desc[0].len = ke->ke_conf_size;
cipher.data = (__u8 *)(khdr + 1);
- cipher.len = blocksize + sizeof(*khdr);
+ cipher.len = blocksz + sizeof(*khdr);
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LBUG();
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
unsigned char *tmpbuf;
- int blocksize, bodysize;
+ int blocksz, bodysize;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t cipher_in, plain_out;
rawobj_t hash_objs[3];
/* block size */
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LASSERT(kctx->kc_keye.kb_tfm == NULL);
- blocksize = 1;
+ blocksz = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/* expected token layout:
*/
bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
- if (bodysize % blocksize) {
+ if (bodysize % blocksz) {
CERROR("odd bodysize %d\n", bodysize);
return GSS_S_DEFECTIVE_TOKEN;
}
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
rawobj_t arc4_keye;
- struct crypto_blkcipher *arc4_tfm;
+ struct crypto_sync_skcipher *arc4_tfm;
cksum.data = token->data + token->len - ke->ke_hash_size;
cksum.len = ke->ke_hash_size;
GOTO(arc4_out, rc = -EACCES);
}
- arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ arc4_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
}
- if (crypto_blkcipher_setkey(arc4_tfm,
- arc4_keye.data, arc4_keye.len)) {
+ if (crypto_sync_skcipher_setkey(arc4_tfm, arc4_keye.data,
+ arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
GOTO(arc4_out_tfm, rc = -EACCES);
rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in,
&plain_out, 0);
arc4_out_tfm:
- crypto_free_blkcipher(arc4_tfm);
+ crypto_free_sync_skcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
- int blocksize;
+ int blocksz;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t cipher, plain;
rawobj_t data_desc[1];
/* block size */
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LASSERT(kctx->kc_keye.kb_tfm == NULL);
- blocksize = 1;
+ blocksz = 1;
LBUG();
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksz = crypto_sync_skcipher_blocksize(kctx->kc_keye.kb_tfm);
}
- LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+ LASSERT(sizeof(*khdr) >= blocksz && sizeof(*khdr) % blocksz == 0);
/*
* token format is expected as:
* | krb5 header | head/tail cipher text | cksum |
* -----------------------------------------------
*/
- if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
+ if (token->len < sizeof(*khdr) + blocksz + sizeof(*khdr) +
ke->ke_hash_size) {
CERROR("short token size: %u\n", token->len);
return GSS_S_DEFECTIVE_TOKEN;
}
cipher.data = (__u8 *) (khdr + 1);
- cipher.len = blocksize + sizeof(*khdr);
+ cipher.len = blocksz + sizeof(*khdr);
plain.data = cipher.data;
plain.len = cipher.len;
* ------------------------------------------
*/
data_desc[0].data = plain.data;
- data_desc[0].len = blocksize;
+ data_desc[0].len = blocksz;
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
khdr, 1, data_desc,
return GSS_S_FAILURE;
LASSERT(cksum.len >= ke->ke_hash_size);
- if (memcmp(plain.data + blocksize + sizeof(*khdr),
+ if (memcmp(plain.data + blocksz + sizeof(*khdr),
cksum.data + cksum.len - ke->ke_hash_size,
ke->ke_hash_size)) {
CERROR("checksum mismatch\n");