static struct krb5_enctype enctypes[] = {
+/* Entry field order (confirm against struct krb5_enctype declaration):
+ * display name, cipher tfm name, hash tfm name, legacy tfm mode (now
+ * unused, 0), checksum size, confounder size, keyed-hash (HMAC) flag.
+ * This hunk switches to the newer kernel crypto template naming —
+ * "cbc(des)", "hmac(sha1)", "ecb(arc4)" — so the mode constant is gone.
+ * NOTE(review): the des-cbc-md5 entry keeps plain "md5" (HMAC flag 0),
+ * while the keyed entries move to "hmac(...)"; that asymmetry is
+ * intentional per the flag column. */
[ENCTYPE_DES_CBC_RAW] = { /* des-cbc-md5 */
"des-cbc-md5",
- "des",
+ "cbc(des)",
"md5",
- CRYPTO_TFM_MODE_CBC,
+ 0,
16,
8,
0,
},
[ENCTYPE_DES3_CBC_RAW] = { /* des3-hmac-sha1 */
"des3-hmac-sha1",
- "des3_ede",
- "sha1",
- CRYPTO_TFM_MODE_CBC,
+ "cbc(des3_ede)",
+ "hmac(sha1)",
+ 0,
20,
8,
1,
},
[ENCTYPE_AES128_CTS_HMAC_SHA1_96] = { /* aes128-cts */
"aes128-cts-hmac-sha1-96",
- "aes",
- "sha1",
- CRYPTO_TFM_MODE_CBC,
+ "cbc(aes)",
+ "hmac(sha1)",
+ 0,
12,
16,
1,
},
[ENCTYPE_AES256_CTS_HMAC_SHA1_96] = { /* aes256-cts */
"aes256-cts-hmac-sha1-96",
- "aes",
- "sha1",
- CRYPTO_TFM_MODE_CBC,
+ "cbc(aes)",
+ "hmac(sha1)",
+ 0,
12,
16,
1,
},
[ENCTYPE_ARCFOUR_HMAC] = { /* arcfour-hmac-md5 */
"arcfour-hmac-md5",
- "arc4",
- "md5",
- CRYPTO_TFM_MODE_ECB,
+ "ecb(arc4)",
+ "hmac(md5)",
+ 0,
16,
8,
1,
static
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
{
+/* Allocate a block-cipher handle through the ll_crypto compat layer and
+ * program the raw key from kb->kb_key. Returns 0 on success, -1 on any
+ * failure. NOTE(review): on setkey failure the freshly allocated tfm is
+ * not freed here — presumably keyblock_destroy() (below) runs on the
+ * error path; confirm callers always destroy the keyblock. */
- kb->kb_tfm = crypto_alloc_tfm(alg_name, alg_mode);
+ kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
if (kb->kb_tfm == NULL) {
CERROR("failed to alloc tfm: %s, mode %d\n",
alg_name, alg_mode);
return -1;
}
- if (crypto_cipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
+ if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
CERROR("failed to set %s key, len %d\n",
alg_name, kb->kb_key.len);
return -1;
{
+/* keyblock_destroy body (signature elided by the hunk): release key
+ * material and the cipher handle via the matching ll_crypto free. */
rawobj_free(&kb->kb_key);
if (kb->kb_tfm)
- crypto_free_tfm(kb->kb_tfm);
+ ll_crypto_free_blkcipher(kb->kb_tfm);
}
static
knew->kc_cfx = kctx->kc_cfx;
knew->kc_seed_init = kctx->kc_seed_init;
knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
+/* NOTE(review): the old workaround pinned reverse-context expiry to
+ * INT_MAX ("reverse context don't expire for now"); dropping it makes
+ * the reverse context inherit the real kc_endtime — confirm reverse
+ * contexts are refreshed before that deadline. */
-#if 0
knew->kc_endtime = kctx->kc_endtime;
-#else
- /* FIXME reverse context don't expire for now */
- knew->kc_endtime = INT_MAX;
-#endif
+
memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
+/* sequence numbers are mirrored: our send counter is the peer's recv */
knew->kc_seq_send = kctx->kc_seq_recv;
knew->kc_seq_recv = kctx->kc_seq_send;
}
static
+/*
+ * In-place block-cipher en/decrypt of @length bytes from @in to @out.
+ * @iv may be NULL (an all-zero IV is used); @length must be a multiple
+ * of the cipher blocksize and the IV must fit the 16-byte local buffer.
+ * Ported to the blkcipher_desc API: the IV now travels in desc.info
+ * (pointing at local_iv) instead of a trailing argument.
+ * Returns 0 on success, -EINVAL or a crypto-layer error otherwise.
+ */
-__u32 krb5_encrypt(struct crypto_tfm *tfm,
+__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
int decrypt,
void * iv,
void * in,
void * out,
int length)
{
- struct scatterlist sg;
+ struct blkcipher_desc desc;
+ struct scatterlist sg;
__u8 local_iv[16] = {0};
__u32 ret = -EINVAL;
LASSERT(tfm);
+ desc.tfm = tfm;
+ desc.info = local_iv;
+ desc.flags= 0;
- if (length % crypto_tfm_alg_blocksize(tfm) != 0) {
+ if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
CERROR("output length %d mismatch blocksize %d\n",
- length, crypto_tfm_alg_blocksize(tfm));
+ length, ll_crypto_blkcipher_blocksize(tfm));
goto out;
}
- if (crypto_tfm_alg_ivsize(tfm) > 16) {
- CERROR("iv size too large %d\n", crypto_tfm_alg_ivsize(tfm));
+ if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
+ CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
goto out;
}
if (iv)
- memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm));
+ memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
+/* copy plaintext into the output buffer, then transform in place */
memcpy(out, in, length);
buf_to_sg(&sg, out, length);
if (decrypt)
- ret = crypto_cipher_decrypt_iv(tfm, &sg, &sg, length, local_iv);
+ ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
else
- ret = crypto_cipher_encrypt_iv(tfm, &sg, &sg, length, local_iv);
+ ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
out:
return(ret);
}
static inline
-int krb5_digest_hmac(struct crypto_tfm *tfm,
+int krb5_digest_hmac(struct ll_crypto_hash *tfm,
rawobj_t *key,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
rawobj_t *cksum)
+#ifdef HAVE_ASYNC_BLOCK_CIPHER
+{
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ int i;
+
+ /* HMAC over every non-empty msg followed by the krb5 header (if any).
+ * NOTE(review): setkey/init/update return codes are ignored; only the
+ * final() status is propagated — consider checking setkey at least. */
+ ll_crypto_hash_setkey(tfm, key->data, key->len);
+ desc.tfm = tfm;
+ desc.flags= 0;
+
+ ll_crypto_hash_init(&desc);
+
+ for (i = 0; i < msgcnt; i++) {
+ if (msgs[i].len == 0)
+ continue;
+ buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
+ ll_crypto_hash_update(&desc, sg, msgs[i].len);
+ }
+
+ if (khdr) {
+ buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
+ ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+ }
+
+ return ll_crypto_hash_final(&desc, cksum->data);
+}
+#else /* HAVE_ASYNC_BLOCK_CIPHER */
{
struct scatterlist sg[1];
__u32 keylen = key->len, i;
crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
return 0;
}
+#endif /* HAVE_ASYNC_BLOCK_CIPHER */
static inline
-int krb5_digest_norm(struct crypto_tfm *tfm,
+int krb5_digest_norm(struct ll_crypto_hash *tfm,
struct krb5_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
rawobj_t *cksum)
{
+ /* Plain (unkeyed) digest of msgs + krb5 header, then the digest is
+ * encrypted with kb->kb_tfm to make it a keyed checksum (the des-cbc
+ * style). NOTE(review): the new API takes a byte count where the old
+ * crypto_digest_update() took an sg count — the call sites below were
+ * updated accordingly; hash_final's status is still ignored. */
+ struct hash_desc desc;
struct scatterlist sg[1];
int i;
LASSERT(kb->kb_tfm);
+ desc.tfm = tfm;
+ desc.flags= 0;
- crypto_digest_init(tfm);
+ ll_crypto_hash_init(&desc);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- crypto_digest_update(tfm, sg, 1);
+ ll_crypto_hash_update(&desc, sg, msgs[i].len);
}
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- crypto_digest_update(tfm, sg, 1);
+ ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
}
- crypto_digest_final(tfm, cksum->data);
+ ll_crypto_hash_final(&desc, cksum->data);
return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
cksum->data, cksum->len);
int msgcnt, rawobj_t *msgs,
rawobj_t *cksum)
{
+ /* krb5_make_checksum (head of signature elided by the hunk):
+ * allocate a hash tfm by the enctype's hash name, size the output
+ * buffer from the new digestsize accessor, and dispatch to the hmac
+ * or norm digest helper (dispatch code elided). */
- struct krb5_enctype *ke = &enctypes[enctype];
- struct crypto_tfm *tfm;
- __u32 code = GSS_S_FAILURE;
- int rc;
+ struct krb5_enctype *ke = &enctypes[enctype];
+ struct ll_crypto_hash *tfm;
+ __u32 code = GSS_S_FAILURE;
+ int rc;
- if (!(tfm = crypto_alloc_tfm(ke->ke_hash_name, 0))) {
+ if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
return GSS_S_FAILURE;
}
- cksum->len = crypto_tfm_alg_digestsize(tfm);
+ cksum->len = ll_crypto_hash_digestsize(tfm);
OBD_ALLOC(cksum->data, cksum->len);
if (!cksum->data) {
cksum->len = 0;
if (rc == 0)
code = GSS_S_COMPLETE;
out_tfm:
- crypto_free_tfm(tfm);
+ ll_crypto_free_hash(tfm);
return code;
}
}
static
+/*
+ * En/decrypt a vector of rawobjs into @outobj (scatterlist setup for
+ * src/dst is in hunk context not shown). @mode_ecb selects the plain
+ * encrypt/decrypt entry points (IV unused); otherwise the IV-carrying
+ * variants are used with desc.info pointing at the zeroed local_iv,
+ * chaining across objects. @enc: 1 = encrypt, 0 = decrypt.
+ */
-int krb5_encrypt_rawobjs(struct crypto_tfm *tfm,
+int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
int mode_ecb,
int inobj_cnt,
rawobj_t *inobjs,
rawobj_t *outobj,
int enc)
{
- struct scatterlist src, dst;
- __u8 local_iv[16] = {0}, *buf;
- __u32 datalen = 0;
- int i, rc;
+ struct blkcipher_desc desc;
+ struct scatterlist src, dst;
+ __u8 local_iv[16] = {0}, *buf;
+ __u32 datalen = 0;
+ int i, rc;
ENTRY;
buf = outobj->data;
+ desc.tfm = tfm;
+ desc.info = local_iv;
+ desc.flags = 0;
for (i = 0; i < inobj_cnt; i++) {
LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
if (mode_ecb) {
if (enc)
- rc = crypto_cipher_encrypt(
- tfm, &dst, &src, src.length);
+ rc = ll_crypto_blkcipher_encrypt(
+ &desc, &dst, &src, src.length);
else
- rc = crypto_cipher_decrypt(
- tfm, &dst, &src, src.length);
+ rc = ll_crypto_blkcipher_decrypt(
+ &desc, &dst, &src, src.length);
} else {
if (enc)
- rc = crypto_cipher_encrypt_iv(
- tfm, &dst, &src, src.length, local_iv);
+ rc = ll_crypto_blkcipher_encrypt_iv(
+ &desc, &dst, &src, src.length);
else
- rc = crypto_cipher_decrypt_iv(
- tfm, &dst, &src, src.length, local_iv);
+ rc = ll_crypto_blkcipher_decrypt_iv(
+ &desc, &dst, &src, src.length);
}
if (rc) {
static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
+ rawobj_t *gsshdr,
rawobj_t *msg,
int msg_buflen,
rawobj_t *token)
unsigned char acceptor_flag;
int blocksize;
rawobj_t cksum = RAWOBJ_EMPTY;
- rawobj_t data_desc[3], cipher;
+ rawobj_t data_desc[4], cipher;
__u8 conf[GSS_MAX_CIPHER_BLOCK];
int enc_rc = 0;
LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
LASSERT(kctx->kc_keye.kb_tfm == NULL ||
ke->ke_conf_size >=
- crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm));
+ ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
- acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
+ /*
+ * final token format:
+ * ---------------------------------------------------
+ * | krb5 header | cipher text | checksum (16 bytes) |
+ * ---------------------------------------------------
+ */
/* fill krb5 header */
LASSERT(token->len >= sizeof(*khdr));
khdr = (struct krb5_header *) token->data;
+ acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
get_random_bytes(conf, ke->ke_conf_size);
/* get encryption blocksize. note kc_keye might not associated with
- * a tfm, currently only for arcfour-hmac
- */
+ * a tfm, currently only for arcfour-hmac */
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LASSERT(kctx->kc_keye.kb_tfm == NULL);
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
LASSERT(blocksize <= ke->ke_conf_size);
return GSS_S_FAILURE;
/*
- * clear text layout, same for both checksum & encryption:
+ * clear text layout for checksum:
+ * ------------------------------------------------------
+ * | confounder | gss header | clear msgs | krb5 header |
+ * ------------------------------------------------------
+ */
+/* NOTE(review): the checksum now also covers the GSS header (gsshdr),
+ * cryptographically binding it to the wrap token — a wire-format
+ * change; gss_unwrap_kerberos() must mirror this exactly. */
+ data_desc[0].data = conf;
+ data_desc[0].len = ke->ke_conf_size;
+ data_desc[1].data = gsshdr->data;
+ data_desc[1].len = gsshdr->len;
+ data_desc[2].data = msg->data;
+ data_desc[2].len = msg->len;
+ data_desc[3].data = (__u8 *) khdr;
+ data_desc[3].len = sizeof(*khdr);
+
+ /* compute checksum */
+ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+ khdr, 4, data_desc, &cksum))
+ return GSS_S_FAILURE;
+ LASSERT(cksum.len >= ke->ke_hash_size);
+
+ /*
+ * clear text layout for encryption:
* -----------------------------------------
* | confounder | clear msgs | krb5 header |
* -----------------------------------------
data_desc[2].data = (__u8 *) khdr;
data_desc[2].len = sizeof(*khdr);
- /* compute checksum */
- if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
- khdr, 3, data_desc, &cksum))
- return GSS_S_FAILURE;
- LASSERT(cksum.len >= ke->ke_hash_size);
-
- /* encrypting, cipher text will be directly inplace */
+ /* cipher text will be directly inplace */
cipher.data = (__u8 *) (khdr + 1);
cipher.len = token->len - sizeof(*khdr);
LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ /* arcfour: derive a per-message key from the checksum, then
+ * RC4 ("ecb(arc4)") the 3-part clear text with a throwaway tfm */
- rawobj_t arc4_keye;
- struct crypto_tfm *arc4_tfm;
+ rawobj_t arc4_keye;
+ struct ll_crypto_cipher *arc4_tfm;
if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
NULL, 1, &cksum, &arc4_keye)) {
GOTO(arc4_out, enc_rc = -EACCES);
}
- arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB);
+ arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (arc4_tfm == NULL) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, enc_rc = -EACCES);
}
- if (crypto_cipher_setkey(arc4_tfm,
- arc4_keye.data, arc4_keye.len)) {
+ if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
+ arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
GOTO(arc4_out_tfm, enc_rc = -EACCES);
enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
3, data_desc, &cipher, 1);
arc4_out_tfm:
- crypto_free_tfm(arc4_tfm);
+ ll_crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
static
__u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
+ rawobj_t *gsshdr,
rawobj_t *token,
rawobj_t *msg)
{
int blocksize, bodysize;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t cipher_in, plain_out;
+ rawobj_t hash_objs[3];
__u32 rc = GSS_S_FAILURE, enc_rc = 0;
LASSERT(ke);
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/* expected token layout:
plain_out.len = bodysize;
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ /* arcfour: rebuild the per-message RC4 key from the received
+ * checksum, mirroring the wrap side */
- rawobj_t arc4_keye;
- struct crypto_tfm *arc4_tfm;
+ rawobj_t arc4_keye;
+ struct ll_crypto_cipher *arc4_tfm;
cksum.data = token->data + token->len - ke->ke_hash_size;
cksum.len = ke->ke_hash_size;
GOTO(arc4_out, enc_rc = -EACCES);
}
- arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB);
+ arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (arc4_tfm == NULL) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, enc_rc = -EACCES);
}
- if (crypto_cipher_setkey(arc4_tfm,
+ if (ll_crypto_blkcipher_setkey(arc4_tfm,
arc4_keye.data, arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1, &cipher_in, &plain_out, 0);
arc4_out_tfm:
- crypto_free_tfm(arc4_tfm);
+ ll_crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
* -----------------------------------------
*/
- /* last part must be identical to the krb5 header */
+ /* verify krb5 header in token is not modified */
if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
sizeof(*khdr))) {
- CERROR("decrypted header mismatch\n");
+ CERROR("decrypted krb5 header mismatch\n");
goto out_free;
}
- /* verify checksum */
+ /* verify checksum, compose clear text as layout:
+ * ------------------------------------------------------
+ * | confounder | gss header | clear msgs | krb5 header |
+ * ------------------------------------------------------
+ */
+ hash_objs[0].len = ke->ke_conf_size;
+ hash_objs[0].data = plain_out.data;
+ hash_objs[1].len = gsshdr->len;
+ hash_objs[1].data = gsshdr->data;
+ hash_objs[2].len = plain_out.len - ke->ke_conf_size;
+ hash_objs[2].data = plain_out.data + ke->ke_conf_size;
+/* NOTE(review): hash_objs[2] spans the clear msgs *and* the trailing
+ * krb5 header together, so these 3 objects reproduce byte-for-byte the
+ * 4-part layout hashed by gss_wrap_kerberos(). */
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
- khdr, 1, &plain_out, &cksum))
+ khdr, 3, hash_objs, &cksum))
goto out_free;
LASSERT(cksum.len >= ke->ke_hash_size);
static
+/* Plain (no token framing) en/decrypt of @length bytes with the
+ * context's encryption key. The new @decrypt flag is forwarded to
+ * krb5_encrypt() instead of the former hard-coded 0 (encrypt-only). */
__u32 gss_plain_encrypt_kerberos(struct gss_ctx *ctx,
+ int decrypt,
int length,
void *in_buf,
void *out_buf)
struct krb5_ctx *kctx = ctx->internal_ctx_id;
__u32 rc;
- rc = krb5_encrypt(kctx->kc_keye.kb_tfm, 0,
+ rc = krb5_encrypt(kctx->kc_keye.kb_tfm, decrypt,
NULL, in_buf, out_buf, length);
if (rc)
+ /* NOTE(review): message still says "encrypt" even on the
+ * decrypt path — consider rewording in a follow-up */
CERROR("plain encrypt error: %d\n", rc);
struct krb5_ctx *kctx = ctx->internal_ctx_id;
int written;
+/* display format changes from a two-line "mech/enctype" dump to a
+ * compact single-line "krb5 (<enctype>)" string; returns snprintf's
+ * count (may exceed bufsize on truncation — presumably callers cope) */
- written = snprintf(buf, bufsize,
- " mech: krb5\n"
- " enctype: %s\n",
- enctype2str(kctx->kc_enctype));
+ written = snprintf(buf, bufsize, "krb5 (%s)",
+ enctype2str(kctx->kc_enctype));
return written;
}
static struct subflavor_desc gss_kerberos_sfs[] = {
+/* Subflavor rework: old "krb5" becomes "krb5n" (null service), a new
+ * "krb5a" (auth-only) is added, and "krb5i" moves from SVC_AUTH to
+ * SVC_INTG. A fourth entry (presumably krb5p) follows in elided
+ * context — gm_sf_num below is bumped to 4 to match. */
{
- .sf_subflavor = SPTLRPC_SUBFLVR_KRB5,
+ .sf_subflavor = SPTLRPC_SUBFLVR_KRB5N,
.sf_qop = 0,
- .sf_service = SPTLRPC_SVC_NONE,
- .sf_name = "krb5"
+ .sf_service = SPTLRPC_SVC_NULL,
+ .sf_name = "krb5n"
},
{
- .sf_subflavor = SPTLRPC_SUBFLVR_KRB5I,
+ .sf_subflavor = SPTLRPC_SUBFLVR_KRB5A,
.sf_qop = 0,
.sf_service = SPTLRPC_SVC_AUTH,
+ .sf_name = "krb5a"
+ },
+ {
+ .sf_subflavor = SPTLRPC_SUBFLVR_KRB5I,
+ .sf_qop = 0,
+ .sf_service = SPTLRPC_SVC_INTG,
.sf_name = "krb5i"
},
{
.gm_oid = (rawobj_t)
{9, "\052\206\110\206\367\022\001\002\002"},
.gm_ops = &gss_kerberos_ops,
+/* gm_sf_num must equal the number of entries in gss_kerberos_sfs[] */
- .gm_sf_num = 3,
+ .gm_sf_num = 4,
.gm_sfs = gss_kerberos_sfs,
};