Currently the GSS code for Lustre directly uses the linux crypto API.
The GSS code uses struct crypto_hash, which has now been removed in
newer kernels in favor of struct crypto_ahash. It is possible that we
could run into this issue again, so to make porting easier let's move
the GSS code to the libcfs crypto API. That way, when the Linux crypto
API changes in the future, the libcfs layer will absorb those changes
and GSS will not need further patches. This patch also exposes some of
the libcfs crypto functions to userland.
Change-Id: I7baed64d0340ad864732a782ea401e2e0e9ae1b7
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-on: https://review.whamcloud.com/23289
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Tested-by: Oleg Drokin <oleg.drokin@intel.com>
/* cfs crypto hash descriptor */
struct cfs_crypto_hash_desc;
/* cfs crypto hash descriptor */
struct cfs_crypto_hash_desc;
struct cfs_crypto_hash_desc *
cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
struct cfs_crypto_hash_desc *
cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
-enum sk_hmac_alg {
- SK_HMAC_INVALID = -1,
- SK_HMAC_EMPTY = 0,
- SK_HMAC_SHA256 = 1,
- SK_HMAC_SHA512 = 2,
- SK_HMAC_MAX = 3,
-};
-
- char *sct_name;
- size_t sct_bytes;
-};
-
-struct sk_hmac_type {
- char *sht_name;
- size_t sht_bytes;
+ char *cht_name;
+ unsigned int cht_key;
+ unsigned int cht_bytes;
-int gss_digest_hmac(struct crypto_hash *tfm,
+int gss_digest_hmac(struct crypto_ahash *tfm,
rawobj_t *key,
rawobj_t *hdr,
int msgcnt, rawobj_t *msgs,
int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
rawobj_t *key,
rawobj_t *hdr,
int msgcnt, rawobj_t *msgs,
int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
- struct hash_desc desc = {
- .tfm = tfm,
- .flags = 0,
- };
+ struct ahash_request *req;
struct scatterlist sg[1];
struct sg_table sgt;
int i;
int rc;
struct scatterlist sg[1];
struct sg_table sgt;
int i;
int rc;
- rc = crypto_hash_setkey(tfm, key->data, key->len);
+ rc = crypto_ahash_setkey(tfm, key->data, key->len);
- rc = crypto_hash_init(&desc);
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ crypto_free_ahash(tfm);
+ return -ENOMEM;
+ }
+
+ rc = crypto_ahash_init(req);
rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
if (rc != 0)
return rc;
rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
if (rc != 0)
return rc;
- rc = crypto_hash_update(&desc, sg, msgs[i].len);
+ ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
+ if (rc)
+ return rc;
+ rc = crypto_ahash_update(req);
sg_init_table(sg, 1);
sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
iovs[i].kiov_offset);
sg_init_table(sg, 1);
sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
iovs[i].kiov_offset);
- rc = crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+
+ ahash_request_set_crypt(req, sg, NULL, iovs[i].kiov_len);
+ if (rc)
+ return rc;
+ rc = crypto_ahash_update(req);
rc = gss_setup_sgtable(&sgt, sg, hdr, sizeof(*hdr));
if (rc != 0)
return rc;
rc = gss_setup_sgtable(&sgt, sg, hdr, sizeof(*hdr));
if (rc != 0)
return rc;
- rc = crypto_hash_update(&desc, sg, sizeof(hdr->len));
+
+ ahash_request_set_crypt(req, sg, NULL, sizeof(hdr->len));
+ if (rc)
+ return rc;
+ rc = crypto_ahash_update(req);
if (rc)
return rc;
gss_teardown_sgtable(&sgt);
}
if (rc)
return rc;
gss_teardown_sgtable(&sgt);
}
- return crypto_hash_final(&desc, cksum->data);
+ return crypto_ahash_final(req);
-int gss_digest_norm(struct crypto_hash *tfm,
+int gss_digest_norm(struct crypto_ahash *tfm,
struct gss_keyblock *kb,
rawobj_t *hdr,
int msgcnt, rawobj_t *msgs,
int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
struct gss_keyblock *kb,
rawobj_t *hdr,
int msgcnt, rawobj_t *msgs,
int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
+ struct ahash_request *req;
struct scatterlist sg[1];
struct sg_table sgt;
int i;
int rc;
LASSERT(kb->kb_tfm);
struct scatterlist sg[1];
struct sg_table sgt;
int i;
int rc;
LASSERT(kb->kb_tfm);
- desc.tfm = tfm;
- desc.flags = 0;
- rc = crypto_hash_init(&desc);
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ crypto_free_ahash(tfm);
+ return -ENOMEM;
+ }
+
+ rc = crypto_ahash_init(req);
- rc = crypto_hash_update(&desc, sg, msgs[i].len);
+ ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
+ if (rc)
+ return rc;
+ rc = crypto_ahash_update(req);
sg_init_table(sg, 1);
sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
iovs[i].kiov_offset);
sg_init_table(sg, 1);
sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
iovs[i].kiov_offset);
- rc = crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+ ahash_request_set_crypt(req, sg, NULL, iovs[i].kiov_len);
+ if (rc)
+ return rc;
+ rc = crypto_ahash_update(req);
- rc = crypto_hash_update(&desc, sg, sizeof(*hdr));
+ ahash_request_set_crypt(req, sg, NULL, sizeof(*hdr));
+ if (rc)
+ return rc;
+ rc = crypto_ahash_update(req);
if (rc)
return rc;
gss_teardown_sgtable(&sgt);
}
if (rc)
return rc;
gss_teardown_sgtable(&sgt);
}
- rc = crypto_hash_final(&desc, cksum->data);
+ rc = crypto_ahash_final(req);
void gss_teardown_sgtable(struct sg_table *sgt);
int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv,
const void *in, void *out, size_t length);
void gss_teardown_sgtable(struct sg_table *sgt);
int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv,
const void *in, void *out, size_t length);
-int gss_digest_hmac(struct crypto_hash *tfm, rawobj_t *key, rawobj_t *hdr,
+int gss_digest_hmac(struct crypto_ahash *tfm, rawobj_t *key, rawobj_t *hdr,
int msgcnt, rawobj_t *msgs, int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum);
int msgcnt, rawobj_t *msgs, int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum);
-int gss_digest_norm(struct crypto_hash *tfm, struct gss_keyblock *kb,
+int gss_digest_norm(struct crypto_ahash *tfm, struct gss_keyblock *kb,
rawobj_t *hdr, int msgcnt, rawobj_t *msgs, int iovcnt,
lnet_kiov_t *iovs, rawobj_t *cksum);
int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize);
rawobj_t *hdr, int msgcnt, rawobj_t *msgs, int iovcnt,
lnet_kiov_t *iovs, rawobj_t *cksum);
int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize);
#ifndef __PTLRPC_GSS_GSS_INTERNAL_H_
#define __PTLRPC_GSS_GSS_INTERNAL_H_
#ifndef __PTLRPC_GSS_GSS_INTERNAL_H_
#define __PTLRPC_GSS_GSS_INTERNAL_H_
-#include <linux/crypto.h>
+#include <crypto/hash.h>
#include <lustre_sec.h>
/*
#include <lustre_sec.h>
/*
rawobj_t *cksum)
{
struct krb5_enctype *ke = &enctypes[enctype];
rawobj_t *cksum)
{
struct krb5_enctype *ke = &enctypes[enctype];
- struct crypto_hash *tfm;
+ struct crypto_ahash *tfm;
rawobj_t hdr;
__u32 code = GSS_S_FAILURE;
int rc;
rawobj_t hdr;
__u32 code = GSS_S_FAILURE;
int rc;
- if (!(tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
+ tfm = crypto_alloc_ahash(ke->ke_hash_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
return GSS_S_FAILURE;
}
CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
return GSS_S_FAILURE;
}
- cksum->len = crypto_hash_digestsize(tfm);
+ cksum->len = crypto_ahash_digestsize(tfm);
OBD_ALLOC_LARGE(cksum->data, cksum->len);
if (!cksum->data) {
cksum->len = 0;
OBD_ALLOC_LARGE(cksum->data, cksum->len);
if (!cksum->data) {
cksum->len = 0;
if (rc == 0)
code = GSS_S_COMPLETE;
out_tfm:
if (rc == 0)
code = GSS_S_COMPLETE;
out_tfm:
+ crypto_free_ahash(tfm);
#include <linux/mutex.h>
#include <crypto/ctr.h>
#include <linux/mutex.h>
#include <crypto/ctr.h>
+#include <libcfs/libcfs_crypto.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#define SK_IV_REV_START (1ULL << 63)
struct sk_ctx {
#define SK_IV_REV_START (1ULL << 63)
struct sk_ctx {
__u16 sc_crypt;
__u32 sc_expire;
__u32 sc_host_random;
__u16 sc_crypt;
__u32 sc_expire;
__u32 sc_host_random;
atomic64_t sc_iv;
rawobj_t sc_hmac_key;
struct gss_keyblock sc_session_kb;
atomic64_t sc_iv;
rawobj_t sc_hmac_key;
struct gss_keyblock sc_session_kb;
+ enum cfs_crypto_hash_alg sc_hmac;
static struct sk_crypt_type sk_crypt_types[] = {
[SK_CRYPT_AES256_CTR] = {
static struct sk_crypt_type sk_crypt_types[] = {
[SK_CRYPT_AES256_CTR] = {
- .sct_name = "ctr(aes)",
- .sct_bytes = 32,
- },
-};
-
-static struct sk_hmac_type sk_hmac_types[] = {
- [SK_HMAC_SHA256] = {
- .sht_name = "hmac(sha256)",
- .sht_bytes = 32,
- },
- [SK_HMAC_SHA512] = {
- .sht_name = "hmac(sha512)",
- .sht_bytes = 64,
+ .cht_name = "aes256",
+ .cht_key = 0,
+ .cht_bytes = 32,
static int sk_init_keys(struct sk_ctx *skc)
{
return gss_keyblock_init(&skc->sc_session_kb,
static int sk_init_keys(struct sk_ctx *skc)
{
return gss_keyblock_init(&skc->sc_session_kb,
- sk_crypt_types[skc->sc_crypt].sct_name, 0);
+ sk_crypt_types[skc->sc_crypt].cht_name, 0);
}
static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc)
}
static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc)
CERROR("Failed to read HMAC algorithm type");
return -1;
}
CERROR("Failed to read HMAC algorithm type");
return -1;
}
- if (skc->sc_hmac <= SK_HMAC_EMPTY || skc->sc_hmac >= SK_HMAC_MAX) {
+ if (skc->sc_hmac >= CFS_HASH_ALG_MAX) {
CERROR("Invalid hmac type: %d\n", skc->sc_hmac);
return -1;
}
CERROR("Invalid hmac type: %d\n", skc->sc_hmac);
return -1;
}
-__u32 sk_make_hmac(char *alg_name, rawobj_t *key, int msg_count, rawobj_t *msgs,
- int iov_count, lnet_kiov_t *iovs, rawobj_t *token)
+__u32 sk_make_hmac(const char *alg_name, rawobj_t *key, int msg_count,
+ rawobj_t *msgs, int iov_count, lnet_kiov_t *iovs,
+ rawobj_t *token)
- struct crypto_hash *tfm;
+ struct crypto_ahash *tfm;
- tfm = crypto_alloc_hash(alg_name, 0, 0);
+ tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
return GSS_S_FAILURE;
rc = GSS_S_FAILURE;
if (IS_ERR(tfm))
return GSS_S_FAILURE;
rc = GSS_S_FAILURE;
- LASSERT(token->len >= crypto_hash_digestsize(tfm));
+ LASSERT(token->len >= crypto_ahash_digestsize(tfm));
if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,
token))
rc = GSS_S_COMPLETE;
if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,
token))
rc = GSS_S_COMPLETE;
+ crypto_free_ahash(tfm);
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- return sk_make_hmac(sk_hmac_types[skc->sc_hmac].sht_name,
+ return sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac),
&skc->sc_hmac_key, message_count, messages,
iov_count, iovs, token);
}
static
&skc->sc_hmac_key, message_count, messages,
iov_count, iovs, token);
}
static
-__u32 sk_verify_hmac(struct sk_hmac_type *sht, rawobj_t *key, int message_count,
- rawobj_t *messages, int iov_count, lnet_kiov_t *iovs,
- rawobj_t *token)
+u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
+ int message_count, rawobj_t *messages, int iov_count,
+ lnet_kiov_t *iovs, rawobj_t *token)
{
rawobj_t checksum = RAWOBJ_EMPTY;
__u32 rc = GSS_S_FAILURE;
{
rawobj_t checksum = RAWOBJ_EMPTY;
__u32 rc = GSS_S_FAILURE;
- checksum.len = sht->sht_bytes;
+ checksum.len = cfs_crypto_hash_digestsize(algo);
	/* NOTE(review): cfs_crypto_hash_digestsize() returns 0 for an
	 * unrecognized algorithm, which would make the token-length
	 * check below vacuously pass — confirm checksum.len != 0 here. */

if (token->len < checksum.len) {
CDEBUG(D_SEC, "Token received too short, expected %d "
"received %d\n", token->len, checksum.len);
if (token->len < checksum.len) {
CDEBUG(D_SEC, "Token received too short, expected %d "
"received %d\n", token->len, checksum.len);
if (!checksum.data)
return rc;
if (!checksum.data)
return rc;
- if (sk_make_hmac(sht->sht_name, key, message_count, messages,
- iov_count, iovs, &checksum)) {
+ if (sk_make_hmac(cfs_crypto_hash_name(algo), key, message_count,
+ messages, iov_count, iovs, &checksum)) {
CDEBUG(D_SEC, "Failed to create checksum to validate\n");
goto cleanup;
}
CDEBUG(D_SEC, "Failed to create checksum to validate\n");
goto cleanup;
}
* to decrypt up to the number of bytes actually specified from the sender
* (bd_nob) otherwise the calulated HMAC will be incorrect. */
static
* to decrypt up to the number of bytes actually specified from the sender
* (bd_nob) otherwise the calulated HMAC will be incorrect. */
static
-__u32 sk_verify_bulk_hmac(struct sk_hmac_type *sht, rawobj_t *key,
- int msgcnt, rawobj_t *msgs, int iovcnt,
- lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token)
+__u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac,
+ rawobj_t *key, int msgcnt, rawobj_t *msgs,
+ int iovcnt, lnet_kiov_t *iovs, int iov_bytes,
+ rawobj_t *token)
{
rawobj_t checksum = RAWOBJ_EMPTY;
{
rawobj_t checksum = RAWOBJ_EMPTY;
- struct crypto_hash *tfm;
- struct hash_desc desc = {
- .tfm = NULL,
- .flags = 0,
- };
- struct scatterlist sg[1];
- struct sg_table sgt;
- int bytes;
- int i;
- int rc = GSS_S_FAILURE;
+ struct cfs_crypto_hash_desc *hdesc;
+ int rc = GSS_S_FAILURE, i;
- checksum.len = sht->sht_bytes;
+ checksum.len = cfs_crypto_hash_digestsize(sc_hmac);
if (token->len < checksum.len) {
CDEBUG(D_SEC, "Token received too short, expected %d "
"received %d\n", token->len, checksum.len);
if (token->len < checksum.len) {
CDEBUG(D_SEC, "Token received too short, expected %d "
"received %d\n", token->len, checksum.len);
if (!checksum.data)
return rc;
if (!checksum.data)
return rc;
- tfm = crypto_alloc_hash(sht->sht_name, 0, 0);
- if (IS_ERR(tfm))
- goto cleanup;
-
- desc.tfm = tfm;
-
- LASSERT(token->len >= crypto_hash_digestsize(tfm));
-
- rc = crypto_hash_setkey(tfm, key->data, key->len);
- if (rc)
- goto hash_cleanup;
-
- rc = crypto_hash_init(&desc);
- if (rc)
- goto hash_cleanup;
-
for (i = 0; i < msgcnt; i++) {
for (i = 0; i < msgcnt; i++) {
- rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
- if (rc != 0)
- goto hash_cleanup;
-
- rc = crypto_hash_update(&desc, sg, msgs[i].len);
- if (rc) {
- gss_teardown_sgtable(&sgt);
- goto hash_cleanup;
- }
+ rc = cfs_crypto_hash_digest(sc_hmac, msgs[i].data, msgs[i].len,
+ key->data, key->len,
+ checksum.data, &checksum.len);
+ if (rc)
+ goto cleanup;
+ }
- gss_teardown_sgtable(&sgt);
+ hdesc = cfs_crypto_hash_init(sc_hmac, key->data, key->len);
+ if (IS_ERR(hdesc)) {
+ rc = PTR_ERR(hdesc);
+ goto cleanup;
}
for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
}
for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
if (iovs[i].kiov_len == 0)
continue;
bytes = min_t(int, iov_bytes, iovs[i].kiov_len);
iov_bytes -= bytes;
if (iovs[i].kiov_len == 0)
continue;
bytes = min_t(int, iov_bytes, iovs[i].kiov_len);
iov_bytes -= bytes;
-
- sg_init_table(sg, 1);
- sg_set_page(&sg[0], iovs[i].kiov_page, bytes,
- iovs[i].kiov_offset);
- rc = crypto_hash_update(&desc, sg, bytes);
+ rc = cfs_crypto_hash_update_page(hdesc, iovs[i].kiov_page,
+ iovs[i].kiov_offset, bytes);
- crypto_hash_final(&desc, checksum.data);
+ rc = cfs_crypto_hash_final(hdesc, checksum.data, &checksum.len);
+ if (rc)
+ goto cleanup;
if (memcmp(token->data, checksum.data, checksum.len)) {
rc = GSS_S_BAD_SIG;
if (memcmp(token->data, checksum.data, checksum.len)) {
rc = GSS_S_BAD_SIG;
-
-hash_cleanup:
- crypto_free_hash(tfm);
-
cleanup:
OBD_FREE_LARGE(checksum.data, checksum.len);
cleanup:
OBD_FREE_LARGE(checksum.data, checksum.len);
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- return sk_verify_hmac(&sk_hmac_types[skc->sc_hmac], &skc->sc_hmac_key,
+ return sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key,
message_count, messages, iov_count, iovs, token);
}
message_count, messages, iov_count, iovs, token);
}
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
+ size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
struct sk_wire skw;
struct sk_hdr skh;
rawobj_t msgbufs[3];
struct sk_wire skw;
struct sk_hdr skh;
rawobj_t msgbufs[3];
sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
- skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
+ skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message,
&skw.skw_cipher, 1))
return GSS_S_FAILURE;
if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message,
&skw.skw_cipher, 1))
return GSS_S_FAILURE;
msgbufs[2] = skw.skw_cipher;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
msgbufs[2] = skw.skw_cipher;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
- skw.skw_hmac.len = sht->sht_bytes;
- if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 3, msgbufs, 0,
- NULL, &skw.skw_hmac))
+ skw.skw_hmac.len = sht_bytes;
+ if (sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac), &skc->sc_hmac_key,
+ 3, msgbufs, 0, NULL, &skw.skw_hmac))
return GSS_S_FAILURE;
token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len;
return GSS_S_FAILURE;
token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len;
rawobj_t *token, rawobj_t *message)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
rawobj_t *token, rawobj_t *message)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
+ size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
struct sk_wire skw;
struct sk_hdr *skh;
rawobj_t msgbufs[3];
struct sk_wire skw;
struct sk_hdr *skh;
rawobj_t msgbufs[3];
LASSERT(skc->sc_session_kb.kb_tfm);
LASSERT(skc->sc_session_kb.kb_tfm);
- if (token->len < sizeof(skh) + sht->sht_bytes)
+ if (token->len < sizeof(skh) + sht_bytes)
return GSS_S_DEFECTIVE_TOKEN;
skw.skw_header.data = token->data;
skw.skw_header.len = sizeof(struct sk_hdr);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
return GSS_S_DEFECTIVE_TOKEN;
skw.skw_header.data = token->data;
skw.skw_header.len = sizeof(struct sk_hdr);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
- skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
+ skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
- skw.skw_hmac.len = sht->sht_bytes;
+ skw.skw_hmac.len = sht_bytes;
blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
if (skw.skw_cipher.len % blocksize != 0)
blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
if (skw.skw_cipher.len % blocksize != 0)
msgbufs[0] = skw.skw_header;
msgbufs[1] = *gss_header;
msgbufs[2] = skw.skw_cipher;
msgbufs[0] = skw.skw_header;
msgbufs[1] = *gss_header;
msgbufs[2] = skw.skw_cipher;
- rc = sk_verify_hmac(sht, &skc->sc_hmac_key, 3, msgbufs, 0, NULL,
- &skw.skw_hmac);
+ rc = sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key, 3, msgbufs,
+ 0, NULL, &skw.skw_hmac);
int adj_nob)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
int adj_nob)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
+ size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
struct sk_wire skw;
struct sk_hdr skh;
__u8 local_iv[SK_IV_SIZE];
struct sk_wire skw;
struct sk_hdr skh;
__u8 local_iv[SK_IV_SIZE];
sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
- skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
+ skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
desc, &skw.skw_cipher, adj_nob))
return GSS_S_FAILURE;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
desc, &skw.skw_cipher, adj_nob))
return GSS_S_FAILURE;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
- skw.skw_hmac.len = sht->sht_bytes;
- if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 1, &skw.skw_cipher,
- desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac))
+ skw.skw_hmac.len = sht_bytes;
+ if (sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac), &skc->sc_hmac_key,
+ 1, &skw.skw_cipher, desc->bd_iov_count,
+ GET_ENC_KIOV(desc), &skw.skw_hmac))
return GSS_S_FAILURE;
return GSS_S_COMPLETE;
return GSS_S_FAILURE;
return GSS_S_COMPLETE;
rawobj_t *token, int adj_nob)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
rawobj_t *token, int adj_nob)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
+ size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
struct sk_wire skw;
struct sk_hdr *skh;
__u8 local_iv[SK_IV_SIZE];
struct sk_wire skw;
struct sk_hdr *skh;
__u8 local_iv[SK_IV_SIZE];
LASSERT(skc->sc_session_kb.kb_tfm);
LASSERT(skc->sc_session_kb.kb_tfm);
- if (token->len < sizeof(skh) + sht->sht_bytes)
+ if (token->len < sizeof(skh) + sht_bytes)
return GSS_S_DEFECTIVE_TOKEN;
skw.skw_header.data = token->data;
skw.skw_header.len = sizeof(struct sk_hdr);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
return GSS_S_DEFECTIVE_TOKEN;
skw.skw_header.data = token->data;
skw.skw_header.len = sizeof(struct sk_hdr);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
- skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
+ skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
- skw.skw_hmac.len = sht->sht_bytes;
+ skw.skw_hmac.len = cfs_crypto_hash_digestsize(skc->sc_hmac);
skh = (struct sk_hdr *)skw.skw_header.data;
rc = sk_verify_header(skh);
if (rc != GSS_S_COMPLETE)
return rc;
skh = (struct sk_hdr *)skw.skw_header.data;
rc = sk_verify_header(skh);
if (rc != GSS_S_COMPLETE)
return rc;
- rc = sk_verify_bulk_hmac(&sk_hmac_types[skc->sc_hmac],
+ rc = sk_verify_bulk_hmac(skc->sc_hmac,
&skc->sc_hmac_key, 1, &skw.skw_cipher,
desc->bd_iov_count, GET_ENC_KIOV(desc),
desc->bd_nob, &skw.skw_hmac);
&skc->sc_hmac_key, 1, &skw.skw_cipher,
desc->bd_iov_count, GET_ENC_KIOV(desc),
desc->bd_nob, &skw.skw_hmac);
* Author: Jeremy Filizetti <jfilizet@iu.edu>
*/
* Author: Jeremy Filizetti <jfilizet@iu.edu>
*/
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
[SK_CRYPT_AES256_CTR] = "AES-256-CTR",
};
[SK_CRYPT_AES256_CTR] = "AES-256-CTR",
};
-char *sk_hmac2name[] = {
- [SK_HMAC_EMPTY] = "NONE",
- [SK_HMAC_SHA256] = "SHA256",
- [SK_HMAC_SHA512] = "SHA512",
-};
+const char *sk_hmac2name[] = { "NONE", "SHA256", "SHA512" };
static int sk_name2crypt(char *name)
{
static int sk_name2crypt(char *name)
{
return SK_CRYPT_INVALID;
}
return SK_CRYPT_INVALID;
}
-static int sk_name2hmac(char *name)
+enum cfs_crypto_hash_alg sk_name2hmac(char *name)
+ enum cfs_crypto_hash_alg algo;
+ int i = 0;
- for (i = 0; i < SK_HMAC_MAX; i++) {
- if (strcasecmp(name, sk_hmac2name[i]) == 0)
- return i;
+ /* convert to lower case */
+ while (name[i]) {
+ putchar(tolower(name[i]));
+ i++;
- return SK_HMAC_INVALID;
+ if (strcmp(name, "none"))
+ return CFS_HASH_ALG_NULL;
+
+ algo = cfs_crypto_hash_alg(name);
+ if ((algo != CFS_HASH_ALG_SHA256) ||
+ (algo != CFS_HASH_ALG_SHA512))
+ return SK_HMAC_INVALID;
+
+ return algo;
}
static void usage(FILE *fp, char *program)
}
static void usage(FILE *fp, char *program)
fprintf(fp, "-i|--hmac <num> Hash algorithm for integrity "
"(Default: SHA256)\n");
fprintf(fp, "-i|--hmac <num> Hash algorithm for integrity "
"(Default: SHA256)\n");
- for (i = 1; i < SK_HMAC_MAX; i++)
+ for (i = 1; i < sizeof(sk_hmac2name) / sizeof(sk_hmac2name[0]); i++)
fprintf(fp, " %s\n", sk_hmac2name[i]);
fprintf(fp, "-e|--expire <num> Seconds before contexts from "
fprintf(fp, " %s\n", sk_hmac2name[i]);
fprintf(fp, "-e|--expire <num> Seconds before contexts from "
printf(" client");
printf("\n");
printf("HMAC alg: %s\n", sk_hmac2name[config->skc_hmac_alg]);
printf(" client");
printf("\n");
printf("HMAC alg: %s\n", sk_hmac2name[config->skc_hmac_alg]);
- printf("Crypto alg: %s\n", sk_crypt2name[config->skc_crypt_alg]);
+ printf("Crypto alg: %s\n", cfs_crypto_hash_name(config->skc_hmac_alg));
printf("Ctx Expiration: %u seconds\n", config->skc_expire);
printf("Shared keylen: %u bits\n", config->skc_shared_keylen);
printf("Prime length: %u bits\n", config->skc_prime_bits);
printf("Ctx Expiration: %u seconds\n", config->skc_expire);
printf("Shared keylen: %u bits\n", config->skc_shared_keylen);
printf("Prime length: %u bits\n", config->skc_prime_bits);
char *tmp;
char *tmp2;
int crypt = SK_CRYPT_EMPTY;
char *tmp;
char *tmp2;
int crypt = SK_CRYPT_EMPTY;
- int hmac = SK_HMAC_EMPTY;
+ enum cfs_crypto_hash_alg hmac = CFS_HASH_ALG_NULL;
int expire = -1;
int shared_keylen = -1;
int prime_bits = -1;
int expire = -1;
int shared_keylen = -1;
int prime_bits = -1;
config->skc_shared_keylen = SK_DEFAULT_SK_KEYLEN;
config->skc_prime_bits = SK_DEFAULT_PRIME_BITS;
config->skc_crypt_alg = SK_CRYPT_AES256_CTR;
config->skc_shared_keylen = SK_DEFAULT_SK_KEYLEN;
config->skc_prime_bits = SK_DEFAULT_PRIME_BITS;
config->skc_crypt_alg = SK_CRYPT_AES256_CTR;
- config->skc_hmac_alg = SK_HMAC_SHA256;
+ config->skc_hmac_alg = CFS_HASH_ALG_SHA256;
for (i = 0; i < MAX_MGSNIDS; i++)
config->skc_mgsnids[i] = LNET_NID_ANY;
for (i = 0; i < MAX_MGSNIDS; i++)
config->skc_mgsnids[i] = LNET_NID_ANY;
if (crypt != SK_CRYPT_EMPTY)
config->skc_crypt_alg = crypt;
if (crypt != SK_CRYPT_EMPTY)
config->skc_crypt_alg = crypt;
- if (hmac != SK_HMAC_EMPTY)
+ if (hmac != CFS_HASH_ALG_NULL)
config->skc_hmac_alg = hmac;
if (expire != -1)
config->skc_expire = expire;
config->skc_hmac_alg = hmac;
if (expire != -1)
config->skc_expire = expire;
static struct sk_crypt_type sk_crypt_types[] = {
[SK_CRYPT_AES256_CTR] = {
static struct sk_crypt_type sk_crypt_types[] = {
[SK_CRYPT_AES256_CTR] = {
- .sct_name = "ctr(aes)",
- .sct_bytes = 32,
+ .cht_name = "ctr(aes)",
+ .cht_bytes = 32,
static struct sk_hmac_type sk_hmac_types[] = {
[SK_HMAC_SHA256] = {
static struct sk_hmac_type sk_hmac_types[] = {
[SK_HMAC_SHA256] = {
- .sht_name = "hmac(sha256)",
- .sht_bytes = 32,
+ .cht_name = "sha256",
+ .cht_bytes = 32,
- .sht_name = "hmac(sha512)",
- .sht_bytes = 64,
+ .cht_name = "sha512",
+ .cht_bytes = 64,
#ifdef _NEW_BUILD_
# include "lgss_utils.h"
#ifdef _NEW_BUILD_
# include "lgss_utils.h"
printerr(0, "Invalid version\n");
return -1;
}
printerr(0, "Invalid version\n");
return -1;
}
- if (config->skc_hmac_alg >= SK_HMAC_MAX) {
+ if ((config->skc_hmac_alg != CFS_HASH_ALG_SHA256) &&
+ (config->skc_hmac_alg != CFS_HASH_ALG_SHA512)) {
printerr(0, "Invalid HMAC algorithm\n");
return -1;
}
printerr(0, "Invalid HMAC algorithm\n");
return -1;
}
-static inline const EVP_MD *sk_hash_to_evp_md(enum sk_hmac_alg alg)
+static inline const EVP_MD *sk_hash_to_evp_md(enum cfs_crypto_hash_alg alg)
+ case CFS_HASH_ALG_SHA256:
+ case CFS_HASH_ALG_SHA512:
return EVP_sha512();
default:
return EVP_md_null();
return EVP_sha512();
default:
return EVP_md_null();
* If the size is smaller it will take copy the first N bytes necessary to
* fill the derived key. */
int sk_kdf(gss_buffer_desc *derived_key , gss_buffer_desc *origin_key,
* If the size is smaller it will take copy the first N bytes necessary to
* fill the derived key. */
int sk_kdf(gss_buffer_desc *derived_key , gss_buffer_desc *origin_key,
- gss_buffer_desc *key_binding_bufs, int numbufs, int hmac_alg)
+ gss_buffer_desc *key_binding_bufs, int numbufs,
+ enum cfs_crypto_hash_alg hmac_alg)
{
size_t remain;
size_t bytes;
{
size_t remain;
size_t bytes;
- if (sk_hmac_types[hmac_alg].sht_bytes != tmp_hash.length) {
+ if (cfs_crypto_hash_digestsize(hmac_alg) != tmp_hash.length) {
free(tmp_hash.value);
return -EINVAL;
}
free(tmp_hash.value);
return -EINVAL;
}
gss_buffer_desc bufs[5];
int rc = -1;
gss_buffer_desc bufs[5];
int rc = -1;
- session_key->length = sk_crypt_types[kctx->skc_crypt_alg].sct_bytes;
+ session_key->length = sk_crypt_types[kctx->skc_crypt_alg].cht_bytes;
session_key->value = malloc(session_key->length);
if (!session_key->value) {
printerr(0, "Failed to allocate memory for session key\n");
session_key->value = malloc(session_key->length);
if (!session_key->value) {
printerr(0, "Failed to allocate memory for session key\n");
char *integrity = "Integrity";
int rc;
char *integrity = "Integrity";
int rc;
- hmac_key->length = sk_hmac_types[kctx->skc_hmac_alg].sht_bytes;
+ hmac_key->length = cfs_crypto_hash_digestsize(kctx->skc_hmac_alg);
hmac_key->value = malloc(hmac_key->length);
if (!hmac_key->value)
return -ENOMEM;
hmac_key->value = malloc(hmac_key->length);
if (!hmac_key->value)
return -ENOMEM;
if ((skc->sc_flags & LGSS_SVC_PRIV) == 0)
return 0;
if ((skc->sc_flags & LGSS_SVC_PRIV) == 0)
return 0;
- encrypt_key->length = sk_crypt_types[kctx->skc_crypt_alg].sct_bytes;
+ encrypt_key->length = cfs_crypto_hash_digestsize(kctx->skc_hmac_alg);
encrypt_key->value = malloc(encrypt_key->length);
if (!encrypt_key->value)
return -ENOMEM;
encrypt_key->value = malloc(encrypt_key->length);
if (!encrypt_key->value)
return -ENOMEM;
#include <openssl/evp.h>
#include <sys/types.h>
#include <openssl/evp.h>
#include <sys/types.h>
+#include <libcfs/libcfs_crypto.h>
#include "lsupport.h"
/* Some limits and defaults */
#include "lsupport.h"
/* Some limits and defaults */
+#define SK_HMAC_INVALID 0xFF
+
/* String consisting of "lustre:fsname:nodemap_hash" */
#define SK_DESCRIPTION_SIZE (9 + MTI_NAME_MAXLEN + LUSTRE_NODEMAP_NAME_LENGTH)
/* String consisting of "lustre:fsname:nodemap_hash" */
#define SK_DESCRIPTION_SIZE (9 + MTI_NAME_MAXLEN + LUSTRE_NODEMAP_NAME_LENGTH)
/* File format version */
uint32_t skc_version;
/* HMAC algorithm used for message integrity */
/* File format version */
uint32_t skc_version;
/* HMAC algorithm used for message integrity */
+ enum cfs_crypto_hash_alg skc_hmac_alg;
/* Crypt algorithm used for privacy mode */
uint16_t skc_crypt_alg;
/* Number of seconds that a context is valid after it is created from
/* Crypt algorithm used for privacy mode */
uint16_t skc_crypt_alg;
/* Number of seconds that a context is valid after it is created from