]) # LC_CONFIG_SUNRPC
#
+# LC_HAVE_CRYPTO_HASH
+#
+# 4.6 kernel commit 896545098777564212b9e91af4c973f094649aa7
+# removed the crypto_hash API. Since GSS only works with
+# crypto_hash, it has to be disabled for 4.6 and newer kernels.
+#
+AC_DEFUN([LC_HAVE_CRYPTO_HASH], [
+LB_CHECK_COMPILE([if crypto_hash API is supported],
+crypto_hash, [
+ #include <linux/crypto.h>
+],[
+ crypto_hash_digestsize(NULL);
+], [], [enable_gss="no"])
+])
+]) # LC_HAVE_CRYPTO_HASH
+
+#
# LC_CONFIG_GSS (default 'auto' (tests for dependencies, if found, enables))
#
# Build gss and related tools of Lustre. Currently both kernel and user space
AS_IF([test "x$enable_gss" != xno], [
LC_CONFIG_GSS_KEYRING
+ LC_HAVE_CRYPTO_HASH
LC_HAVE_CRED_TGCRED
LC_KEY_TYPE_INSTANTIATE_2ARGS
sunrpc_required=$enable_gss
SK_CRYPT_MAX = 2,
};
+enum sk_hmac_alg {
+ SK_HMAC_INVALID = -1,
+ SK_HMAC_EMPTY = 0,
+ SK_HMAC_SHA256 = 1,
+ SK_HMAC_SHA512 = 2,
+ SK_HMAC_MAX = 3,
+};
+
struct sk_crypt_type {
- char *cht_name;
- unsigned int cht_key;
- unsigned int cht_bytes;
+ char *sct_name;
+ size_t sct_bytes;
+};
+
+struct sk_hmac_type {
+ char *sht_name;
+ size_t sht_bytes;
};
/** @} lustreuser */
return ret;
}
-int gss_digest_hmac(struct crypto_ahash *tfm,
+int gss_digest_hmac(struct crypto_hash *tfm,
rawobj_t *key,
rawobj_t *hdr,
int msgcnt, rawobj_t *msgs,
int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
- struct ahash_request *req;
+ struct hash_desc desc = {
+ .tfm = tfm,
+ .flags = 0,
+ };
struct scatterlist sg[1];
struct sg_table sgt;
int i;
int rc;
- rc = crypto_ahash_setkey(tfm, key->data, key->len);
+ rc = crypto_hash_setkey(tfm, key->data, key->len);
if (rc)
return rc;
- req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- crypto_free_ahash(tfm);
- return -ENOMEM;
- }
-
- rc = crypto_ahash_init(req);
+ rc = crypto_hash_init(&desc);
if (rc)
return rc;
rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
if (rc != 0)
return rc;
- ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
- if (rc)
- return rc;
- rc = crypto_ahash_update(req);
+ rc = crypto_hash_update(&desc, sg, msgs[i].len);
if (rc)
return rc;
sg_init_table(sg, 1);
sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
iovs[i].kiov_offset);
-
- ahash_request_set_crypt(req, sg, NULL, iovs[i].kiov_len);
- if (rc)
- return rc;
- rc = crypto_ahash_update(req);
+ rc = crypto_hash_update(&desc, sg, iovs[i].kiov_len);
if (rc)
return rc;
}
rc = gss_setup_sgtable(&sgt, sg, hdr, sizeof(*hdr));
if (rc != 0)
return rc;
-
- ahash_request_set_crypt(req, sg, NULL, sizeof(hdr->len));
- if (rc)
- return rc;
- rc = crypto_ahash_update(req);
+ rc = crypto_hash_update(&desc, sg, sizeof(hdr->len));
if (rc)
return rc;
gss_teardown_sgtable(&sgt);
}
- return crypto_ahash_final(req);
+ return crypto_hash_final(&desc, cksum->data);
}
-int gss_digest_norm(struct crypto_ahash *tfm,
+int gss_digest_norm(struct crypto_hash *tfm,
struct gss_keyblock *kb,
rawobj_t *hdr,
int msgcnt, rawobj_t *msgs,
int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
- struct ahash_request *req;
+ struct hash_desc desc;
struct scatterlist sg[1];
struct sg_table sgt;
int i;
int rc;
LASSERT(kb->kb_tfm);
+ desc.tfm = tfm;
+ desc.flags = 0;
- req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- crypto_free_ahash(tfm);
- return -ENOMEM;
- }
-
- rc = crypto_ahash_init(req);
+ rc = crypto_hash_init(&desc);
if (rc)
return rc;
if (rc != 0)
return rc;
- ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
- if (rc)
- return rc;
- rc = crypto_ahash_update(req);
+ rc = crypto_hash_update(&desc, sg, msgs[i].len);
if (rc)
return rc;
sg_init_table(sg, 1);
sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
iovs[i].kiov_offset);
- ahash_request_set_crypt(req, sg, NULL, iovs[i].kiov_len);
- if (rc)
- return rc;
- rc = crypto_ahash_update(req);
+ rc = crypto_hash_update(&desc, sg, iovs[i].kiov_len);
if (rc)
return rc;
}
if (rc != 0)
return rc;
- ahash_request_set_crypt(req, sg, NULL, sizeof(*hdr));
- if (rc)
- return rc;
- rc = crypto_ahash_update(req);
+ rc = crypto_hash_update(&desc, sg, sizeof(*hdr));
if (rc)
return rc;
gss_teardown_sgtable(&sgt);
}
- rc = crypto_ahash_final(req);
+ rc = crypto_hash_final(&desc, cksum->data);
if (rc)
return rc;
void gss_teardown_sgtable(struct sg_table *sgt);
int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv,
const void *in, void *out, size_t length);
-int gss_digest_hmac(struct crypto_ahash *tfm, rawobj_t *key, rawobj_t *hdr,
+int gss_digest_hmac(struct crypto_hash *tfm, rawobj_t *key, rawobj_t *hdr,
int msgcnt, rawobj_t *msgs, int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum);
-int gss_digest_norm(struct crypto_ahash *tfm, struct gss_keyblock *kb,
+int gss_digest_norm(struct crypto_hash *tfm, struct gss_keyblock *kb,
rawobj_t *hdr, int msgcnt, rawobj_t *msgs, int iovcnt,
lnet_kiov_t *iovs, rawobj_t *cksum);
int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize);
#ifndef __PTLRPC_GSS_GSS_INTERNAL_H_
#define __PTLRPC_GSS_GSS_INTERNAL_H_
-#include <crypto/hash.h>
+#include <linux/crypto.h>
#include <lustre_sec.h>
/*
static inline
__u32 import_to_gss_svc(struct obd_import *imp)
{
- int cl_sp_to = LUSTRE_SP_ANY;
+ const char *name = imp->imp_obd->obd_type->typ_name;
- if (imp->imp_obd)
- cl_sp_to = imp->imp_obd->u.cli.cl_sp_to;
-
- switch (cl_sp_to) {
- case LUSTRE_SP_MDT:
+ if (!strcmp(name, LUSTRE_MGC_NAME))
+ return LUSTRE_GSS_TGT_MGS;
+ if (!strcmp(name, LUSTRE_MDC_NAME) ||
+ !strcmp(name, LUSTRE_LWP_NAME))
return LUSTRE_GSS_TGT_MDS;
- case LUSTRE_SP_OST:
+ if (!strcmp(name, LUSTRE_OSC_NAME) ||
+ !strcmp(name, LUSTRE_OSP_NAME))
return LUSTRE_GSS_TGT_OSS;
- case LUSTRE_SP_MGC:
- case LUSTRE_SP_MGS:
- return LUSTRE_GSS_TGT_MGS;
- case LUSTRE_SP_CLI:
- case LUSTRE_SP_ANY:
- default:
- return 0;
- }
+
+ return 0;
}
/*
rawobj_t *cksum)
{
struct krb5_enctype *ke = &enctypes[enctype];
- struct crypto_ahash *tfm;
+ struct crypto_hash *tfm;
rawobj_t hdr;
__u32 code = GSS_S_FAILURE;
int rc;
- tfm = crypto_alloc_ahash(ke->ke_hash_name, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm)) {
+ if (!(tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
return GSS_S_FAILURE;
}
- cksum->len = crypto_ahash_digestsize(tfm);
+ cksum->len = crypto_hash_digestsize(tfm);
OBD_ALLOC_LARGE(cksum->data, cksum->len);
if (!cksum->data) {
cksum->len = 0;
if (rc == 0)
code = GSS_S_COMPLETE;
out_tfm:
- crypto_free_ahash(tfm);
+ crypto_free_hash(tfm);
return code;
}
#include <linux/mutex.h>
#include <crypto/ctr.h>
-#include <libcfs/libcfs_crypto.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#define SK_IV_REV_START (1ULL << 63)
struct sk_ctx {
- __u16 sc_pad;
+ __u16 sc_hmac;
__u16 sc_crypt;
__u32 sc_expire;
__u32 sc_host_random;
atomic64_t sc_iv;
rawobj_t sc_hmac_key;
struct gss_keyblock sc_session_kb;
- enum cfs_crypto_hash_alg sc_hmac;
};
struct sk_hdr {
static struct sk_crypt_type sk_crypt_types[] = {
[SK_CRYPT_AES256_CTR] = {
- .cht_name = "aes256",
- .cht_key = 0,
- .cht_bytes = 32,
+ .sct_name = "ctr(aes)",
+ .sct_bytes = 32,
+ },
+};
+
+static struct sk_hmac_type sk_hmac_types[] = {
+ [SK_HMAC_SHA256] = {
+ .sht_name = "hmac(sha256)",
+ .sht_bytes = 32,
+ },
+ [SK_HMAC_SHA512] = {
+ .sht_name = "hmac(sha512)",
+ .sht_bytes = 64,
},
};
static int sk_init_keys(struct sk_ctx *skc)
{
return gss_keyblock_init(&skc->sc_session_kb,
- sk_crypt_types[skc->sc_crypt].cht_name, 0);
+ sk_crypt_types[skc->sc_crypt].sct_name, 0);
}
static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc)
CERROR("Failed to read HMAC algorithm type");
return -1;
}
- if (skc->sc_hmac >= CFS_HASH_ALG_MAX) {
+ if (skc->sc_hmac <= SK_HMAC_EMPTY || skc->sc_hmac >= SK_HMAC_MAX) {
CERROR("Invalid hmac type: %d\n", skc->sc_hmac);
return -1;
}
}
static
-__u32 sk_make_hmac(const char *alg_name, rawobj_t *key, int msg_count,
- rawobj_t *msgs, int iov_count, lnet_kiov_t *iovs,
- rawobj_t *token)
+__u32 sk_make_hmac(char *alg_name, rawobj_t *key, int msg_count, rawobj_t *msgs,
+ int iov_count, lnet_kiov_t *iovs, rawobj_t *token)
{
- struct crypto_ahash *tfm;
+ struct crypto_hash *tfm;
int rc;
- tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_hash(alg_name, 0, 0);
if (IS_ERR(tfm))
return GSS_S_FAILURE;
rc = GSS_S_FAILURE;
- LASSERT(token->len >= crypto_ahash_digestsize(tfm));
+ LASSERT(token->len >= crypto_hash_digestsize(tfm));
if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,
token))
rc = GSS_S_COMPLETE;
- crypto_free_ahash(tfm);
+ crypto_free_hash(tfm);
return rc;
}
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- return sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac),
+ return sk_make_hmac(sk_hmac_types[skc->sc_hmac].sht_name,
&skc->sc_hmac_key, message_count, messages,
iov_count, iovs, token);
}
static
-u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
- int message_count, rawobj_t *messages, int iov_count,
- lnet_kiov_t *iovs, rawobj_t *token)
+__u32 sk_verify_hmac(struct sk_hmac_type *sht, rawobj_t *key, int message_count,
+ rawobj_t *messages, int iov_count, lnet_kiov_t *iovs,
+ rawobj_t *token)
{
rawobj_t checksum = RAWOBJ_EMPTY;
__u32 rc = GSS_S_FAILURE;
- checksum.len = cfs_crypto_hash_digestsize(algo);
- /* What about checksum.len == 0 ??? */
-
+ checksum.len = sht->sht_bytes;
if (token->len < checksum.len) {
CDEBUG(D_SEC, "Token received too short, expected %d "
"received %d\n", token->len, checksum.len);
if (!checksum.data)
return rc;
- if (sk_make_hmac(cfs_crypto_hash_name(algo), key, message_count,
- messages, iov_count, iovs, &checksum)) {
+ if (sk_make_hmac(sht->sht_name, key, message_count, messages,
+ iov_count, iovs, &checksum)) {
CDEBUG(D_SEC, "Failed to create checksum to validate\n");
goto cleanup;
}
* to decrypt up to the number of bytes actually specified from the sender
* (bd_nob) otherwise the calulated HMAC will be incorrect. */
static
-__u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac,
- rawobj_t *key, int msgcnt, rawobj_t *msgs,
- int iovcnt, lnet_kiov_t *iovs, int iov_bytes,
- rawobj_t *token)
+__u32 sk_verify_bulk_hmac(struct sk_hmac_type *sht, rawobj_t *key,
+ int msgcnt, rawobj_t *msgs, int iovcnt,
+ lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token)
{
rawobj_t checksum = RAWOBJ_EMPTY;
- struct cfs_crypto_hash_desc *hdesc;
- int rc = GSS_S_FAILURE, i;
+ struct crypto_hash *tfm;
+ struct hash_desc desc = {
+ .tfm = NULL,
+ .flags = 0,
+ };
+ struct scatterlist sg[1];
+ struct sg_table sgt;
+ int bytes;
+ int i;
+ int rc = GSS_S_FAILURE;
- checksum.len = cfs_crypto_hash_digestsize(sc_hmac);
+ checksum.len = sht->sht_bytes;
if (token->len < checksum.len) {
CDEBUG(D_SEC, "Token received too short, expected %d "
"received %d\n", token->len, checksum.len);
if (!checksum.data)
return rc;
+ tfm = crypto_alloc_hash(sht->sht_name, 0, 0);
+ if (IS_ERR(tfm))
+ goto cleanup;
+
+ desc.tfm = tfm;
+
+ LASSERT(token->len >= crypto_hash_digestsize(tfm));
+
+ rc = crypto_hash_setkey(tfm, key->data, key->len);
+ if (rc)
+ goto hash_cleanup;
+
+ rc = crypto_hash_init(&desc);
+ if (rc)
+ goto hash_cleanup;
+
for (i = 0; i < msgcnt; i++) {
- if (!msgs[i].len)
+ if (msgs[i].len == 0)
continue;
- rc = cfs_crypto_hash_digest(sc_hmac, msgs[i].data, msgs[i].len,
- key->data, key->len,
- checksum.data, &checksum.len);
- if (rc)
- goto cleanup;
- }
+ rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
+ if (rc != 0)
+ goto hash_cleanup;
- hdesc = cfs_crypto_hash_init(sc_hmac, key->data, key->len);
- if (IS_ERR(hdesc)) {
- rc = PTR_ERR(hdesc);
- goto cleanup;
+ rc = crypto_hash_update(&desc, sg, msgs[i].len);
+ if (rc) {
+ gss_teardown_sgtable(&sgt);
+ goto hash_cleanup;
+ }
+
+ gss_teardown_sgtable(&sgt);
}
for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
- int bytes;
-
if (iovs[i].kiov_len == 0)
continue;
bytes = min_t(int, iov_bytes, iovs[i].kiov_len);
iov_bytes -= bytes;
- rc = cfs_crypto_hash_update_page(hdesc, iovs[i].kiov_page,
- iovs[i].kiov_offset, bytes);
+
+ sg_init_table(sg, 1);
+ sg_set_page(&sg[0], iovs[i].kiov_page, bytes,
+ iovs[i].kiov_offset);
+ rc = crypto_hash_update(&desc, sg, bytes);
if (rc)
- goto cleanup;
+ goto hash_cleanup;
}
- rc = cfs_crypto_hash_final(hdesc, checksum.data, &checksum.len);
- if (rc)
- goto cleanup;
+ crypto_hash_final(&desc, checksum.data);
if (memcmp(token->data, checksum.data, checksum.len)) {
rc = GSS_S_BAD_SIG;
- goto cleanup;
+ goto hash_cleanup;
}
rc = GSS_S_COMPLETE;
+
+hash_cleanup:
+ crypto_free_hash(tfm);
+
cleanup:
OBD_FREE_LARGE(checksum.data, checksum.len);
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- return sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key,
+ return sk_verify_hmac(&sk_hmac_types[skc->sc_hmac], &skc->sc_hmac_key,
message_count, messages, iov_count, iovs, token);
}
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
+ struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
struct sk_wire skw;
struct sk_hdr skh;
rawobj_t msgbufs[3];
sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
- skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
+ skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message,
&skw.skw_cipher, 1))
return GSS_S_FAILURE;
msgbufs[2] = skw.skw_cipher;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
- skw.skw_hmac.len = sht_bytes;
- if (sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac), &skc->sc_hmac_key,
- 3, msgbufs, 0, NULL, &skw.skw_hmac))
+ skw.skw_hmac.len = sht->sht_bytes;
+ if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 3, msgbufs, 0,
+ NULL, &skw.skw_hmac))
return GSS_S_FAILURE;
token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len;
rawobj_t *token, rawobj_t *message)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
+ struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
struct sk_wire skw;
struct sk_hdr *skh;
rawobj_t msgbufs[3];
LASSERT(skc->sc_session_kb.kb_tfm);
- if (token->len < sizeof(skh) + sht_bytes)
+ if (token->len < sizeof(skh) + sht->sht_bytes)
return GSS_S_DEFECTIVE_TOKEN;
skw.skw_header.data = token->data;
skw.skw_header.len = sizeof(struct sk_hdr);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
- skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
+ skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
- skw.skw_hmac.len = sht_bytes;
+ skw.skw_hmac.len = sht->sht_bytes;
blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
if (skw.skw_cipher.len % blocksize != 0)
msgbufs[0] = skw.skw_header;
msgbufs[1] = *gss_header;
msgbufs[2] = skw.skw_cipher;
- rc = sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key, 3, msgbufs,
- 0, NULL, &skw.skw_hmac);
+ rc = sk_verify_hmac(sht, &skc->sc_hmac_key, 3, msgbufs, 0, NULL,
+ &skw.skw_hmac);
if (rc)
return rc;
int adj_nob)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
+ struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
struct sk_wire skw;
struct sk_hdr skh;
__u8 local_iv[SK_IV_SIZE];
sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
- skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
+ skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
desc, &skw.skw_cipher, adj_nob))
return GSS_S_FAILURE;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
- skw.skw_hmac.len = sht_bytes;
- if (sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac), &skc->sc_hmac_key,
- 1, &skw.skw_cipher, desc->bd_iov_count,
- GET_ENC_KIOV(desc), &skw.skw_hmac))
+ skw.skw_hmac.len = sht->sht_bytes;
+ if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 1, &skw.skw_cipher,
+ desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac))
return GSS_S_FAILURE;
return GSS_S_COMPLETE;
rawobj_t *token, int adj_nob)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
+ struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
struct sk_wire skw;
struct sk_hdr *skh;
__u8 local_iv[SK_IV_SIZE];
LASSERT(skc->sc_session_kb.kb_tfm);
- if (token->len < sizeof(skh) + sht_bytes)
+ if (token->len < sizeof(skh) + sht->sht_bytes)
return GSS_S_DEFECTIVE_TOKEN;
skw.skw_header.data = token->data;
skw.skw_header.len = sizeof(struct sk_hdr);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
- skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
+ skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
- skw.skw_hmac.len = cfs_crypto_hash_digestsize(skc->sc_hmac);
+ skw.skw_hmac.len = sht->sht_bytes;
skh = (struct sk_hdr *)skw.skw_header.data;
rc = sk_verify_header(skh);
if (rc != GSS_S_COMPLETE)
return rc;
- rc = sk_verify_bulk_hmac(skc->sc_hmac,
+ rc = sk_verify_bulk_hmac(&sk_hmac_types[skc->sc_hmac],
&skc->sc_hmac_key, 1, &skw.skw_cipher,
desc->bd_iov_count, GET_ENC_KIOV(desc),
desc->bd_nob, &skw.skw_hmac);
[SK_CRYPT_AES256_CTR] = "AES-256-CTR",
};
-const char *sk_hmac2name[] = { "NONE", "SHA256", "SHA512" };
+char *sk_hmac2name[] = {
+ [SK_HMAC_EMPTY] = "NONE",
+ [SK_HMAC_SHA256] = "SHA256",
+ [SK_HMAC_SHA512] = "SHA512",
+};
static int sk_name2crypt(char *name)
{
return SK_CRYPT_INVALID;
}
-enum cfs_crypto_hash_alg sk_name2hmac(char *name)
+static int sk_name2hmac(char *name)
{
- enum cfs_crypto_hash_alg algo;
- int i = 0;
+ int i;
- /* convert to lower case */
- while (name[i]) {
- name[i] = tolower(name[i]);
- i++;
+ for (i = 0; i < SK_HMAC_MAX; i++) {
+ if (strcasecmp(name, sk_hmac2name[i]) == 0)
+ return i;
}
- if (strcmp(name, "none") == 0)
- return CFS_HASH_ALG_NULL;
-
- algo = cfs_crypto_hash_alg(name);
- if ((algo != CFS_HASH_ALG_SHA256) &&
- (algo != CFS_HASH_ALG_SHA512))
- return SK_HMAC_INVALID;
-
- return algo;
+ return SK_HMAC_INVALID;
}
static void usage(FILE *fp, char *program)
fprintf(fp, "-i|--hmac <num> Hash algorithm for integrity "
"(Default: SHA256)\n");
- for (i = 1; i < sizeof(sk_hmac2name) / sizeof(sk_hmac2name[0]); i++)
+ for (i = 1; i < SK_HMAC_MAX; i++)
fprintf(fp, " %s\n", sk_hmac2name[i]);
fprintf(fp, "-e|--expire <num> Seconds before contexts from "
"client)\n");
fprintf(fp, "-k|--key-bits <len> Shared key length in bits "
"(Default: %d)\n", SK_DEFAULT_SK_KEYLEN);
- fprintf(fp, "-d|--data <file> Key data source for new keys "
- "(Default: /dev/random)\n");
- fprintf(fp, " Not a seed value. This is the actual key value.\n\n");
+ fprintf(fp, "-d|--data <file> Key random data source "
+ "(Default: /dev/random)\n\n");
fprintf(fp, "Other Options:\n");
fprintf(fp, "-v|--verbose Increase verbosity for errors\n");
exit(EXIT_FAILURE);
printf(" client");
printf("\n");
printf("HMAC alg: %s\n", sk_hmac2name[config->skc_hmac_alg]);
- printf("Crypto alg: %s\n", cfs_crypto_hash_name(config->skc_hmac_alg));
+ printf("Crypto alg: %s\n", sk_crypt2name[config->skc_crypt_alg]);
printf("Ctx Expiration: %u seconds\n", config->skc_expire);
printf("Shared keylen: %u bits\n", config->skc_shared_keylen);
printf("Prime length: %u bits\n", config->skc_prime_bits);
char *tmp;
char *tmp2;
int crypt = SK_CRYPT_EMPTY;
- enum cfs_crypto_hash_alg hmac = CFS_HASH_ALG_NULL;
+ int hmac = SK_HMAC_EMPTY;
int expire = -1;
int shared_keylen = -1;
int prime_bits = -1;
fprintf(stderr, "error: invalid HMAC algorithm specified\n");
return EXIT_FAILURE;
}
- if (modify && datafile) {
- fprintf(stderr, "error: data file option not valid in key modify\n");
- return EXIT_FAILURE;
- }
if (modify) {
config = sk_read_file(modify);
config->skc_shared_keylen = SK_DEFAULT_SK_KEYLEN;
config->skc_prime_bits = SK_DEFAULT_PRIME_BITS;
config->skc_crypt_alg = SK_CRYPT_AES256_CTR;
- config->skc_hmac_alg = CFS_HASH_ALG_SHA256;
+ config->skc_hmac_alg = SK_HMAC_SHA256;
for (i = 0; i < MAX_MGSNIDS; i++)
config->skc_mgsnids[i] = LNET_NID_ANY;
generate_prime = type & SK_TYPE_CLIENT;
strncpy(config->skc_nodemap, SK_DEFAULT_NODEMAP,
- sizeof(config->skc_nodemap) - 1);
+ strlen(SK_DEFAULT_NODEMAP));
if (!datafile)
datafile = "/dev/random";
if (crypt != SK_CRYPT_EMPTY)
config->skc_crypt_alg = crypt;
- if (hmac != CFS_HASH_ALG_NULL)
+ if (hmac != SK_HMAC_EMPTY)
config->skc_hmac_alg = hmac;
if (expire != -1)
config->skc_expire = expire;
if (prime_bits != -1)
config->skc_prime_bits = prime_bits;
if (fsname)
- strncpy(config->skc_fsname, fsname,
- sizeof(config->skc_fsname) - 1);
+ strncpy(config->skc_fsname, fsname, strlen(fsname));
if (nodemap)
- strncpy(config->skc_nodemap, nodemap,
- sizeof(config->skc_nodemap) - 1);
+ strncpy(config->skc_nodemap, nodemap, strlen(nodemap));
if (mgsnids && parse_mgsnids(mgsnids, config))
goto error;
if (sk_validate_config(config)) {
static struct sk_crypt_type sk_crypt_types[] = {
[SK_CRYPT_AES256_CTR] = {
- .cht_name = "ctr(aes)",
- .cht_bytes = 32,
+ .sct_name = "ctr(aes)",
+ .sct_bytes = 32,
},
};
-/*
static struct sk_hmac_type sk_hmac_types[] = {
[SK_HMAC_SHA256] = {
- .cht_name = "sha256",
- .cht_bytes = 32,
+ .sht_name = "hmac(sha256)",
+ .sht_bytes = 32,
},
[SK_HMAC_SHA512] = {
- .cht_name = "sha512",
- .cht_bytes = 64,
+ .sht_name = "hmac(sha512)",
+ .sht_bytes = 64,
},
-};*/
+};
#ifdef _NEW_BUILD_
# include "lgss_utils.h"
printerr(0, "Invalid version\n");
return -1;
}
- if ((config->skc_hmac_alg != CFS_HASH_ALG_SHA256) &&
- (config->skc_hmac_alg != CFS_HASH_ALG_SHA512)) {
+ if (config->skc_hmac_alg >= SK_HMAC_MAX) {
printerr(0, "Invalid HMAC algorithm\n");
return -1;
}
*
* \retval EVP_MD
*/
-static inline const EVP_MD *sk_hash_to_evp_md(enum cfs_crypto_hash_alg alg)
+static inline const EVP_MD *sk_hash_to_evp_md(enum sk_hmac_alg alg)
{
switch (alg) {
- case CFS_HASH_ALG_SHA256:
+ case SK_HMAC_SHA256:
return EVP_sha256();
- case CFS_HASH_ALG_SHA512:
+ case SK_HMAC_SHA512:
return EVP_sha512();
default:
return EVP_md_null();
* If the size is smaller it will take copy the first N bytes necessary to
* fill the derived key. */
int sk_kdf(gss_buffer_desc *derived_key , gss_buffer_desc *origin_key,
- gss_buffer_desc *key_binding_bufs, int numbufs,
- enum cfs_crypto_hash_alg hmac_alg)
+ gss_buffer_desc *key_binding_bufs, int numbufs, int hmac_alg)
{
size_t remain;
size_t bytes;
return rc;
}
- if (cfs_crypto_hash_digestsize(hmac_alg) != tmp_hash.length) {
+ if (sk_hmac_types[hmac_alg].sht_bytes != tmp_hash.length) {
free(tmp_hash.value);
return -EINVAL;
}
gss_buffer_desc bufs[5];
int rc = -1;
- session_key->length = sk_crypt_types[kctx->skc_crypt_alg].cht_bytes;
+ session_key->length = sk_crypt_types[kctx->skc_crypt_alg].sct_bytes;
session_key->value = malloc(session_key->length);
if (!session_key->value) {
printerr(0, "Failed to allocate memory for session key\n");
char *integrity = "Integrity";
int rc;
- hmac_key->length = cfs_crypto_hash_digestsize(kctx->skc_hmac_alg);
+ hmac_key->length = sk_hmac_types[kctx->skc_hmac_alg].sht_bytes;
hmac_key->value = malloc(hmac_key->length);
if (!hmac_key->value)
return -ENOMEM;
if ((skc->sc_flags & LGSS_SVC_PRIV) == 0)
return 0;
- encrypt_key->length = cfs_crypto_hash_digestsize(kctx->skc_hmac_alg);
+ encrypt_key->length = sk_crypt_types[kctx->skc_crypt_alg].sct_bytes;
encrypt_key->value = malloc(encrypt_key->length);
if (!encrypt_key->value)
return -ENOMEM;
#include <openssl/evp.h>
#include <sys/types.h>
-#include <libcfs/libcfs_crypto.h>
#include "lsupport.h"
/* Some limits and defaults */
SK_RESP_BUFFERS = 4,
};
-#define SK_HMAC_INVALID 0xFF
-
/* String consisting of "lustre:fsname:nodemap_hash" */
#define SK_DESCRIPTION_SIZE (9 + MTI_NAME_MAXLEN + LUSTRE_NODEMAP_NAME_LENGTH)
/* File format version */
uint32_t skc_version;
/* HMAC algorithm used for message integrity */
- enum cfs_crypto_hash_alg skc_hmac_alg;
+ uint16_t skc_hmac_alg;
/* Crypt algorithm used for privacy mode */
uint16_t skc_crypt_alg;
/* Number of seconds that a context is valid after it is created from