From: James Simmons Date: Tue, 1 Aug 2017 21:35:19 +0000 (-0400) Subject: LU-8602 gss: Properly port gss to newer crypto api. X-Git-Tag: 2.12.0-RC1~60 X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=a21c13d4df4bea1bec0f5804136740ed53d5a57f LU-8602 gss: Properly port gss to newer crypto api. In newer kernels the old crypto_hash_* API was removed, which requires the gss layer to now use crypto_ahash_*. Pieces of the port were done incorrectly, which this patch addresses. In the process, clean up some cases where sg_table data was leaked. Address the limitation that the gss keys are hard coded as u16 variables, which are not guaranteed to stay constant and will make adding new crypto algorithms a challenge in the future. Keep the u16 representation of hash and crypto algorithms in Shared-Keys as they are stored on disk, so as not to break compatibility with already existing keys, but pass the name string of the crypto algorithm we want to use in to kernel space. Also deal with the new timer_setup and key.usage formats introduced in newer kernels. Test-Parameters: testlist=sanity-sec envdefinitions=SHARED_KEY=true Change-Id: I90ef58104a1955ce12603173964a6878f60b601e Signed-off-by: James Simmons Signed-off-by: Sebastien Buisson Reviewed-on: https://review.whamcloud.com/28309 Reviewed-by: John L. Hammond Tested-by: Jenkins Tested-by: Maloo Reviewed-by: Oleg Drokin --- diff --git a/libcfs/include/libcfs/libcfs_crypto.h b/libcfs/include/libcfs/libcfs_crypto.h index ea9234a..af67f53 100644 --- a/libcfs/include/libcfs/libcfs_crypto.h +++ b/libcfs/include/libcfs/libcfs_crypto.h @@ -38,6 +38,12 @@ struct cfs_crypto_hash_type { unsigned int cht_size; /**< hash digest size */ }; +struct cfs_crypto_crypt_type { + char *cct_name; /**< crypto algorithm name, equal to + * format name for crypto api */ + unsigned int cct_size; /**< crypto key size */ +}; + enum cfs_crypto_hash_alg { CFS_HASH_ALG_NULL = 0, CFS_HASH_ALG_ADLER32, @@ -54,6 +60,13 @@ enum cfs_crypto_hash_alg { CFS_HASH_ALG_UNKNOWN = 0xff }; +enum cfs_crypto_crypt_alg { + CFS_CRYPT_ALG_NULL = 0, + CFS_CRYPT_ALG_AES256_CTR, + CFS_CRYPT_ALG_MAX, + CFS_CRYPT_ALG_UNKNOWN = 0xff +}; + static struct cfs_crypto_hash_type hash_types[] = { [CFS_HASH_ALG_NULL] = { .cht_name = "null", @@ -107,6 +120,17 @@ static struct cfs_crypto_hash_type hash_types[] = { } }; +static struct cfs_crypto_crypt_type crypt_types[] = { + [CFS_CRYPT_ALG_NULL] = { + .cct_name = "null", + .cct_size = 0 + }, + [CFS_CRYPT_ALG_AES256_CTR] = { + .cct_name = "ctr(aes)", + .cct_size = 32 + } +}; + /* Maximum size of hash_types[].cht_size */ #define CFS_CRYPTO_HASH_DIGESTSIZE_MAX 64 @@ -188,6 +212,86 @@ static inline unsigned char cfs_crypto_hash_alg(const char *algname) return CFS_HASH_ALG_UNKNOWN; } +/** + * Return crypt algorithm information for the specified algorithm identifier + * + * Crypt information includes algorithm name, key size.
+ * + * \retval cfs_crypto_crypt_type for valid ID (CFS_CRYPT_ALG_*) + * \retval NULL for unknown algorithm identifier + */ +static inline const struct +cfs_crypto_crypt_type *cfs_crypto_crypt_type( + enum cfs_crypto_crypt_alg crypt_alg) +{ + struct cfs_crypto_crypt_type *ct; + + if (crypt_alg < CFS_CRYPT_ALG_MAX) { + ct = &crypt_types[crypt_alg]; + if (ct->cct_name != NULL) + return ct; + } + return NULL; +} + +/** + * Return crypt name for crypt algorithm identifier + * + * \param[in] crypt_alg crypt algorithm id (CFS_CRYPT_ALG_*) + * + * \retval string name of known crypt algorithm + * \retval "unknown" if crypt algorithm is unknown + */ +static inline const +char *cfs_crypto_crypt_name(enum cfs_crypto_crypt_alg crypt_alg) +{ + const struct cfs_crypto_crypt_type *ct; + + ct = cfs_crypto_crypt_type(crypt_alg); + if (ct) + return ct->cct_name; + + return "unknown"; +} + + +/** + * Return key size for crypto algorithm type + * + * \param[in] crypt_alg crypt algorithm id (CFS_CRYPT_ALG_*) + * + * \retval crypt algorithm key size in bytes + * \retval 0 if crypt algorithm type is unknown + */ +static inline +unsigned int cfs_crypto_crypt_keysize(enum cfs_crypto_crypt_alg crypt_alg) +{ + const struct cfs_crypto_crypt_type *ct; + + ct = cfs_crypto_crypt_type(crypt_alg); + if (ct != NULL) + return ct->cct_size; + + return 0; +} + +/** + * Find crypto algorithm ID for the specified algorithm name + * + * \retval crypto algorithm ID for valid ID (CFS_CRYPT_ALG_*) + * \retval CFS_CRYPT_ALG_UNKNOWN for unknown algorithm name + */ +static inline unsigned char cfs_crypto_crypt_alg(const char *algname) +{ + enum cfs_crypto_crypt_alg crypt_alg; + + for (crypt_alg = 0; crypt_alg < CFS_CRYPT_ALG_MAX; crypt_alg++) + if (strcmp(crypt_types[crypt_alg].cct_name, algname) == 0) + return crypt_alg; + + return CFS_CRYPT_ALG_UNKNOWN; +} + int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg, const void *buf, unsigned int buf_len, unsigned char *key, unsigned int key_len, diff --git a/lustre/autoconf/lustre-core.m4 b/lustre/autoconf/lustre-core.m4 index 9841632..d751e49 100644 --- a/lustre/autoconf/lustre-core.m4 +++ b/lustre/autoconf/lustre-core.m4 @@ -266,23 +266,6 @@ kernel SUNRPC support is required by using GSS. ]) # LC_CONFIG_SUNRPC # -# LC_HAVE_CRYPTO_HASH -# -# 4.6 kernel commit 896545098777564212b9e91af4c973f094649aa7 -# removed crypto_hash support. Since GSS only works with -# crypto_hash it has to be disabled for newer distros. -# -AC_DEFUN([LC_HAVE_CRYPTO_HASH], [ -LB_CHECK_COMPILE([if crypto_hash API is supported], -crypto_hash, [ - #include <linux/crypto.h> -],[ - crypto_hash_digestsize(NULL); -], [], [enable_gss="no"]) -]) -]) # LC_HAVE_CRYPTO_HASH - -# # LC_CONFIG_GSS (default 'auto' (tests for dependencies, if found, enables)) # # Build gss and related tools of Lustre. Currently both kernel and user space @@ -297,7 +280,6 @@ AC_MSG_RESULT([$enable_gss]) AS_IF([test "x$enable_gss" != xno], [ LC_CONFIG_GSS_KEYRING - LC_HAVE_CRYPTO_HASH LC_HAVE_CRED_TGCRED LC_KEY_TYPE_INSTANTIATE_2ARGS sunrpc_required=$enable_gss @@ -2850,6 +2832,45 @@ vm_operations_no_vm_area_struct, [ ]) # LC_VM_OPERATIONS_REMOVE_VMF_ARG + # +# LC_HAVE_KEY_USAGE_REFCOUNT +# +# Kernel version 4.11 commit fff292914d3a2f1efd05ca71c2ba72a3c663201e +# converted key.usage from atomic_t to refcount_t.
+ # +AC_DEFUN([LC_HAVE_KEY_USAGE_REFCOUNT], [ +LB_CHECK_COMPILE([if 'key.usage' is refcount_t], +key_usage_refcount, [ + #include <linux/key.h> +],[ + struct key key = { }; + + refcount_read(&key.usage); +],[ + AC_DEFINE(HAVE_KEY_USAGE_REFCOUNT, 1, [key.usage is of type refcount_t]) +]) +]) # LC_HAVE_KEY_USAGE_REFCOUNT + +# +# LC_HAVE_CRYPTO_MAX_ALG_NAME_128 +# +# Kernel version 4.11 commit f437a3f477cce402dbec6537b29e9e33962c9f73 +# switched CRYPTO_MAX_ALG_NAME from 64 to 128. +# +AC_DEFUN([LC_HAVE_CRYPTO_MAX_ALG_NAME_128], [ +LB_CHECK_COMPILE([if 'CRYPTO_MAX_ALG_NAME' is 128], +crypto_max_alg_name, [ + #include <linux/crypto.h> +],[ + #if CRYPTO_MAX_ALG_NAME != 128 + exit(1); + #endif +],[ + AC_DEFINE(HAVE_CRYPTO_MAX_ALG_NAME_128, 1, + ['CRYPTO_MAX_ALG_NAME' is 128]) +]) +]) # LC_HAVE_CRYPTO_MAX_ALG_NAME_128 + +# # Kernel version 4.12 commit 47f38c539e9a42344ff5a664942075bd4df93876 # CURRENT_TIME is not 64 bit time safe so it was replaced with # current_time() @@ -2952,7 +2973,6 @@ bi_bdev, [ ]) ]) # LC_BI_BDEV - # # LC_PROG_LINUX # @@ -3179,6 +3199,8 @@ AC_DEFUN([LC_PROG_LINUX], [ # 4.11 LC_INODEOPS_ENHANCED_GETATTR LC_VM_OPERATIONS_REMOVE_VMF_ARG + LC_HAVE_KEY_USAGE_REFCOUNT + LC_HAVE_CRYPTO_MAX_ALG_NAME_128 # 4.12 LC_CURRENT_TIME diff --git a/lustre/include/uapi/linux/lustre/lustre_user.h b/lustre/include/uapi/linux/lustre/lustre_user.h index 1ca6449..e273457 100644 --- a/lustre/include/uapi/linux/lustre/lustre_user.h +++ b/lustre/include/uapi/linux/lustre/lustre_user.h @@ -2094,7 +2094,6 @@ enum sk_crypt_alg { SK_CRYPT_INVALID = -1, SK_CRYPT_EMPTY = 0, SK_CRYPT_AES256_CTR = 1, - SK_CRYPT_MAX = 2, }; enum sk_hmac_alg { @@ -2102,17 +2101,16 @@ enum sk_hmac_alg { SK_HMAC_EMPTY = 0, SK_HMAC_SHA256 = 1, SK_HMAC_SHA512 = 2, - SK_HMAC_MAX = 3, }; struct sk_crypt_type { - char *sct_name; - size_t sct_bytes; + const char *sct_name; + int sct_type; }; struct sk_hmac_type { - char *sht_name; - size_t sht_bytes; + const char *sht_name; + int sht_type; }; enum lock_mode_user { diff --git a/lustre/ptlrpc/gss/gss_crypto.c b/lustre/ptlrpc/gss/gss_crypto.c index 17fd9cf..cadb4d4 100644 --- a/lustre/ptlrpc/gss/gss_crypto.c +++ b/lustre/ptlrpc/gss/gss_crypto.c @@ -55,7 +55,7 @@ #include "gss_internal.h" #include "gss_crypto.h" -int gss_keyblock_init(struct gss_keyblock *kb, char *alg_name, +int gss_keyblock_init(struct gss_keyblock *kb, const char *alg_name, const int alg_mode) { int rc; @@ -270,106 +270,32 @@ out: return ret; } -int gss_digest_hmac(struct crypto_hash *tfm, - rawobj_t *key, - rawobj_t *hdr, - int msgcnt, rawobj_t *msgs, +int gss_digest_hash(struct cfs_crypto_hash_desc *desc, + rawobj_t *hdr, int msgcnt, rawobj_t *msgs, int iovcnt, lnet_kiov_t *iovs, rawobj_t *cksum) { - struct hash_desc desc = { - .tfm = tfm, - .flags = 0, - }; + struct ahash_request *req = (struct ahash_request *)desc; struct scatterlist sg[1]; struct sg_table sgt; + int rc = 0; int i; - int rc; - - rc = crypto_hash_setkey(tfm, key->data, key->len); - if (rc) - return rc; - - rc = crypto_hash_init(&desc); - if (rc) - return rc; for (i = 0; i < msgcnt; i++) { if (msgs[i].len == 0) continue; rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len); - if (rc != 0) - return rc; - rc = crypto_hash_update(&desc, sg, msgs[i].len); if (rc) return rc; + ahash_request_set_crypt(req, sg, NULL, msgs[i].len); + rc = crypto_ahash_update(req); gss_teardown_sgtable(&sgt); - } - - for (i = 0; i < iovcnt; i++) { - if (iovs[i].kiov_len == 0) - continue; - - sg_init_table(sg, 1); - sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len, - iovs[i].kiov_offset); - rc
= crypto_hash_update(&desc, sg, iovs[i].kiov_len); if (rc) return rc; } - if (hdr) { - rc = gss_setup_sgtable(&sgt, sg, hdr, sizeof(*hdr)); - if (rc != 0) - return rc; - rc = crypto_hash_update(&desc, sg, sizeof(hdr->len)); - if (rc) - return rc; - - gss_teardown_sgtable(&sgt); - } - - return crypto_hash_final(&desc, cksum->data); -} - -int gss_digest_norm(struct crypto_hash *tfm, - struct gss_keyblock *kb, - rawobj_t *hdr, - int msgcnt, rawobj_t *msgs, - int iovcnt, lnet_kiov_t *iovs, - rawobj_t *cksum) -{ - struct hash_desc desc; - struct scatterlist sg[1]; - struct sg_table sgt; - int i; - int rc; - - LASSERT(kb->kb_tfm); - desc.tfm = tfm; - desc.flags = 0; - - rc = crypto_hash_init(&desc); - if (rc) - return rc; - - for (i = 0; i < msgcnt; i++) { - if (msgs[i].len == 0) - continue; - - rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len); - if (rc != 0) - return rc; - - rc = crypto_hash_update(&desc, sg, msgs[i].len); - if (rc) - return rc; - - gss_teardown_sgtable(&sgt); - } - for (i = 0; i < iovcnt; i++) { if (iovs[i].kiov_len == 0) continue; @@ -377,29 +303,26 @@ int gss_digest_norm(struct crypto_hash *tfm, sg_init_table(sg, 1); sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len, iovs[i].kiov_offset); - rc = crypto_hash_update(&desc, sg, iovs[i].kiov_len); + + ahash_request_set_crypt(req, sg, NULL, iovs[i].kiov_len); + rc = crypto_ahash_update(req); if (rc) return rc; } if (hdr) { rc = gss_setup_sgtable(&sgt, sg, hdr, sizeof(*hdr)); - if (rc != 0) - return rc; - - rc = crypto_hash_update(&desc, sg, sizeof(*hdr)); if (rc) return rc; + ahash_request_set_crypt(req, sg, NULL, hdr->len); + rc = crypto_ahash_update(req); gss_teardown_sgtable(&sgt); + if (rc) + return rc; } - rc = crypto_hash_final(&desc, cksum->data); - if (rc) - return rc; - - return gss_crypt_generic(kb->kb_tfm, 0, NULL, cksum->data, - cksum->data, cksum->len); + return rc; } int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize) diff --git a/lustre/ptlrpc/gss/gss_crypto.h b/lustre/ptlrpc/gss/gss_crypto.h index 99c2337..c0770e8 100644 --- a/lustre/ptlrpc/gss/gss_crypto.h +++ b/lustre/ptlrpc/gss/gss_crypto.h @@ -10,7 +10,7 @@ struct gss_keyblock { struct crypto_blkcipher *kb_tfm; }; -int gss_keyblock_init(struct gss_keyblock *kb, char *alg_name, +int gss_keyblock_init(struct gss_keyblock *kb, const char *alg_name, const int alg_mode); void gss_keyblock_free(struct gss_keyblock *kb); int gss_keyblock_dup(struct gss_keyblock *new, struct gss_keyblock *kb); @@ -23,12 +23,9 @@ int gss_setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg, void gss_teardown_sgtable(struct sg_table *sgt); int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv, const void *in, void *out, size_t length); -int gss_digest_hmac(struct crypto_hash *tfm, rawobj_t *key, rawobj_t *hdr, +int gss_digest_hash(struct cfs_crypto_hash_desc *desc, rawobj_t *hdr, int msgcnt, rawobj_t *msgs, int iovcnt, lnet_kiov_t *iovs, rawobj_t *cksum); -int gss_digest_norm(struct crypto_hash *tfm, struct gss_keyblock *kb, - rawobj_t *hdr, int msgcnt, rawobj_t *msgs, int iovcnt, - lnet_kiov_t *iovs, rawobj_t *cksum); int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize); int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv, int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj, diff --git a/lustre/ptlrpc/gss/gss_internal.h b/lustre/ptlrpc/gss/gss_internal.h index fc53579..f0b9d28 100644 --- a/lustre/ptlrpc/gss/gss_internal.h +++ b/lustre/ptlrpc/gss/gss_internal.h @@ -11,7 +11,8 @@ #ifndef 
__PTLRPC_GSS_GSS_INTERNAL_H_ #define __PTLRPC_GSS_GSS_INTERNAL_H_ -#include +#include +#include #include /* @@ -553,4 +554,13 @@ void __dbg_memdump(char *name, void *ptr, int size) OBD_FREE(buf, bufsize); } +static inline unsigned int ll_read_key_usage(struct key *key) +{ +#ifdef HAVE_KEY_USAGE_REFCOUNT + return refcount_read(&key->usage); +#else + return atomic_read(&key->usage); +#endif +} + #endif /* __PTLRPC_GSS_GSS_INTERNAL_H_ */ diff --git a/lustre/ptlrpc/gss/gss_keyring.c b/lustre/ptlrpc/gss/gss_keyring.c index 38aa547..8d8ab4b 100644 --- a/lustre/ptlrpc/gss/gss_keyring.c +++ b/lustre/ptlrpc/gss/gss_keyring.c @@ -107,11 +107,11 @@ static int sec_install_rctx_kr(struct ptlrpc_sec *sec, #define DUMP_KEY(key) \ { \ - CWARN("DUMP KEY: %p(%d) ref %d u%u/g%u desc %s\n", \ - key, key->serial, atomic_read(&key->usage), \ - key->uid, key->gid, \ - key->description ? key->description : "n/a" \ - ); \ + CWARN("DUMP KEY: %p(%d) ref %d u%u/g%u desc %s\n", \ + key, key->serial, ll_read_key_usage(key), \ + key->uid, key->gid, \ + key->description ? key->description : "n/a" \ + ); \ } #define key_cred(tsk) ((tsk)->cred) @@ -140,10 +140,12 @@ static inline void key_revoke_locked(struct key *key) set_bit(KEY_FLAG_REVOKED, &key->flags); } -static void ctx_upcall_timeout_kr(unsigned long data) +static void ctx_upcall_timeout_kr(cfs_timer_cb_arg_t data) { - struct ptlrpc_cli_ctx *ctx = (struct ptlrpc_cli_ctx *) data; - struct key *key = ctx2gctx_keyring(ctx)->gck_key; + struct gss_cli_ctx_keyring *gctx_kr = cfs_from_timer(gctx_kr, + data, gck_timer); + struct ptlrpc_cli_ctx *ctx = &(gctx_kr->gck_base.gc_base); + struct key *key = gctx_kr->gck_key; CWARN("ctx %p, key %p\n", ctx, key); @@ -162,11 +164,9 @@ static void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, time64_t timeout) CDEBUG(D_SEC, "ctx %p: start timer %llds\n", ctx, timeout); - init_timer(timer); + cfs_timer_setup(timer, ctx_upcall_timeout_kr, + (unsigned long)gctx_kr, 0); timer->expires = cfs_time_seconds(timeout) + jiffies; - timer->data = (unsigned long ) ctx; - timer->function = ctx_upcall_timeout_kr; - add_timer(timer); } @@ -195,27 +195,27 @@ static struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec, struct vfs_cred *vcred) { - struct ptlrpc_cli_ctx *ctx; - struct gss_cli_ctx_keyring *gctx_kr; + struct ptlrpc_cli_ctx *ctx; + struct gss_cli_ctx_keyring *gctx_kr; - OBD_ALLOC_PTR(gctx_kr); - if (gctx_kr == NULL) - return NULL; + OBD_ALLOC_PTR(gctx_kr); + if (gctx_kr == NULL) + return NULL; - OBD_ALLOC_PTR(gctx_kr->gck_timer); - if (gctx_kr->gck_timer == NULL) { - OBD_FREE_PTR(gctx_kr); - return NULL; - } - init_timer(gctx_kr->gck_timer); + OBD_ALLOC_PTR(gctx_kr->gck_timer); + if (gctx_kr->gck_timer == NULL) { + OBD_FREE_PTR(gctx_kr); + return NULL; + } + cfs_timer_setup(gctx_kr->gck_timer, NULL, 0, 0); - ctx = &gctx_kr->gck_base.gc_base; + ctx = &gctx_kr->gck_base.gc_base; - if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) { - OBD_FREE_PTR(gctx_kr->gck_timer); - OBD_FREE_PTR(gctx_kr); - return NULL; - } + if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) { + OBD_FREE_PTR(gctx_kr->gck_timer); + OBD_FREE_PTR(gctx_kr); + return NULL; + } ctx->cc_expire = ktime_get_real_seconds() + KEYRING_UPCALL_TIMEOUT; clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags); @@ -386,7 +386,7 @@ static int key_set_payload(struct key *key, unsigned int index, static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx) { LASSERT(atomic_read(&ctx->cc_refcount) > 0); - LASSERT(atomic_read(&key->usage) > 0); +
LASSERT(ll_read_key_usage(key) > 0); LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL); LASSERT(!key_get_payload(key, 0)); @@ -861,7 +861,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec, if (likely(ctx)) { LASSERT(atomic_read(&ctx->cc_refcount) >= 1); LASSERT(ctx2gctx_keyring(ctx)->gck_key == key); - LASSERT(atomic_read(&key->usage) >= 2); + LASSERT(ll_read_key_usage(key) >= 2); /* simply take a ref and return. it's upper layer's * responsibility to detect & replace dead ctx. */ @@ -1101,7 +1101,7 @@ int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq) atomic_read(&gctx->gc_seq), gctx->gc_win, key ? key->serial : 0, - key ? atomic_read(&key->usage) : 0, + key ? ll_read_key_usage(key) : 0, gss_handle_to_u64(&gctx->gc_handle), gss_handle_to_u64(&gctx->gc_svc_handle), mech); diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c index 0508423..305402d 100644 --- a/lustre/ptlrpc/gss/gss_krb5_mech.c +++ b/lustre/ptlrpc/gss/gss_krb5_mech.c @@ -97,7 +97,7 @@ static struct krb5_enctype enctypes[] = { [ENCTYPE_DES3_CBC_RAW] = { /* des3-hmac-sha1 */ .ke_dispname = "des3-hmac-sha1", .ke_enc_name = "cbc(des3_ede)", - .ke_hash_name = "hmac(sha1)", + .ke_hash_name = "sha1", .ke_hash_size = 20, .ke_conf_size = 8, .ke_hash_hmac = 1, @@ -105,7 +105,7 @@ static struct krb5_enctype enctypes[] = { [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = { /* aes128-cts */ .ke_dispname = "aes128-cts-hmac-sha1-96", .ke_enc_name = "cbc(aes)", - .ke_hash_name = "hmac(sha1)", + .ke_hash_name = "sha1", .ke_hash_size = 12, .ke_conf_size = 16, .ke_hash_hmac = 1, @@ -113,7 +113,7 @@ static struct krb5_enctype enctypes[] = { [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = { /* aes256-cts */ .ke_dispname = "aes256-cts-hmac-sha1-96", .ke_enc_name = "cbc(aes)", - .ke_hash_name = "hmac(sha1)", + .ke_hash_name = "sha1", .ke_hash_size = 12, .ke_conf_size = 16, .ke_hash_hmac = 1, @@ -121,33 +121,31 @@ static struct krb5_enctype enctypes[] = { [ENCTYPE_ARCFOUR_HMAC] = { /* arcfour-hmac-md5 */ .ke_dispname = "arcfour-hmac-md5", .ke_enc_name = "ecb(arc4)", - .ke_hash_name = "hmac(md5)", + .ke_hash_name = "md5", .ke_hash_size = 16, .ke_conf_size = 8, .ke_hash_hmac = 1, } }; -#define MAX_ENCTYPES sizeof(enctypes)/sizeof(struct krb5_enctype) - static const char * enctype2str(__u32 enctype) { - if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname) - return enctypes[enctype].ke_dispname; + if (enctype < ARRAY_SIZE(enctypes) && enctypes[enctype].ke_dispname) + return enctypes[enctype].ke_dispname; - return "unknown"; + return "unknown"; } static int krb5_init_keys(struct krb5_ctx *kctx) { - struct krb5_enctype *ke; + struct krb5_enctype *ke; - if (kctx->kc_enctype >= MAX_ENCTYPES || - enctypes[kctx->kc_enctype].ke_hash_size == 0) { - CERROR("unsupported enctype %x\n", kctx->kc_enctype); - return -1; - } + if (kctx->kc_enctype >= ARRAY_SIZE(enctypes) || + enctypes[kctx->kc_enctype].ke_hash_size == 0) { + CERROR("unsupported enctype %x\n", kctx->kc_enctype); + return -1; + } ke = &enctypes[kctx->kc_enctype]; @@ -449,39 +447,58 @@ __s32 krb5_make_checksum(__u32 enctype, int iovcnt, lnet_kiov_t *iovs, rawobj_t *cksum) { - struct krb5_enctype *ke = &enctypes[enctype]; - struct crypto_hash *tfm; - rawobj_t hdr; - __u32 code = GSS_S_FAILURE; - int rc; - - if (!(tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0))) { - CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name); - return GSS_S_FAILURE; - } + struct krb5_enctype *ke = &enctypes[enctype]; + struct cfs_crypto_hash_desc *desc = NULL; + enum 
cfs_crypto_hash_alg hash_algo; + rawobj_t hdr; + int rc; + + hash_algo = cfs_crypto_hash_alg(ke->ke_hash_name); + + /* For the cbc(des) case we want md5 instead of hmac(md5) */ + if (strcmp(ke->ke_enc_name, "cbc(des)")) + desc = cfs_crypto_hash_init(hash_algo, kb->kb_key.data, + kb->kb_key.len); + else + desc = cfs_crypto_hash_init(hash_algo, NULL, 0); + if (IS_ERR(desc)) { + rc = PTR_ERR(desc); + CERROR("failed to alloc hash %s : rc = %d\n", + ke->ke_hash_name, rc); + goto out_no_hash; + } - cksum->len = crypto_hash_digestsize(tfm); - OBD_ALLOC_LARGE(cksum->data, cksum->len); - if (!cksum->data) { - cksum->len = 0; - goto out_tfm; - } + cksum->len = cfs_crypto_hash_digestsize(hash_algo); + OBD_ALLOC_LARGE(cksum->data, cksum->len); + if (!cksum->data) { + cksum->len = 0; + rc = -ENOMEM; + goto out_free_hash; + } hdr.data = (__u8 *)khdr; hdr.len = sizeof(*khdr); - if (ke->ke_hash_hmac) - rc = gss_digest_hmac(tfm, &kb->kb_key, - &hdr, msgcnt, msgs, iovcnt, iovs, cksum); - else - rc = gss_digest_norm(tfm, kb, - &hdr, msgcnt, msgs, iovcnt, iovs, cksum); + rc = gss_digest_hash(desc, &hdr, msgcnt, msgs, + iovcnt, iovs, cksum); + if (rc) + goto out_free_hash; - if (rc == 0) - code = GSS_S_COMPLETE; -out_tfm: - crypto_free_hash(tfm); - return code; + if (!ke->ke_hash_hmac) { + LASSERT(kb->kb_tfm); + + cfs_crypto_hash_final(desc, cksum->data, &cksum->len); + rc = gss_crypt_generic(kb->kb_tfm, 0, NULL, + cksum->data, cksum->data, + cksum->len); + goto out_no_hash; + } + +out_free_hash: + if (desc) + cfs_crypto_hash_final(desc, cksum->data, &cksum->len); +out_no_hash: + return rc ? GSS_S_FAILURE : GSS_S_COMPLETE; } static void fill_krb5_header(struct krb5_ctx *kctx, @@ -563,7 +580,8 @@ __u32 gss_get_mic_kerberos(struct gss_ctx *gctx, struct krb5_ctx *kctx = gctx->internal_ctx_id; struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; struct krb5_header *khdr; - rawobj_t cksum = RAWOBJ_EMPTY; + rawobj_t cksum = RAWOBJ_EMPTY; + u32 major; /* fill krb5 header */ LASSERT(token->len >= sizeof(*khdr)); @@ -571,18 +589,20 @@ __u32 gss_get_mic_kerberos(struct gss_ctx *gctx, fill_krb5_header(kctx, khdr, 0); /* checksum */ - if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, - khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) - return GSS_S_FAILURE; + if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, khdr, + msgcnt, msgs, iovcnt, iovs, &cksum)) + GOTO(out_free_cksum, major = GSS_S_FAILURE); LASSERT(cksum.len >= ke->ke_hash_size); LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size); memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size, ke->ke_hash_size); - token->len = sizeof(*khdr) + ke->ke_hash_size; - rawobj_free(&cksum); - return GSS_S_COMPLETE; + token->len = sizeof(*khdr) + ke->ke_hash_size; + major = GSS_S_COMPLETE; +out_free_cksum: + rawobj_free(&cksum); + return major; } static @@ -593,11 +613,11 @@ __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx, lnet_kiov_t *iovs, rawobj_t *token) { - struct krb5_ctx *kctx = gctx->internal_ctx_id; - struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; - struct krb5_header *khdr; - rawobj_t cksum = RAWOBJ_EMPTY; - __u32 major; + struct krb5_ctx *kctx = gctx->internal_ctx_id; + struct krb5_enctype *ke = &enctypes[kctx->kc_enctype]; + struct krb5_header *khdr; + rawobj_t cksum = RAWOBJ_EMPTY; + u32 major; if (token->len < sizeof(*khdr)) { CERROR("short signature: %u\n", token->len); @@ -609,31 +629,32 @@ __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx, major = verify_krb5_header(kctx, khdr, 0); if (major != GSS_S_COMPLETE) { CERROR("bad krb5 
header\n"); - return major; + goto out; } if (token->len < sizeof(*khdr) + ke->ke_hash_size) { CERROR("short signature: %u, require %d\n", token->len, (int) sizeof(*khdr) + ke->ke_hash_size); - return GSS_S_FAILURE; + GOTO(out, major = GSS_S_FAILURE); } if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) { CERROR("failed to make checksum\n"); - return GSS_S_FAILURE; + GOTO(out_free_cksum, major = GSS_S_FAILURE); } LASSERT(cksum.len >= ke->ke_hash_size); if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size, ke->ke_hash_size)) { CERROR("checksum mismatch\n"); - rawobj_free(&cksum); - return GSS_S_BAD_SIG; - } - - rawobj_free(&cksum); - return GSS_S_COMPLETE; + GOTO(out_free_cksum, major = GSS_S_BAD_SIG); + } + major = GSS_S_COMPLETE; +out_free_cksum: + rawobj_free(&cksum); +out: + return major; } /* @@ -944,6 +965,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, rawobj_t data_desc[3], cipher; __u8 conf[GSS_MAX_CIPHER_BLOCK]; __u8 local_iv[16] = {0}; + u32 major; int rc = 0; LASSERT(ke); @@ -998,7 +1020,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, /* compute checksum */ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi, khdr, 3, data_desc, 0, NULL, &cksum)) - return GSS_S_FAILURE; + GOTO(out_free_cksum, major = GSS_S_FAILURE); LASSERT(cksum.len >= ke->ke_hash_size); /* @@ -1020,13 +1042,13 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr)); if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) { - rawobj_t arc4_keye; + rawobj_t arc4_keye = RAWOBJ_EMPTY; struct crypto_blkcipher *arc4_tfm; if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi, NULL, 1, &cksum, 0, NULL, &arc4_keye)) { CERROR("failed to obtain arc4 enc key\n"); - GOTO(arc4_out, rc = -EACCES); + GOTO(arc4_out_key, rc = -EACCES); } arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0); @@ -1047,29 +1069,27 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, arc4_out_tfm: crypto_free_blkcipher(arc4_tfm); arc4_out_key: - rawobj_free(&arc4_keye); -arc4_out: - do {} while(0); /* just to avoid compile warning */ + rawobj_free(&arc4_keye); } else { rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3, data_desc, &cipher, 1); } - if (rc != 0) { - rawobj_free(&cksum); - return GSS_S_FAILURE; - } + if (rc) + GOTO(out_free_cksum, major = GSS_S_FAILURE); /* fill in checksum */ LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size); memcpy((char *)(khdr + 1) + cipher.len, cksum.data + cksum.len - ke->ke_hash_size, ke->ke_hash_size); - rawobj_free(&cksum); - /* final token length */ - token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size; - return GSS_S_COMPLETE; + /* final token length */ + token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size; + major = GSS_S_COMPLETE; +out_free_cksum: + rawobj_free(&cksum); + return major; } static @@ -1120,7 +1140,8 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx, rawobj_t cksum = RAWOBJ_EMPTY; rawobj_t data_desc[1], cipher; __u8 conf[GSS_MAX_CIPHER_BLOCK]; - int rc = 0; + int rc = 0; + u32 major; LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); LASSERT(ke); @@ -1174,7 +1195,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx, khdr, 1, data_desc, desc->bd_iov_count, GET_KIOV(desc), &cksum)) - return GSS_S_FAILURE; + GOTO(out_free_cksum, major = GSS_S_FAILURE); LASSERT(cksum.len >= ke->ke_hash_size); /* @@ -1202,22 +1223,21 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx, rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr, conf, desc, 
&cipher, adj_nob); } - - if (rc != 0) { - rawobj_free(&cksum); - return GSS_S_FAILURE; - } + if (rc) + GOTO(out_free_cksum, major = GSS_S_FAILURE); /* fill in checksum */ LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size); memcpy((char *)(khdr + 1) + cipher.len, cksum.data + cksum.len - ke->ke_hash_size, ke->ke_hash_size); - rawobj_free(&cksum); - /* final token length */ - token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size; - return GSS_S_COMPLETE; + /* final token length */ + token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size; + major = GSS_S_COMPLETE; +out_free_cksum: + rawobj_free(&cksum); + return major; } static diff --git a/lustre/ptlrpc/gss/gss_sk_mech.c b/lustre/ptlrpc/gss/gss_sk_mech.c index 621a985..9fab35a 100644 --- a/lustre/ptlrpc/gss/gss_sk_mech.c +++ b/lustre/ptlrpc/gss/gss_sk_mech.c @@ -61,14 +61,14 @@ #define SK_IV_REV_START (1ULL << 63) struct sk_ctx { - __u16 sc_hmac; - __u16 sc_crypt; - __u32 sc_expire; - __u32 sc_host_random; - __u32 sc_peer_random; - atomic64_t sc_iv; - rawobj_t sc_hmac_key; - struct gss_keyblock sc_session_kb; + enum cfs_crypto_crypt_alg sc_crypt; + enum cfs_crypto_hash_alg sc_hmac; + __u32 sc_expire; + __u32 sc_host_random; + __u32 sc_peer_random; + atomic64_t sc_iv; + rawobj_t sc_hmac_key; + struct gss_keyblock sc_session_kb; }; struct sk_hdr { @@ -87,24 +87,6 @@ struct sk_wire { rawobj_t skw_hmac; }; -static struct sk_crypt_type sk_crypt_types[] = { - [SK_CRYPT_AES256_CTR] = { - .sct_name = "ctr(aes)", - .sct_bytes = 32, - }, -}; - -static struct sk_hmac_type sk_hmac_types[] = { - [SK_HMAC_SHA256] = { - .sht_name = "hmac(sha256)", - .sht_bytes = 32, - }, - [SK_HMAC_SHA512] = { - .sht_name = "hmac(sha512)", - .sht_bytes = 64, - }, -}; - static inline unsigned long sk_block_mask(unsigned long len, int blocksize) { return (len + blocksize - 1) & (~(blocksize - 1)); @@ -147,22 +129,18 @@ void sk_construct_rfc3686_iv(__u8 *iv, __u32 nonce, __u64 partial_iv) memcpy(iv, &ctr, sizeof(ctr)); } -static int sk_init_keys(struct sk_ctx *skc) -{ - return gss_keyblock_init(&skc->sc_session_kb, - sk_crypt_types[skc->sc_crypt].sct_name, 0); -} - static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc) { char *ptr = inbuf->data; char *end = inbuf->data + inbuf->len; - __u32 tmp; + char sk_hmac[CRYPTO_MAX_ALG_NAME]; + char sk_crypt[CRYPTO_MAX_ALG_NAME]; + u32 tmp; /* see sk_serialize_kctx() for format from userspace side */ /* 1. Version */ if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) { - CERROR("Failed to read shared key interface version"); + CERROR("Failed to read shared key interface version\n"); return -1; } if (tmp != SK_INTERFACE_VERSION) { @@ -171,28 +149,34 @@ static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc) } /* 2. HMAC type */ - if (gss_get_bytes(&ptr, end, &skc->sc_hmac, sizeof(skc->sc_hmac))) { - CERROR("Failed to read HMAC algorithm type"); + if (gss_get_bytes(&ptr, end, &sk_hmac, sizeof(sk_hmac))) { + CERROR("Failed to read HMAC algorithm type\n"); return -1; } - if (skc->sc_hmac <= SK_HMAC_EMPTY || skc->sc_hmac >= SK_HMAC_MAX) { - CERROR("Invalid hmac type: %d\n", skc->sc_hmac); + + skc->sc_hmac = cfs_crypto_hash_alg(sk_hmac); + if (skc->sc_hmac != CFS_HASH_ALG_NULL && + skc->sc_hmac != CFS_HASH_ALG_SHA256 && + skc->sc_hmac != CFS_HASH_ALG_SHA512) { + CERROR("Invalid hmac type: %s\n", sk_hmac); return -1; } /* 3. 
crypt type */ - if (gss_get_bytes(&ptr, end, &skc->sc_crypt, sizeof(skc->sc_crypt))) { - CERROR("Failed to read crypt algorithm type"); + if (gss_get_bytes(&ptr, end, &sk_crypt, sizeof(sk_crypt))) { + CERROR("Failed to read crypt algorithm type\n"); return -1; } - if (skc->sc_crypt <= SK_CRYPT_EMPTY || skc->sc_crypt >= SK_CRYPT_MAX) { - CERROR("Invalid crypt type: %d\n", skc->sc_crypt); + + skc->sc_crypt = cfs_crypto_crypt_alg(sk_crypt); + if (skc->sc_crypt == CFS_CRYPT_ALG_UNKNOWN) { + CERROR("Invalid crypt type: %s\n", sk_crypt); return -1; } /* 4. expiration time */ if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) { - CERROR("Failed to read context expiration time"); + CERROR("Failed to read context expiration time\n"); return -1; } skc->sc_expire = tmp + ktime_get_real_seconds(); @@ -200,20 +184,20 @@ static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc) /* 5. host random is used as nonce for encryption */ if (gss_get_bytes(&ptr, end, &skc->sc_host_random, sizeof(skc->sc_host_random))) { - CERROR("Failed to read host random "); + CERROR("Failed to read host random\n"); return -1; } /* 6. peer random is used as nonce for decryption */ if (gss_get_bytes(&ptr, end, &skc->sc_peer_random, sizeof(skc->sc_peer_random))) { - CERROR("Failed to read peer random "); + CERROR("Failed to read peer random\n"); return -1; } /* 7. HMAC key */ if (gss_get_rawobj(&ptr, end, &skc->sc_hmac_key)) { - CERROR("Failed to read HMAC key"); + CERROR("Failed to read HMAC key\n"); return -1; } if (skc->sc_hmac_key.len <= SK_MIN_SIZE) { @@ -224,7 +208,7 @@ static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc) /* 8. Session key, can be empty if not using privacy mode */ if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) { - CERROR("Failed to read session key"); + CERROR("Failed to read session key\n"); return -1; } @@ -262,7 +246,8 @@ __u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context) /* Only privacy mode needs to initialize keys */ if (skc->sc_session_kb.kb_key.len > 0) { privacy = true; - if (sk_init_keys(skc)) + if (gss_keyblock_init(&skc->sc_session_kb, + cfs_crypto_crypt_name(skc->sc_crypt), 0)) goto out_err; } @@ -303,7 +288,9 @@ __u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old, /* Only privacy mode needs to initialize keys */ if (skc_new->sc_session_kb.kb_key.len > 0) - if (sk_init_keys(skc_new)) + if (gss_keyblock_init(&skc_new->sc_session_kb, + cfs_crypto_crypt_name(skc_new->sc_crypt), + 0)) goto out_err; gss_context_new->internal_ctx_id = skc_new; @@ -327,24 +314,26 @@ __u32 gss_inquire_context_sk(struct gss_ctx *gss_context, } static -__u32 sk_make_hmac(char *alg_name, rawobj_t *key, int msg_count, rawobj_t *msgs, - int iov_count, lnet_kiov_t *iovs, rawobj_t *token) +u32 sk_make_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key, int msg_count, + rawobj_t *msgs, int iov_count, lnet_kiov_t *iovs, + rawobj_t *token) { - struct crypto_hash *tfm; - int rc; - - tfm = crypto_alloc_hash(alg_name, 0, 0); - if (IS_ERR(tfm)) - return GSS_S_FAILURE; + struct cfs_crypto_hash_desc *desc; + int rc2, rc; - rc = GSS_S_FAILURE; - LASSERT(token->len >= crypto_hash_digestsize(tfm)); - if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs, - token)) - rc = GSS_S_COMPLETE; + desc = cfs_crypto_hash_init(algo, key->data, key->len); + if (IS_ERR(desc)) { + rc = PTR_ERR(desc); + goto out_init_failed; + } - crypto_free_hash(tfm); - return rc; + rc2 = gss_digest_hash(desc, NULL, msg_count, msgs, iov_count, iovs, + token); + rc = 
cfs_crypto_hash_final(desc, token->data, &token->len); + if (!rc && rc2) + rc = rc2; +out_init_failed: + return rc ? GSS_S_FAILURE : GSS_S_COMPLETE; } static @@ -356,20 +345,22 @@ __u32 gss_get_mic_sk(struct gss_ctx *gss_context, rawobj_t *token) { struct sk_ctx *skc = gss_context->internal_ctx_id; - return sk_make_hmac(sk_hmac_types[skc->sc_hmac].sht_name, + + return sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, message_count, messages, iov_count, iovs, token); } static -__u32 sk_verify_hmac(struct sk_hmac_type *sht, rawobj_t *key, int message_count, - rawobj_t *messages, int iov_count, lnet_kiov_t *iovs, - rawobj_t *token) +u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key, + int message_count, rawobj_t *messages, + int iov_count, lnet_kiov_t *iovs, + rawobj_t *token) { rawobj_t checksum = RAWOBJ_EMPTY; __u32 rc = GSS_S_FAILURE; - checksum.len = sht->sht_bytes; + checksum.len = cfs_crypto_hash_digestsize(algo); if (token->len < checksum.len) { CDEBUG(D_SEC, "Token received too short, expected %d " "received %d\n", token->len, checksum.len); @@ -380,8 +371,8 @@ __u32 sk_verify_hmac(struct sk_hmac_type *sht, rawobj_t *key, int message_count, if (!checksum.data) return rc; - if (sk_make_hmac(sht->sht_name, key, message_count, messages, - iov_count, iovs, &checksum)) { + if (sk_make_hmac(algo, key, message_count, + messages, iov_count, iovs, &checksum)) { CDEBUG(D_SEC, "Failed to create checksum to validate\n"); goto cleanup; } @@ -404,23 +395,20 @@ cleanup: * to decrypt up to the number of bytes actually specified from the sender * (bd_nob) otherwise the calculated HMAC will be incorrect. */ static -__u32 sk_verify_bulk_hmac(struct sk_hmac_type *sht, rawobj_t *key, - int msgcnt, rawobj_t *msgs, int iovcnt, - lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token) +u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac, rawobj_t *key, + int msgcnt, rawobj_t *msgs, int iovcnt, + lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token) { + struct cfs_crypto_hash_desc *desc; rawobj_t checksum = RAWOBJ_EMPTY; - struct crypto_hash *tfm; - struct hash_desc desc = { - .tfm = NULL, - .flags = 0, - }; + struct ahash_request *req; struct scatterlist sg[1]; + int rc = GSS_S_FAILURE; struct sg_table sgt; int bytes; int i; - int rc = GSS_S_FAILURE; - checksum.len = sht->sht_bytes; + checksum.len = cfs_crypto_hash_digestsize(sc_hmac); if (token->len < checksum.len) { CDEBUG(D_SEC, "Token received too short, expected %d " "received %d\n", token->len, checksum.len); @@ -431,31 +419,21 @@ __u32 sk_verify_bulk_hmac(struct sk_hmac_type *sht, rawobj_t *key, if (!checksum.data) return rc; - tfm = crypto_alloc_hash(sht->sht_name, 0, 0); - if (IS_ERR(tfm)) + desc = cfs_crypto_hash_init(sc_hmac, key->data, key->len); + if (IS_ERR(desc)) goto cleanup; - desc.tfm = tfm; - - LASSERT(token->len >= crypto_hash_digestsize(tfm)); - - rc = crypto_hash_setkey(tfm, key->data, key->len); - if (rc) - goto hash_cleanup; - - rc = crypto_hash_init(&desc); - if (rc) - goto hash_cleanup; - + req = (struct ahash_request *) desc; for (i = 0; i < msgcnt; i++) { - if (msgs[i].len == 0) + if (!msgs[i].len) continue; rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len); if (rc != 0) goto hash_cleanup; - rc = crypto_hash_update(&desc, sg, msgs[i].len); + ahash_request_set_crypt(req, sg, NULL, msgs[i].len); + rc = crypto_ahash_update(req); if (rc) { gss_teardown_sgtable(&sgt); goto hash_cleanup; @@ -474,13 +452,12 @@ __u32 sk_verify_bulk_hmac(struct sk_hmac_type *sht, rawobj_t *key, sg_init_table(sg, 1); sg_set_page(&sg[0],
iovs[i].kiov_page, bytes, iovs[i].kiov_offset); - rc = crypto_hash_update(&desc, sg, bytes); + ahash_request_set_crypt(req, sg, NULL, bytes); + rc = crypto_ahash_update(req); if (rc) goto hash_cleanup; } - crypto_hash_final(&desc, checksum.data); - if (memcmp(token->data, checksum.data, checksum.len)) { rc = GSS_S_BAD_SIG; goto hash_cleanup; @@ -489,7 +466,7 @@ __u32 sk_verify_bulk_hmac(struct sk_hmac_type *sht, rawobj_t *key, rc = GSS_S_COMPLETE; hash_cleanup: - crypto_free_hash(tfm); + cfs_crypto_hash_final(desc, checksum.data, &checksum.len); cleanup: OBD_FREE_LARGE(checksum.data, checksum.len); @@ -506,7 +483,8 @@ __u32 gss_verify_mic_sk(struct gss_ctx *gss_context, rawobj_t *token) { struct sk_ctx *skc = gss_context->internal_ctx_id; - return sk_verify_hmac(&sk_hmac_types[skc->sc_hmac], &skc->sc_hmac_key, + + return sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key, message_count, messages, iov_count, iovs, token); } @@ -516,7 +494,7 @@ __u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header, rawobj_t *token) { struct sk_ctx *skc = gss_context->internal_ctx_id; - struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac]; + size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac); struct sk_wire skw; struct sk_hdr skh; rawobj_t msgbufs[3]; @@ -540,7 +518,7 @@ __u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header, sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv); skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len; - skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes; + skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes; if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message, &skw.skw_cipher, 1)) return GSS_S_FAILURE; @@ -551,9 +529,9 @@ __u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header, msgbufs[2] = skw.skw_cipher; skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len; - skw.skw_hmac.len = sht->sht_bytes; - if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 3, msgbufs, 0, - NULL, &skw.skw_hmac)) + skw.skw_hmac.len = sht_bytes; + if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, + 3, msgbufs, 0, NULL, &skw.skw_hmac)) return GSS_S_FAILURE; token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len; @@ -566,7 +544,7 @@ __u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header, rawobj_t *token, rawobj_t *message) { struct sk_ctx *skc = gss_context->internal_ctx_id; - struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac]; + size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac); struct sk_wire skw; struct sk_hdr *skh; rawobj_t msgbufs[3]; @@ -576,15 +554,15 @@ __u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header, LASSERT(skc->sc_session_kb.kb_tfm); - if (token->len < sizeof(skh) + sht->sht_bytes) + if (token->len < sizeof(skh) + sht_bytes) return GSS_S_DEFECTIVE_TOKEN; skw.skw_header.data = token->data; skw.skw_header.len = sizeof(struct sk_hdr); skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len; - skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes; + skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes; skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len; - skw.skw_hmac.len = sht->sht_bytes; + skw.skw_hmac.len = sht_bytes; blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm); if (skw.skw_cipher.len % blocksize != 0) @@ -599,8 +577,8 @@ __u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header, msgbufs[0] = skw.skw_header; msgbufs[1] = 
*gss_header; msgbufs[2] = skw.skw_cipher; - rc = sk_verify_hmac(sht, &skc->sc_hmac_key, 3, msgbufs, 0, NULL, - &skw.skw_hmac); + rc = sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key, 3, msgbufs, + 0, NULL, &skw.skw_hmac); if (rc) return rc; @@ -809,7 +787,7 @@ __u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context, int adj_nob) { struct sk_ctx *skc = gss_context->internal_ctx_id; - struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac]; + size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac); struct sk_wire skw; struct sk_hdr skh; __u8 local_iv[SK_IV_SIZE]; @@ -826,14 +804,14 @@ __u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context, sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv); skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len; - skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes; + skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes; if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv, desc, &skw.skw_cipher, adj_nob)) return GSS_S_FAILURE; skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len; - skw.skw_hmac.len = sht->sht_bytes; - if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 1, &skw.skw_cipher, + skw.skw_hmac.len = sht_bytes; + if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1, &skw.skw_cipher, desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac)) return GSS_S_FAILURE; @@ -846,7 +824,7 @@ __u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context, rawobj_t *token, int adj_nob) { struct sk_ctx *skc = gss_context->internal_ctx_id; - struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac]; + size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac); struct sk_wire skw; struct sk_hdr *skh; __u8 local_iv[SK_IV_SIZE]; @@ -854,25 +832,25 @@ __u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context, LASSERT(skc->sc_session_kb.kb_tfm); - if (token->len < sizeof(skh) + sht->sht_bytes) + if (token->len < sizeof(skh) + sht_bytes) return GSS_S_DEFECTIVE_TOKEN; skw.skw_header.data = token->data; skw.skw_header.len = sizeof(struct sk_hdr); skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len; - skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes; + skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes; skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len; - skw.skw_hmac.len = sht->sht_bytes; + skw.skw_hmac.len = sht_bytes; skh = (struct sk_hdr *)skw.skw_header.data; rc = sk_verify_header(skh); if (rc != GSS_S_COMPLETE) return rc; - rc = sk_verify_bulk_hmac(&sk_hmac_types[skc->sc_hmac], - &skc->sc_hmac_key, 1, &skw.skw_cipher, - desc->bd_iov_count, GET_ENC_KIOV(desc), - desc->bd_nob, &skw.skw_hmac); + rc = sk_verify_bulk_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1, + &skw.skw_cipher, desc->bd_iov_count, + GET_ENC_KIOV(desc), desc->bd_nob, + &skw.skw_hmac); if (rc) return rc; diff --git a/lustre/utils/gss/lgss_sk.c b/lustre/utils/gss/lgss_sk.c index 0a3d3fd..4b3d366 100644 --- a/lustre/utils/gss/lgss_sk.c +++ b/lustre/utils/gss/lgss_sk.c @@ -55,42 +55,6 @@ #define SK_DEFAULT_PRIME_BITS 2048 #define SK_DEFAULT_NODEMAP "default" -/* Names match up with openssl enc and dgst commands */ -char *sk_crypt2name[] = { - [SK_CRYPT_EMPTY] = "NONE", - [SK_CRYPT_AES256_CTR] = "AES-256-CTR", -}; - -char *sk_hmac2name[] = { - [SK_HMAC_EMPTY] = "NONE", - [SK_HMAC_SHA256] = "SHA256", - [SK_HMAC_SHA512] = "SHA512", -}; - -static int sk_name2crypt(char *name) -{ - int i; - - for (i = 0; i < SK_CRYPT_MAX; i++) { - if (strcasecmp(name, sk_crypt2name[i]) == 0) - return i; - } - - return SK_CRYPT_INVALID; -} 
- -static int sk_name2hmac(char *name) -{ - int i; - - for (i = 0; i < SK_HMAC_MAX; i++) { - if (strcasecmp(name, sk_hmac2name[i]) == 0) - return i; - } - - return SK_HMAC_INVALID; -} - static void usage(FILE *fp, char *program) { int i; @@ -104,14 +68,14 @@ static void usage(FILE *fp, char *program) fprintf(fp, "Modify/Write Options:\n"); fprintf(fp, "-c|--crypt Cipher for encryption " "(Default: AES Counter mode)\n"); - for (i = 1; i < SK_CRYPT_MAX; i++) - fprintf(fp, " %s\n", sk_crypt2name[i]); - + for (i = 1; i < ARRAY_SIZE(sk_crypt_algs); i++) + fprintf(fp, " %s\n", + sk_crypt_algs[i].sct_name); fprintf(fp, "-i|--hmac Hash algorithm for integrity " "(Default: SHA256)\n"); - for (i = 1; i < SK_HMAC_MAX; i++) - fprintf(fp, " %s\n", sk_hmac2name[i]); - + for (i = 1; i < ARRAY_SIZE(sk_hmac_algs); i++) + fprintf(fp, " %s\n", + sk_hmac_algs[i].sht_name); fprintf(fp, "-e|--expire Seconds before contexts from " "key expire (Default: %d seconds (%.3g days))\n", SK_DEFAULT_EXPIRE, (double)SK_DEFAULT_EXPIRE / 3600 / 24); @@ -126,8 +90,10 @@ static void usage(FILE *fp, char *program) "client)\n"); fprintf(fp, "-k|--key-bits Shared key length in bits " "(Default: %d)\n", SK_DEFAULT_SK_KEYLEN); - fprintf(fp, "-d|--data Key random data source " - "(Default: /dev/random)\n\n"); + fprintf(fp, "-d|--data Key data source for new keys " + "(Default: /dev/random)\n"); + fprintf(fp, " Not a seed value. " + "This is the actual key value.\n\n"); fprintf(fp, "Other Options:\n"); fprintf(fp, "-v|--verbose Increase verbosity for errors\n"); exit(EXIT_FAILURE); @@ -238,8 +204,8 @@ static int print_config(char *filename) if (config->skc_type & SK_TYPE_CLIENT) printf(" client"); printf("\n"); - printf("HMAC alg: %s\n", sk_hmac2name[config->skc_hmac_alg]); - printf("Crypto alg: %s\n", sk_crypt2name[config->skc_crypt_alg]); + printf("HMAC alg: %s\n", sk_hmac2name(config->skc_hmac_alg)); + printf("Crypto alg: %s\n", sk_crypt2name(config->skc_crypt_alg)); printf("Ctx Expiration: %u seconds\n", config->skc_expire); printf("Shared keylen: %u bits\n", config->skc_shared_keylen); printf("Prime length: %u bits\n", config->skc_prime_bits); @@ -330,7 +296,7 @@ int main(int argc, char **argv) int verbose = 0; int i; int opt; - enum sk_key_type type = SK_TYPE_INVALID; + enum sk_key_type type = SK_TYPE_INVALID; bool generate_prime = false; DH *dh; @@ -492,6 +458,12 @@ int main(int argc, char **argv) return EXIT_FAILURE; } + if (modify && datafile) { + fprintf(stderr, + "error: data file option not valid in key modify\n"); + return EXIT_FAILURE; + } + if (modify) { config = sk_read_file(modify); if (!config) diff --git a/lustre/utils/gss/sk_utils.c b/lustre/utils/gss/sk_utils.c index 49b5386..4b4a911 100644 --- a/lustre/utils/gss/sk_utils.c +++ b/lustre/utils/gss/sk_utils.c @@ -45,24 +45,6 @@ #define SK_PBKDF2_ITERATIONS 10000 -static struct sk_crypt_type sk_crypt_types[] = { - [SK_CRYPT_AES256_CTR] = { - .sct_name = "ctr(aes)", - .sct_bytes = 32, - }, -}; - -static struct sk_hmac_type sk_hmac_types[] = { - [SK_HMAC_SHA256] = { - .sht_name = "hmac(sha256)", - .sht_bytes = 32, - }, - [SK_HMAC_SHA512] = { - .sht_name = "hmac(sha512)", - .sht_bytes = 64, - }, -}; - #ifdef _NEW_BUILD_ # include "lgss_utils.h" #else @@ -364,18 +346,22 @@ int sk_validate_config(const struct sk_keyfile_config *config) printerr(0, "Null configuration passed\n"); return -1; } + if (config->skc_version != SK_CONF_VERSION) { printerr(0, "Invalid version\n"); return -1; } - if (config->skc_hmac_alg >= SK_HMAC_MAX) { + + if (config->skc_hmac_alg == 
SK_HMAC_INVALID) { printerr(0, "Invalid HMAC algorithm\n"); return -1; } - if (config->skc_crypt_alg >= SK_CRYPT_MAX) { + + if (config->skc_crypt_alg == SK_CRYPT_INVALID) { printerr(0, "Invalid crypt algorithm\n"); return -1; } + if (config->skc_expire < 60 || config->skc_expire > INT_MAX) { /* Try to limit key expiration to some reasonable minimum and * also prevent values over INT_MAX because there appears @@ -660,8 +646,8 @@ struct sk_cred *sk_create_cred(const char *tgt, const char *nodemap, kctx = &skc->sc_kctx; kctx->skc_version = config->skc_version; - kctx->skc_hmac_alg = config->skc_hmac_alg; - kctx->skc_crypt_alg = config->skc_crypt_alg; + strcpy(kctx->skc_hmac_alg, sk_hmac2name(config->skc_hmac_alg)); + strcpy(kctx->skc_crypt_alg, sk_crypt2name(config->skc_crypt_alg)); kctx->skc_expire = config->skc_expire; /* key payload format is in bits, convert to bytes */ @@ -781,12 +767,12 @@ uint32_t sk_gen_params(struct sk_cred *skc) * * \retval EVP_MD */ -static inline const EVP_MD *sk_hash_to_evp_md(enum sk_hmac_alg alg) +static inline const EVP_MD *sk_hash_to_evp_md(enum cfs_crypto_hash_alg alg) { switch (alg) { - case SK_HMAC_SHA256: + case CFS_HASH_ALG_SHA256: return EVP_sha256(); - case SK_HMAC_SHA512: + case CFS_HASH_ALG_SHA512: return EVP_sha512(); default: return EVP_md_null(); @@ -971,7 +957,8 @@ void sk_free_cred(struct sk_cred *skc) * If the size is smaller it will copy the first N bytes necessary to * fill the derived key. */ int sk_kdf(gss_buffer_desc *derived_key , gss_buffer_desc *origin_key, - gss_buffer_desc *key_binding_bufs, int numbufs, int hmac_alg) + gss_buffer_desc *key_binding_bufs, int numbufs, + enum cfs_crypto_hash_alg hmac_alg) { size_t remain; size_t bytes; @@ -1003,7 +990,7 @@ int sk_kdf(gss_buffer_desc *derived_key , gss_buffer_desc *origin_key, return rc; } - if (sk_hmac_types[hmac_alg].sht_bytes != tmp_hash.length) { + if (cfs_crypto_hash_digestsize(hmac_alg) != tmp_hash.length) { free(tmp_hash.value); return -EINVAL; } @@ -1033,9 +1020,11 @@ int sk_session_kdf(struct sk_cred *skc, lnet_nid_t client_nid, struct sk_kernel_ctx *kctx = &skc->sc_kctx; gss_buffer_desc *session_key = &kctx->skc_session_key; gss_buffer_desc bufs[5]; + enum cfs_crypto_crypt_alg crypt_alg; int rc = -1; - session_key->length = sk_crypt_types[kctx->skc_crypt_alg].sct_bytes; + crypt_alg = cfs_crypto_crypt_alg(kctx->skc_crypt_alg); + session_key->length = cfs_crypto_crypt_keysize(crypt_alg); session_key->value = malloc(session_key->length); if (!session_key->value) { printerr(0, "Failed to allocate memory for session key\n"); return -1; @@ -1057,7 +1046,7 @@ int sk_session_kdf(struct sk_cred *skc, lnet_nid_t client_nid, bufs[4] = *server_token; return sk_kdf(&kctx->skc_session_key, &kctx->skc_shared_key, bufs, - 5, kctx->skc_hmac_alg); + 5, cfs_crypto_hash_alg(kctx->skc_hmac_alg)); } /* Uses the session key to create an HMAC key and encryption key.
In @@ -1089,18 +1078,21 @@ int sk_compute_keys(struct sk_cred *skc) gss_buffer_desc *session_key = &kctx->skc_session_key; gss_buffer_desc *hmac_key = &kctx->skc_hmac_key; gss_buffer_desc *encrypt_key = &kctx->skc_encrypt_key; + enum cfs_crypto_hash_alg hmac_alg; + enum cfs_crypto_crypt_alg crypt_alg; char *encrypt = "Encrypt"; char *integrity = "Integrity"; int rc; - hmac_key->length = sk_hmac_types[kctx->skc_hmac_alg].sht_bytes; + hmac_alg = cfs_crypto_hash_alg(kctx->skc_hmac_alg); + hmac_key->length = cfs_crypto_hash_digestsize(hmac_alg); hmac_key->value = malloc(hmac_key->length); if (!hmac_key->value) return -ENOMEM; rc = PKCS5_PBKDF2_HMAC(integrity, -1, session_key->value, session_key->length, SK_PBKDF2_ITERATIONS, - sk_hash_to_evp_md(kctx->skc_hmac_alg), + sk_hash_to_evp_md(hmac_alg), hmac_key->length, hmac_key->value); if (rc == 0) return -EINVAL; @@ -1109,14 +1101,15 @@ int sk_compute_keys(struct sk_cred *skc) if ((skc->sc_flags & LGSS_SVC_PRIV) == 0) return 0; - encrypt_key->length = sk_crypt_types[kctx->skc_crypt_alg].sct_bytes; + crypt_alg = cfs_crypto_crypt_alg(kctx->skc_crypt_alg); + encrypt_key->length = cfs_crypto_crypt_keysize(crypt_alg); encrypt_key->value = malloc(encrypt_key->length); if (!encrypt_key->value) return -ENOMEM; rc = PKCS5_PBKDF2_HMAC(encrypt, -1, session_key->value, session_key->length, SK_PBKDF2_ITERATIONS, - sk_hash_to_evp_md(kctx->skc_hmac_alg), + sk_hash_to_evp_md(hmac_alg), encrypt_key->length, encrypt_key->value); if (rc == 0) return -EINVAL; diff --git a/lustre/utils/gss/sk_utils.h b/lustre/utils/gss/sk_utils.h index 6f7c027..75daee3 100644 --- a/lustre/utils/gss/sk_utils.h +++ b/lustre/utils/gss/sk_utils.h @@ -35,8 +35,22 @@ #include #include +#include #include "lsupport.h" +#ifndef ARRAY_SIZE +# define ARRAY_SIZE(a) ((sizeof(a)) / (sizeof((a)[0]))) +#endif /* !ARRAY_SIZE */ + +/* LL_CRYPTO_MAX_NAME value must match value of + * CRYPTO_MAX_ALG_NAME in include/linux/crypto.h + */ +#ifdef HAVE_CRYPTO_MAX_ALG_NAME_128 +#define LL_CRYPTO_MAX_NAME 128 +#else +#define LL_CRYPTO_MAX_NAME 64 +#endif + /* Some limits and defaults */ #define SK_CONF_VERSION 1 #define SK_MSG_VERSION 1 @@ -113,8 +127,8 @@ struct sk_keyfile_config { /* Format passed to the kernel from userspace */ struct sk_kernel_ctx { uint32_t skc_version; - uint16_t skc_hmac_alg; - uint16_t skc_crypt_alg; + char skc_hmac_alg[LL_CRYPTO_MAX_NAME]; + char skc_crypt_alg[LL_CRYPTO_MAX_NAME]; uint32_t skc_expire; uint32_t skc_host_random; uint32_t skc_peer_random; @@ -137,6 +151,103 @@ struct sk_cred { DH *sc_params; }; +/* Names match up with openssl enc and dgst commands */ +/* When adding new alg types, make sure first occurrence's name + * matches cht_name in hash_types array. 
+ */ +static const struct sk_crypt_type sk_crypt_algs[] = { + { + .sct_name = "null", + .sct_type = SK_CRYPT_EMPTY + }, + { + .sct_name = "NONE", + .sct_type = SK_CRYPT_EMPTY + }, + { + .sct_name = "ctr(aes)", + .sct_type = SK_CRYPT_AES256_CTR + }, + { + .sct_name = "AES-256-CTR", + .sct_type = SK_CRYPT_AES256_CTR + } +}; +static const struct sk_hmac_type sk_hmac_algs[] = { + { + .sht_name = "null", + .sht_type = SK_HMAC_EMPTY + }, + { + .sht_name = "NONE", + .sht_type = SK_HMAC_EMPTY + }, + { + .sht_name = "sha256", + .sht_type = SK_HMAC_SHA256 + }, + { + .sht_name = "SHA256", + .sht_type = SK_HMAC_SHA256 + }, + { + .sht_name = "sha512", + .sht_type = SK_HMAC_SHA512 + }, + { + .sht_name = "SHA512", + .sht_type = SK_HMAC_SHA512 + } +}; + +static inline int sk_name2crypt(char *name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sk_crypt_algs); i++) { + if (strcasecmp(name, sk_crypt_algs[i].sct_name) == 0) + return sk_crypt_algs[i].sct_type; + } + + return SK_CRYPT_INVALID; +} + +static inline int sk_name2hmac(char *name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sk_hmac_algs); i++) { + if (strcasecmp(name, sk_hmac_algs[i].sht_name) == 0) + return sk_hmac_algs[i].sht_type; + } + + return SK_HMAC_INVALID; +} + +static inline const char *sk_crypt2name(enum sk_crypt_alg type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sk_crypt_algs); i++) { + if (type == sk_crypt_algs[i].sct_type) + return sk_crypt_algs[i].sct_name; + } + + return NULL; +} + +static inline const char *sk_hmac2name(enum sk_hmac_alg type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sk_hmac_algs); i++) { + if (type == sk_hmac_algs[i].sht_type) + return sk_hmac_algs[i].sht_name; + } + + return NULL; +} + void sk_init_logging(char *program, int verbose, int fg); struct sk_keyfile_config *sk_read_file(char *filename); int sk_load_keyfile(char *path);