From 462037d5d302f408b840d2543de89b22f8eb7b73 Mon Sep 17 00:00:00 2001 From: Jeremy Filizetti Date: Tue, 9 Aug 2016 19:19:43 -0400 Subject: [PATCH] LU-3289 gss: Fix issues with SK privacy and integrity mode This patch has several fixes for skpi: 1. The original SK patches failed to account for out of order handling of RPCs and bulk pages during encryption. As a result clients would be out of sync with the IV used for decryption. This patch moves the encryption to a format similar to RFC3686 to handle these RPCs and bulk pages. 2. A header was added to the SK mode RPCs to allow versioning and send the unencrypted IV used for an RPC. The versioning will allow for future protocol changes. 3. Several changes to fix or improve security of the implementation based on a security review from Matthew Wood at Intel: - Derive a unique key for integrity modes instead of using the shared secret key (ska, ski, and skpi modes). This helps prevent replays. - Use PBKDF2 instead of HMAC to derive keys for integrity and encryption. - Have the server side pass a random value (like the client) and incorporate this value into the key binding information. 
Signed-off-by: Jeremy Filizetti Change-Id: I247187ecbd8cb23c602cec6a92eca938f135e564 Reviewed-on: http://review.whamcloud.com/21922 Tested-by: Jenkins Tested-by: Maloo Reviewed-by: Andreas Dilger Reviewed-by: Sebastien Buisson Reviewed-by: Oleg Drokin --- lustre/ptlrpc/gss/gss_crypto.c | 27 ++- lustre/ptlrpc/gss/gss_crypto.h | 2 +- lustre/ptlrpc/gss/gss_krb5_mech.c | 15 +- lustre/ptlrpc/gss/gss_sk_mech.c | 360 ++++++++++++++++++++++++-------------- lustre/utils/gss/lgss_sk_utils.c | 95 ++++++---- lustre/utils/gss/sk_utils.c | 283 ++++++++++++++++++++---------- lustre/utils/gss/sk_utils.h | 35 +++- lustre/utils/gss/svcgssd_proc.c | 169 +++++++++++------- 8 files changed, 644 insertions(+), 342 deletions(-) diff --git a/lustre/ptlrpc/gss/gss_crypto.c b/lustre/ptlrpc/gss/gss_crypto.c index 06f3542..a2ffd60 100644 --- a/lustre/ptlrpc/gss/gss_crypto.c +++ b/lustre/ptlrpc/gss/gss_crypto.c @@ -424,11 +424,8 @@ int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize) return 0; } -int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, - int use_internal_iv, - int inobj_cnt, - rawobj_t *inobjs, - rawobj_t *outobj, +int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv, + int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj, int enc) { struct blkcipher_desc desc; @@ -436,14 +433,14 @@ int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, struct scatterlist dst; struct sg_table sg_dst; struct sg_table sg_src; - __u8 local_iv[16] = {0}, *buf; + __u8 *buf; __u32 datalen = 0; int i, rc; ENTRY; buf = outobj->data; desc.tfm = tfm; - desc.info = local_iv; + desc.info = iv; desc.flags = 0; for (i = 0; i < inobj_cnt; i++) { @@ -461,14 +458,7 @@ int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, RETURN(rc); } - if (use_internal_iv) { - if (enc) - rc = crypto_blkcipher_encrypt(&desc, &dst, &src, - src.length); - else - rc = crypto_blkcipher_decrypt(&desc, &dst, &src, - src.length); - } else { + if (iv) { if (enc) rc = crypto_blkcipher_encrypt_iv(&desc, &dst, &src, @@ -477,6 
+467,13 @@ int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, rc = crypto_blkcipher_decrypt_iv(&desc, &dst, &src, src.length); + } else { + if (enc) + rc = crypto_blkcipher_encrypt(&desc, &dst, &src, + src.length); + else + rc = crypto_blkcipher_decrypt(&desc, &dst, &src, + src.length); } gss_teardown_sgtable(&sg_src); diff --git a/lustre/ptlrpc/gss/gss_crypto.h b/lustre/ptlrpc/gss/gss_crypto.h index f16f31b..ad15cde 100644 --- a/lustre/ptlrpc/gss/gss_crypto.h +++ b/lustre/ptlrpc/gss/gss_crypto.h @@ -28,7 +28,7 @@ int gss_digest_norm(struct crypto_hash *tfm, struct gss_keyblock *kb, rawobj_t *hdr, int msgcnt, rawobj_t *msgs, int iovcnt, lnet_kiov_t *iovs, rawobj_t *cksum); int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize); -int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, int use_internal_iv, +int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv, int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj, int enc); diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c index acb6308..7d7a06f 100644 --- a/lustre/ptlrpc/gss/gss_krb5_mech.c +++ b/lustre/ptlrpc/gss/gss_krb5_mech.c @@ -940,6 +940,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, rawobj_t cksum = RAWOBJ_EMPTY; rawobj_t data_desc[3], cipher; __u8 conf[GSS_MAX_CIPHER_BLOCK]; + __u8 local_iv[16] = {0}; int rc = 0; LASSERT(ke); @@ -1038,7 +1039,8 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx, GOTO(arc4_out_tfm, rc = -EACCES); } - rc = gss_crypt_rawobjs(arc4_tfm, 1, 3, data_desc, &cipher, 1); + rc = gss_crypt_rawobjs(arc4_tfm, NULL, 3, data_desc, + &cipher, 1); arc4_out_tfm: crypto_free_blkcipher(arc4_tfm); arc4_out_key: @@ -1046,8 +1048,8 @@ arc4_out_key: arc4_out: do {} while(0); /* just to avoid compile warning */ } else { - rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, 0, 3, data_desc, - &cipher, 1); + rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3, + data_desc, &cipher, 1); } if (rc != 0) { @@ -1231,6 +1233,7 @@ __u32 gss_unwrap_kerberos(struct 
gss_ctx *gctx, rawobj_t hash_objs[3]; int rc = 0; __u32 major; + __u8 local_iv[16] = {0}; LASSERT(ke); @@ -1317,7 +1320,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx, GOTO(arc4_out_tfm, rc = -EACCES); } - rc = gss_crypt_rawobjs(arc4_tfm, 1, 1, &cipher_in, + rc = gss_crypt_rawobjs(arc4_tfm, NULL, 1, &cipher_in, &plain_out, 0); arc4_out_tfm: crypto_free_blkcipher(arc4_tfm); @@ -1326,8 +1329,8 @@ arc4_out_key: arc4_out: cksum = RAWOBJ_EMPTY; } else { - rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, 0, 1, &cipher_in, - &plain_out, 0); + rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 1, + &cipher_in, &plain_out, 0); } if (rc != 0) { diff --git a/lustre/ptlrpc/gss/gss_sk_mech.c b/lustre/ptlrpc/gss/gss_sk_mech.c index 39d3e76..f88e576 100644 --- a/lustre/ptlrpc/gss/gss_sk_mech.c +++ b/lustre/ptlrpc/gss/gss_sk_mech.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -47,21 +48,49 @@ #include "gss_asn1.h" #define SK_INTERFACE_VERSION 1 +#define SK_MSG_VERSION 1 #define SK_MIN_SIZE 8 +#define SK_IV_SIZE 16 + +/* Starting number for reverse contexts. It is critical to security + * that reverse contexts use a different range of numbers than regular + * contexts because they are using the same key. Therefore the IV/nonce + * combination must be unique for them. To accomplish this reverse contexts + * use the negative range of a 64-bit number and regular contexts use the + * positive range. If the same IV/nonce combination were reused it would leak + * information about the plaintext. 
*/ +#define SK_IV_REV_START (1UL << 63) struct sk_ctx { - __u32 sc_version; __u16 sc_hmac; __u16 sc_crypt; __u32 sc_expire; - rawobj_t sc_shared_key; - rawobj_t sc_iv; + __u32 sc_host_random; + __u32 sc_peer_random; + atomic64_t sc_iv; + rawobj_t sc_hmac_key; struct gss_keyblock sc_session_kb; }; +struct sk_hdr { + __u64 skh_version; + __u64 skh_iv; +} __attribute__((packed)); + +/* The format of SK wire data is similar to that of RFC3686 ESP Payload + * (section 3) except instead of just an IV there is a struct sk_hdr. + * --------------------------------------------------------------------- + * | struct sk_hdr | ciphertext (variable size) | HMAC (variable size) | + * --------------------------------------------------------------------- */ +struct sk_wire { + rawobj_t skw_header; + rawobj_t skw_cipher; + rawobj_t skw_hmac; +}; + static struct sk_crypt_type sk_crypt_types[] = { [SK_CRYPT_AES256_CTR] = { - .sct_name = "ctr(aes256)", + .sct_name = "ctr(aes)", .sct_bytes = 32, }, }; @@ -82,30 +111,50 @@ static inline unsigned long sk_block_mask(unsigned long len, int blocksize) return (len + blocksize - 1) & (~(blocksize - 1)); } -static int sk_init_keys(struct sk_ctx *skc) +static int sk_fill_header(struct sk_ctx *skc, struct sk_hdr *skh) { - int rc; - unsigned int ivsize; + __u64 tmp_iv; + skh->skh_version = be64_to_cpu(SK_MSG_VERSION); + + /* Always using inc_return so we don't use our initial numbers which + * could be the reuse detecting numbers */ + tmp_iv = atomic64_inc_return(&skc->sc_iv); + skh->skh_iv = be64_to_cpu(tmp_iv); + if (tmp_iv == 0 || tmp_iv == SK_IV_REV_START) { + CERROR("Counter looped, connection must be reset to avoid " + "plaintext information\n"); + return GSS_S_FAILURE; + } - rc = gss_keyblock_init(&skc->sc_session_kb, - sk_crypt_types[skc->sc_crypt].sct_name, 0); - if (rc) - return rc; + return GSS_S_COMPLETE; +} - ivsize = crypto_blkcipher_ivsize(skc->sc_session_kb.kb_tfm); - if (skc->sc_iv.len != ivsize) { - CERROR("IV size for algorithm 
(%d) does not match provided IV " - "size: %d\n", ivsize, skc->sc_iv.len); - return -EINVAL; - } +static int sk_verify_header(struct sk_hdr *skh) +{ + if (cpu_to_be64(skh->skh_version) != SK_MSG_VERSION) + return GSS_S_DEFECTIVE_TOKEN; + + return GSS_S_COMPLETE; +} - crypto_blkcipher_set_iv(skc->sc_session_kb.kb_tfm, - skc->sc_iv.data, skc->sc_iv.len); +void sk_construct_rfc3686_iv(__u8 *iv, __u32 nonce, __u64 partial_iv) +{ + __u32 ctr = cpu_to_be32(1); - return 0; + memcpy(iv, &nonce, CTR_RFC3686_NONCE_SIZE); + iv += CTR_RFC3686_NONCE_SIZE; + memcpy(iv, &partial_iv, CTR_RFC3686_IV_SIZE); + iv += CTR_RFC3686_IV_SIZE; + memcpy(iv, &ctr, sizeof(ctr)); +} + +static int sk_init_keys(struct sk_ctx *skc) +{ + return gss_keyblock_init(&skc->sc_session_kb, + sk_crypt_types[skc->sc_crypt].sct_name, 0); } -static int fill_sk_context(rawobj_t *inbuf, struct sk_ctx *skc) +static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc) { char *ptr = inbuf->data; char *end = inbuf->data + inbuf->len; @@ -127,7 +176,7 @@ static int fill_sk_context(rawobj_t *inbuf, struct sk_ctx *skc) CERROR("Failed to read HMAC algorithm type"); return -1; } - if (skc->sc_hmac >= SK_HMAC_MAX) { + if (skc->sc_hmac <= SK_HMAC_EMPTY || skc->sc_hmac >= SK_HMAC_MAX) { CERROR("Invalid hmac type: %d\n", skc->sc_hmac); return -1; } @@ -137,7 +186,7 @@ static int fill_sk_context(rawobj_t *inbuf, struct sk_ctx *skc) CERROR("Failed to read crypt algorithm type"); return -1; } - if (skc->sc_crypt >= SK_CRYPT_MAX) { + if (skc->sc_crypt <= SK_CRYPT_EMPTY || skc->sc_crypt >= SK_CRYPT_MAX) { CERROR("Invalid crypt type: %d\n", skc->sc_crypt); return -1; } @@ -149,24 +198,32 @@ static int fill_sk_context(rawobj_t *inbuf, struct sk_ctx *skc) } skc->sc_expire = tmp + cfs_time_current_sec(); - /* 5. Shared key */ - if (gss_get_rawobj(&ptr, end, &skc->sc_shared_key)) { - CERROR("Failed to read shared key"); + /* 5. 
host random is used as nonce for encryption */ + if (gss_get_bytes(&ptr, end, &skc->sc_host_random, + sizeof(skc->sc_host_random))) { + CERROR("Failed to read host random "); return -1; } - if (skc->sc_shared_key.len <= SK_MIN_SIZE) { - CERROR("Shared key must key must be larger than %d bytes\n", - SK_MIN_SIZE); + + /* 6. peer random is used as nonce for decryption */ + if (gss_get_bytes(&ptr, end, &skc->sc_peer_random, + sizeof(skc->sc_peer_random))) { + CERROR("Failed to read peer random "); return -1; } - /* 6. IV, can be empty if not using privacy mode */ - if (gss_get_rawobj(&ptr, end, &skc->sc_iv)) { - CERROR("Failed to read initialization vector "); + /* 7. HMAC key */ + if (gss_get_rawobj(&ptr, end, &skc->sc_hmac_key)) { + CERROR("Failed to read HMAC key"); + return -1; + } + if (skc->sc_hmac_key.len <= SK_MIN_SIZE) { + CERROR("HMAC key must key must be larger than %d bytes\n", + SK_MIN_SIZE); return -1; } - /* 7. Session key, can be empty if not using privacy mode */ + /* 8. Session key, can be empty if not using privacy mode */ if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) { CERROR("Failed to read session key"); return -1; @@ -175,13 +232,14 @@ static int fill_sk_context(rawobj_t *inbuf, struct sk_ctx *skc) return 0; } -static void delete_sk_context(struct sk_ctx *skc) +static void sk_delete_context(struct sk_ctx *skc) { if (!skc) return; + + rawobj_free(&skc->sc_hmac_key); gss_keyblock_free(&skc->sc_session_kb); - rawobj_free(&skc->sc_iv); - rawobj_free(&skc->sc_shared_key); + OBD_FREE_PTR(skc); } static @@ -197,14 +255,16 @@ __u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context) if (!skc) return GSS_S_FAILURE; - if (fill_sk_context(inbuf, skc)) - goto out_error; + atomic64_set(&skc->sc_iv, 0); + + if (sk_fill_context(inbuf, skc)) + goto out_err; /* Only privacy mode needs to initialize keys */ if (skc->sc_session_kb.kb_key.len > 0) { privacy = true; if (sk_init_keys(skc)) - goto out_error; + goto out_err; } 
gss_context->internal_ctx_id = skc; @@ -213,9 +273,8 @@ __u32 gss_import_sec_context_sk(rawobj_t *inbuf, struct gss_ctx *gss_context) return GSS_S_COMPLETE; -out_error: - delete_sk_context(skc); - OBD_FREE_PTR(skc); +out_err: + sk_delete_context(skc); return GSS_S_FAILURE; } @@ -230,29 +289,31 @@ __u32 gss_copy_reverse_context_sk(struct gss_ctx *gss_context_old, if (!skc_new) return GSS_S_FAILURE; - skc_new->sc_crypt = skc_old->sc_crypt; skc_new->sc_hmac = skc_old->sc_hmac; + skc_new->sc_crypt = skc_old->sc_crypt; skc_new->sc_expire = skc_old->sc_expire; - if (rawobj_dup(&skc_new->sc_shared_key, &skc_old->sc_shared_key)) - goto out_error; - if (rawobj_dup(&skc_new->sc_iv, &skc_old->sc_iv)) - goto out_error; + skc_new->sc_host_random = skc_old->sc_host_random; + skc_new->sc_peer_random = skc_old->sc_peer_random; + + atomic64_set(&skc_new->sc_iv, SK_IV_REV_START); + + if (rawobj_dup(&skc_new->sc_hmac_key, &skc_old->sc_hmac_key)) + goto out_err; if (gss_keyblock_dup(&skc_new->sc_session_kb, &skc_old->sc_session_kb)) - goto out_error; + goto out_err; /* Only privacy mode needs to initialize keys */ if (skc_new->sc_session_kb.kb_key.len > 0) if (sk_init_keys(skc_new)) - goto out_error; + goto out_err; gss_context_new->internal_ctx_id = skc_new; CDEBUG(D_SEC, "successfully copied reverse sk context\n"); return GSS_S_COMPLETE; -out_error: - delete_sk_context(skc_new); - OBD_FREE_PTR(skc_new); +out_err: + sk_delete_context(skc_new); return GSS_S_FAILURE; } @@ -297,7 +358,7 @@ __u32 gss_get_mic_sk(struct gss_ctx *gss_context, { struct sk_ctx *skc = gss_context->internal_ctx_id; return sk_make_hmac(sk_hmac_types[skc->sc_hmac].sht_name, - &skc->sc_shared_key, message_count, messages, + &skc->sc_hmac_key, message_count, messages, iov_count, iovs, token); } @@ -446,7 +507,7 @@ __u32 gss_verify_mic_sk(struct gss_ctx *gss_context, rawobj_t *token) { struct sk_ctx *skc = gss_context->internal_ctx_id; - return sk_verify_hmac(&sk_hmac_types[skc->sc_hmac], &skc->sc_shared_key, + 
return sk_verify_hmac(&sk_hmac_types[skc->sc_hmac], &skc->sc_hmac_key, message_count, messages, iov_count, iovs, token); } @@ -457,38 +518,46 @@ __u32 gss_wrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header, { struct sk_ctx *skc = gss_context->internal_ctx_id; struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac]; - rawobj_t msgbufs[2]; - rawobj_t cipher; - rawobj_t checksum; + struct sk_wire skw; + struct sk_hdr skh; + rawobj_t msgbufs[3]; + __u8 local_iv[SK_IV_SIZE]; unsigned int blocksize; LASSERT(skc->sc_session_kb.kb_tfm); - blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm); + blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm); if (gss_add_padding(message, message_buffer_length, blocksize)) return GSS_S_FAILURE; - /* Only encrypting the message data */ - cipher.data = token->data; - cipher.len = token->len - sht->sht_bytes; - if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, 0, 1, message, - &cipher, 1)) + memset(token->data, 0, token->len); + + if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE) + return GSS_S_FAILURE; + + skw.skw_header.data = token->data; + skw.skw_header.len = sizeof(skh); + memcpy(skw.skw_header.data, &skh, sizeof(skh)); + + sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv); + skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len; + skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes; + if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message, + &skw.skw_cipher, 1)) return GSS_S_FAILURE; - /* Checksum covers the GSS header followed by the encrypted message */ - msgbufs[0].len = gss_header->len; - msgbufs[0].data = gss_header->data; - msgbufs[1].len = cipher.len; - msgbufs[1].data = cipher.data; + /* HMAC covers the SK header, GSS header, and ciphertext */ + msgbufs[0] = skw.skw_header; + msgbufs[1] = *gss_header; + msgbufs[2] = skw.skw_cipher; - LASSERT(cipher.len + sht->sht_bytes <= token->len); - checksum.data = token->data + cipher.len; - 
checksum.len = sht->sht_bytes; - if (sk_make_hmac(sht->sht_name, &skc->sc_shared_key, 2, msgbufs, 0, - NULL, &checksum)) + skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len; + skw.skw_hmac.len = sht->sht_bytes; + if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 3, msgbufs, 0, + NULL, &skw.skw_hmac)) return GSS_S_FAILURE; - token->len = cipher.len + checksum.len; + token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len; return GSS_S_COMPLETE; } @@ -499,39 +568,47 @@ __u32 gss_unwrap_sk(struct gss_ctx *gss_context, rawobj_t *gss_header, { struct sk_ctx *skc = gss_context->internal_ctx_id; struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac]; - rawobj_t msgbufs[2]; - rawobj_t cipher; - rawobj_t checksum; + struct sk_wire skw; + struct sk_hdr *skh; + rawobj_t msgbufs[3]; + __u8 local_iv[SK_IV_SIZE]; unsigned int blocksize; int rc; LASSERT(skc->sc_session_kb.kb_tfm); - blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm); - if (token->len < sht->sht_bytes) + if (token->len < sizeof(skh) + sht->sht_bytes) return GSS_S_DEFECTIVE_TOKEN; - cipher.data = token->data; - cipher.len = token->len - sht->sht_bytes; - checksum.data = token->data + cipher.len; - checksum.len = sht->sht_bytes; + skw.skw_header.data = token->data; + skw.skw_header.len = sizeof(struct sk_hdr); + skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len; + skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes; + skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len; + skw.skw_hmac.len = sht->sht_bytes; - if (cipher.len % blocksize != 0) + blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm); + if (skw.skw_cipher.len % blocksize != 0) return GSS_S_DEFECTIVE_TOKEN; - /* Checksum covers the GSS header followed by the encrypted message */ - msgbufs[0].len = gss_header->len; - msgbufs[0].data = gss_header->data; - msgbufs[1].len = cipher.len; - msgbufs[1].data = cipher.data; - rc = sk_verify_hmac(sht, &skc->sc_shared_key, 
2, msgbufs, 0, NULL, - &checksum); + skh = (struct sk_hdr *)skw.skw_header.data; + rc = sk_verify_header(skh); + if (rc != GSS_S_COMPLETE) + return rc; + + /* HMAC covers the SK header, GSS header, and ciphertext */ + msgbufs[0] = skw.skw_header; + msgbufs[1] = *gss_header; + msgbufs[2] = skw.skw_cipher; + rc = sk_verify_hmac(sht, &skc->sc_hmac_key, 3, msgbufs, 0, NULL, + &skw.skw_hmac); if (rc) return rc; - message->len = cipher.len; - if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, 0, 1, &cipher, - message, 0)) + sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, skh->skh_iv); + message->len = skw.skw_cipher.len; + if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, + 1, &skw.skw_cipher, message, 0)) return GSS_S_FAILURE; return GSS_S_COMPLETE; @@ -564,14 +641,13 @@ __u32 gss_prep_bulk_sk(struct gss_ctx *gss_context, return GSS_S_COMPLETE; } -static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, - struct ptlrpc_bulk_desc *desc, - rawobj_t *cipher, +static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv, + struct ptlrpc_bulk_desc *desc, rawobj_t *cipher, int adj_nob) { struct blkcipher_desc cdesc = { .tfm = tfm, - .info = NULL, + .info = iv, .flags = 0, }; struct scatterlist ptxt; @@ -591,8 +667,7 @@ static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, sk_block_mask(BD_GET_KIOV(desc, i).kiov_len, blocksize), BD_GET_KIOV(desc, i).kiov_offset); - if (adj_nob) - nob += ptxt.length; + nob += ptxt.length; sg_set_page(&ctxt, BD_GET_ENC_KIOV(desc, i).kiov_page, ptxt.length, ptxt.offset); @@ -600,8 +675,8 @@ static __u32 sk_encrypt_bulk(struct crypto_blkcipher *tfm, BD_GET_ENC_KIOV(desc, i).kiov_offset = ctxt.offset; BD_GET_ENC_KIOV(desc, i).kiov_len = ctxt.length; - rc = crypto_blkcipher_encrypt(&cdesc, &ctxt, &ptxt, - ptxt.length); + rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt, + ptxt.length); if (rc) { CERROR("failed to encrypt page: %d\n", rc); return rc; @@ -614,14 +689,13 @@ static __u32 sk_encrypt_bulk(struct 
crypto_blkcipher *tfm, return 0; } -static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, - struct ptlrpc_bulk_desc *desc, - rawobj_t *cipher, +static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, __u8 *iv, + struct ptlrpc_bulk_desc *desc, rawobj_t *cipher, int adj_nob) { struct blkcipher_desc cdesc = { .tfm = tfm, - .info = NULL, + .info = iv, .flags = 0, }; struct scatterlist ptxt; @@ -691,8 +765,8 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, if (piov->kiov_len % blocksize == 0) sg_assign_page(&ptxt, piov->kiov_page); - rc = crypto_blkcipher_decrypt(&cdesc, &ptxt, &ctxt, - ctxt.length); + rc = crypto_blkcipher_decrypt_iv(&cdesc, &ptxt, &ctxt, + ctxt.length); if (rc) { CERROR("Decryption failed for page: %d\n", rc); return GSS_S_FAILURE; @@ -732,26 +806,36 @@ static __u32 sk_decrypt_bulk(struct crypto_blkcipher *tfm, static __u32 gss_wrap_bulk_sk(struct gss_ctx *gss_context, - struct ptlrpc_bulk_desc *desc, rawobj_t *token, - int adj_nob) + struct ptlrpc_bulk_desc *desc, rawobj_t *token, + int adj_nob) { struct sk_ctx *skc = gss_context->internal_ctx_id; struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac]; - rawobj_t cipher = RAWOBJ_EMPTY; - rawobj_t checksum = RAWOBJ_EMPTY; + struct sk_wire skw; + struct sk_hdr skh; + __u8 local_iv[SK_IV_SIZE]; - cipher.data = token->data; - cipher.len = token->len - sht->sht_bytes; - memset(token->data, 0, token->len); + LASSERT(skc->sc_session_kb.kb_tfm); - if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, desc, &cipher, adj_nob)) + memset(token->data, 0, token->len); + if (sk_fill_header(skc, &skh) != GSS_S_COMPLETE) return GSS_S_FAILURE; - checksum.data = token->data + cipher.len; - checksum.len = sht->sht_bytes; + skw.skw_header.data = token->data; + skw.skw_header.len = sizeof(skh); + memcpy(skw.skw_header.data, &skh, sizeof(skh)); + + sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv); + skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len; + skw.skw_cipher.len = 
token->len - skw.skw_header.len - sht->sht_bytes; + if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv, + desc, &skw.skw_cipher, adj_nob)) + return GSS_S_FAILURE; - if (sk_make_hmac(sht->sht_name, &skc->sc_shared_key, 1, &cipher, - desc->bd_iov_count, GET_ENC_KIOV(desc), &checksum)) + skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len; + skw.skw_hmac.len = sht->sht_bytes; + if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 1, &skw.skw_cipher, + desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac)) return GSS_S_FAILURE; return GSS_S_COMPLETE; @@ -764,23 +848,38 @@ __u32 gss_unwrap_bulk_sk(struct gss_ctx *gss_context, { struct sk_ctx *skc = gss_context->internal_ctx_id; struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac]; - rawobj_t cipher = RAWOBJ_EMPTY; - rawobj_t checksum = RAWOBJ_EMPTY; + struct sk_wire skw; + struct sk_hdr *skh; + __u8 local_iv[SK_IV_SIZE]; int rc; - cipher.data = token->data; - cipher.len = token->len - sht->sht_bytes; - checksum.data = token->data + cipher.len; - checksum.len = sht->sht_bytes; + LASSERT(skc->sc_session_kb.kb_tfm); + + if (token->len < sizeof(skh) + sht->sht_bytes) + return GSS_S_DEFECTIVE_TOKEN; + + skw.skw_header.data = token->data; + skw.skw_header.len = sizeof(struct sk_hdr); + skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len; + skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes; + skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len; + skw.skw_hmac.len = sht->sht_bytes; + + skh = (struct sk_hdr *)skw.skw_header.data; + rc = sk_verify_header(skh); + if (rc != GSS_S_COMPLETE) + return rc; rc = sk_verify_bulk_hmac(&sk_hmac_types[skc->sc_hmac], - &skc->sc_shared_key, 1, &cipher, + &skc->sc_hmac_key, 1, &skw.skw_cipher, desc->bd_iov_count, GET_ENC_KIOV(desc), - desc->bd_nob, &checksum); + desc->bd_nob, &skw.skw_hmac); if (rc) return rc; - rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, desc, &cipher, adj_nob); + sk_construct_rfc3686_iv(local_iv, skc->sc_peer_random, 
skh->skh_iv); + rc = sk_decrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv, + desc, &skw.skw_cipher, adj_nob); if (rc) return rc; @@ -791,8 +890,7 @@ static void gss_delete_sec_context_sk(void *internal_context) { struct sk_ctx *sk_context = internal_context; - delete_sk_context(sk_context); - OBD_FREE_PTR(sk_context); + sk_delete_context(sk_context); } int gss_display_sk(struct gss_ctx *gss_context, char *buf, int bufsize) diff --git a/lustre/utils/gss/lgss_sk_utils.c b/lustre/utils/gss/lgss_sk_utils.c index 39cd25d..4f9ca53 100644 --- a/lustre/utils/gss/lgss_sk_utils.c +++ b/lustre/utils/gss/lgss_sk_utils.c @@ -88,9 +88,9 @@ static void lgss_sk_release_cred(struct lgss_cred *cred) static int lgss_sk_using_cred(struct lgss_cred *cred) { struct sk_cred *skc = cred->lc_mech_cred; - gss_buffer_desc bufs[7]; + gss_buffer_desc bufs[SK_INIT_BUFFERS]; + uint32_t version; uint32_t flags; - int numbufs = 7; int rc; rc = sk_gen_params(skc, true); @@ -98,25 +98,28 @@ static int lgss_sk_using_cred(struct lgss_cred *cred) return rc; /* HMAC is generated in this order */ - bufs[0] = skc->sc_kctx.skc_iv; - bufs[1] = skc->sc_p; - bufs[2] = skc->sc_pub_key; - bufs[3] = skc->sc_tgt; - bufs[4] = skc->sc_nodemap_hash; - - /* big endian flags for the wire */ + version = htobe32(SK_MSG_VERSION); + bufs[SK_INIT_VERSION].value = &version; + bufs[SK_INIT_VERSION].length = sizeof(version); + bufs[SK_INIT_RANDOM].value = &skc->sc_kctx.skc_host_random; + bufs[SK_INIT_RANDOM].length = sizeof(skc->sc_kctx.skc_host_random); + bufs[SK_INIT_PUB_KEY] = skc->sc_pub_key; + bufs[SK_INIT_P] = skc->sc_p; + bufs[SK_INIT_TARGET] = skc->sc_tgt; + bufs[SK_INIT_NODEMAP] = skc->sc_nodemap_hash; flags = htobe64(skc->sc_flags); - bufs[5].value = &flags; - bufs[5].length = sizeof(flags); + bufs[SK_INIT_FLAGS].value = &flags; + bufs[SK_INIT_FLAGS].length = sizeof(flags); /* sign all the bufs except HMAC */ - rc = sk_sign_bufs(&skc->sc_kctx.skc_shared_key, bufs, numbufs - 1, - EVP_sha256(), &skc->sc_hmac); + rc = 
sk_sign_bufs(&skc->sc_kctx.skc_shared_key, bufs, + SK_INIT_BUFFERS - 1, EVP_sha256(), + &skc->sc_hmac); if (rc) return rc; - bufs[6] = skc->sc_hmac; - rc = sk_encode_netstring(bufs, numbufs, &cred->lc_mech_token); + bufs[SK_INIT_HMAC] = skc->sc_hmac; + rc = sk_encode_netstring(bufs, SK_INIT_BUFFERS, &cred->lc_mech_token); if (rc) return rc; @@ -130,27 +133,50 @@ static int lgss_sk_validate_cred(struct lgss_cred *cred, gss_buffer_desc *token, gss_buffer_desc *ctx_token) { struct sk_cred *skc = cred->lc_mech_cred; - gss_buffer_desc bufs[2]; - int numbufs = 2; + gss_buffer_desc bufs[SK_RESP_BUFFERS]; + uint32_t version; int i; uint32_t rc; - i = sk_decode_netstring(bufs, numbufs, token); - if (i < numbufs) { - printerr(0, "Failed to decode netstring\n"); - return -1; + /* Decode responder buffers and validate */ + i = sk_decode_netstring(bufs, SK_RESP_BUFFERS, token); + if (i != SK_RESP_BUFFERS) { + printerr(0, "Invalid token received\n"); + return -EINVAL; } - /* decoded buffers from server should be: - * bufs[0] = sc_pub_key - * bufs[1] = sc_hmac */ - rc = sk_verify_hmac(skc, bufs, numbufs - 1, EVP_sha256(), &bufs[1]); + rc = sk_verify_hmac(skc, bufs, SK_RESP_BUFFERS - 1, EVP_sha256(), + &bufs[SK_RESP_HMAC]); if (rc != GSS_S_COMPLETE) { printerr(0, "Invalid HMAC receieved: 0x%x\n", rc); - return -1; + return -EINVAL; + } + + if (bufs[SK_RESP_VERSION].length != sizeof(version)) { + printerr(0, "Invalid version received (wrong size)\n"); + return -EINVAL; + } + memcpy(&version, bufs[SK_RESP_VERSION].value, sizeof(version)); + version = be32toh(version); + if (version != SK_MSG_VERSION) { + printerr(0, "Invalid version received: %d\n", version); + return -EINVAL; + } + + /* In the rare event that both the random values are equal the + * client has the responsibility to retry the connection attempt + * otherwise we would leak information about the plain text by + * reusing IVs as both peer and host use the same values other + * than the nonce. 
*/ + memcpy(&skc->sc_kctx.skc_peer_random, bufs[SK_RESP_RANDOM].value, + sizeof(skc->sc_kctx.skc_peer_random)); + if (skc->sc_kctx.skc_host_random == skc->sc_kctx.skc_peer_random) { + printerr(0, "Host and peer randoms are equal, must retry to " + "ensure unique value for nonce\n"); + return -EAGAIN; } - rc = sk_compute_key(skc, &bufs[0]); + rc = sk_compute_dh_key(skc, &bufs[SK_RESP_PUB_KEY]); if (rc == GSS_S_DEFECTIVE_TOKEN) { /* Defective token for short key means we need to retry * because there is a chance that the parameters generated @@ -159,18 +185,25 @@ static int lgss_sk_validate_cred(struct lgss_cred *cred, gss_buffer_desc *token, return -EAGAIN; } else if (rc != GSS_S_COMPLETE) { printerr(0, "Failed to compute session key: 0x%x\n", rc); - return -1; + return -EINVAL; } - rc = sk_kdf(skc, cred->lc_self_nid, &cred->lc_mech_token); + rc = sk_session_kdf(skc, cred->lc_self_nid, &cred->lc_mech_token, + token); if (rc) { printerr(0, "Failed to calulate derived key\n"); - return -1; + return -EINVAL; + } + + rc = sk_compute_keys(skc); + if (rc) { + printerr(0, "Failed to compute HMAC and session key\n"); + return -EINVAL; } if (sk_serialize_kctx(skc, ctx_token)) { printerr(0, "Failed to serialize context for kernel\n"); - return -1; + return -EINVAL; } return 0; diff --git a/lustre/utils/gss/sk_utils.c b/lustre/utils/gss/sk_utils.c index f13eebd..f38510f 100644 --- a/lustre/utils/gss/sk_utils.c +++ b/lustre/utils/gss/sk_utils.c @@ -42,6 +42,8 @@ #include "sk_utils.h" #include "write_bytes.h" +#define SK_PBKDF2_ITERATIONS 10000 + static struct sk_crypt_type sk_crypt_types[] = { [SK_CRYPT_AES256_CTR] = { .sct_name = "ctr(aes)", @@ -663,13 +665,28 @@ struct sk_cred *sk_create_cred(const char *tgt, const char *nodemap, return skc; out_err: - if (skc) - sk_free_cred(skc); + sk_free_cred(skc); free(config); return NULL; } +static void sk_free_parameters(struct sk_cred *skc) +{ + if (skc->sc_params) + DH_free(skc->sc_params); + if (skc->sc_p.value) + 
free(skc->sc_p.value); + if (skc->sc_pub_key.value) + free(skc->sc_pub_key.value); + + skc->sc_params = NULL; + skc->sc_p.value = NULL; + skc->sc_p.length = 0; + skc->sc_pub_key.value = NULL; + skc->sc_pub_key.length = 0; +} + /** * Generates a public key and computes the private key for the DH key exchange. * The parameters must be populated with the p and g from the peer. @@ -739,21 +756,6 @@ static uint32_t sk_gen_responder_params(struct sk_cred *skc) return GSS_S_COMPLETE; } -static void sk_free_parameters(struct sk_cred *skc) -{ - if (skc->sc_params) - DH_free(skc->sc_params); - if (skc->sc_p.value) - free(skc->sc_p.value); - if (skc->sc_pub_key.value) - free(skc->sc_pub_key.value); - - skc->sc_p.value = NULL; - skc->sc_p.length = 0; - skc->sc_pub_key.value = NULL; - skc->sc_pub_key.length = 0; -} - /** * Generates shared key Diffie Hellman parameters used for the DH key exchange * between host and peer if privacy mode is enabled @@ -766,31 +768,11 @@ static void sk_free_parameters(struct sk_cred *skc) */ static uint32_t sk_gen_initiator_params(struct sk_cred *skc) { - gss_buffer_desc *iv = &skc->sc_kctx.skc_iv; int rc; /* The credential could be used so free existing parameters */ sk_free_parameters(skc); - /* Pseudo random should be sufficient here because the IV will be used - * with a key that is used only once. 
This also should ensure we have - * unqiue tokens that are sent to the remote server which is important - * because the token is hashed for the sunrpc cache lookups and a - * failure there would cause connection attempts to fail indefinitely - * due to the large timeout value on the server side sunrpc cache - * (INT_MAX) */ - iv->length = SK_IV_SIZE; - iv->value = malloc(iv->length); - if (!iv->value) { - printerr(0, "Failed to allocate memory for IV\n"); - return GSS_S_FAILURE; - } - memset(iv->value, 0, iv->length); - if (RAND_bytes(iv->value, iv->length) != 1) { - printerr(0, "Failed to get data for IV\n"); - return GSS_S_FAILURE; - } - /* Only privacy mode needs the rest of the parameter generation * but we use IV in other modes as well so tokens should be * unique */ @@ -847,6 +829,23 @@ static uint32_t sk_gen_initiator_params(struct sk_cred *skc) */ uint32_t sk_gen_params(struct sk_cred *skc, const bool initiator) { + uint32_t random; + + /* Random value used by both the request and response as part of the + * key binding material. 
This also should ensure we have unique + tokens that are sent to the remote server which is important because + the token is hashed for the sunrpc cache lookups and a failure there + would cause connection attempts to fail indefinitely due to the large + timeout value on the server side */ + if (RAND_bytes((unsigned char *)&random, sizeof(random)) != 1) { + printerr(0, "Failed to get data for random parameter\n"); + return GSS_S_FAILURE; + } + + /* The random value will always be used in byte range operations + * so we keep it as big endian from this point on */ + skc->sc_kctx.skc_host_random = htobe32(random); + if (initiator) return sk_gen_initiator_params(skc); @@ -988,6 +987,9 @@ uint32_t sk_verify_hmac(struct sk_cred *skc, gss_buffer_desc *bufs, */ void sk_free_cred(struct sk_cred *skc) { + if (!skc) + return; + if (skc->sc_p.value) free(skc->sc_p.value); if (skc->sc_pub_key.value) @@ -1005,16 +1007,21 @@ void sk_free_cred(struct sk_cred *skc) skc->sc_dh_shared_key.length); free(skc->sc_dh_shared_key.value); } + if (skc->sc_kctx.skc_hmac_key.value) { + memset(skc->sc_kctx.skc_hmac_key.value, 0, + skc->sc_kctx.skc_hmac_key.length); + free(skc->sc_kctx.skc_hmac_key.value); + } + if (skc->sc_kctx.skc_encrypt_key.value) { + memset(skc->sc_kctx.skc_encrypt_key.value, 0, + skc->sc_kctx.skc_encrypt_key.length); + free(skc->sc_kctx.skc_encrypt_key.value); + } if (skc->sc_kctx.skc_shared_key.value) { memset(skc->sc_kctx.skc_shared_key.value, 0, skc->sc_kctx.skc_shared_key.length); free(skc->sc_kctx.skc_shared_key.value); } - if (skc->sc_kctx.skc_iv.value) { - memset(skc->sc_kctx.skc_iv.value, 0, - skc->sc_kctx.skc_iv.length); - free(skc->sc_kctx.skc_iv.value); - } if (skc->sc_kctx.skc_session_key.value) { memset(skc->sc_kctx.skc_session_key.value, 0, skc->sc_kctx.skc_session_key.length); @@ -1025,6 +1032,66 @@ void sk_free_cred(struct sk_cred *skc) DH_free(skc->sc_params); free(skc); + skc = NULL; +} + +/* This function handles key derivation using the hash
algorithm specified in + * \a hash_alg, buffers in \a key_binding_bufs, and original key in + * \a origin_key to produce a \a derived_key. The first element of the + * key_binding_bufs array is reserved for the counter used in the KDF. The + * derived key in \a derived_key could differ in size from \a origin_key and + * must be populated with the expected size and a valid buffer to hold the + * contents. + * + * If the derived key size is greater than the HMAC algorithm size it will be + * done using several iterations of a counter and the key binding bufs. + * + * If the size is smaller it will copy the first N bytes necessary to + * fill the derived key. */ +int sk_kdf(gss_buffer_desc *derived_key , gss_buffer_desc *origin_key, + gss_buffer_desc *key_binding_bufs, int numbufs, int hmac_alg) +{ + size_t remain; + size_t bytes; + uint32_t counter; + char *keydata; + gss_buffer_desc tmp_hash; + int i; + int rc; + + if (numbufs < 1) + return -EINVAL; + + /* Use a counter as the first buffer followed by the key binding + * buffers in the event we need more than a single cycle to + * produce a symmetric key large enough in size */ + key_binding_bufs[0].value = &counter; + key_binding_bufs[0].length = sizeof(counter); + + remain = derived_key->length; + keydata = derived_key->value; + i = 0; + while (remain > 0) { + counter = htobe32(i++); + rc = sk_sign_bufs(origin_key, key_binding_bufs, numbufs, + sk_hash_to_evp_md(hmac_alg), &tmp_hash); + if (rc) { + if (tmp_hash.value) + free(tmp_hash.value); + return rc; + } + + LASSERT(sk_hmac_types[hmac_alg].sht_bytes == + tmp_hash.length); + + bytes = (remain < tmp_hash.length) ?
remain : tmp_hash.length; + memcpy(keydata, tmp_hash.value, bytes); + free(tmp_hash.value); + remain -= bytes; + keydata += bytes; + } + + return 0; } /* Populates the sk_cred's session_key using the a Key Derviation Function (KDF) @@ -1036,24 +1103,14 @@ void sk_free_cred(struct sk_cred *skc) * \return -1 failure * \return 0 success */ -int sk_kdf(struct sk_cred *skc, lnet_nid_t client_nid, - gss_buffer_desc *key_binding_input) +int sk_session_kdf(struct sk_cred *skc, lnet_nid_t client_nid, + gss_buffer_desc *client_token, gss_buffer_desc *server_token) { struct sk_kernel_ctx *kctx = &skc->sc_kctx; gss_buffer_desc *session_key = &kctx->skc_session_key; - gss_buffer_desc bufs[4]; - gss_buffer_desc tmp_hash; - char *skp; - size_t remain; - size_t bytes; - uint32_t counter; - int i; + gss_buffer_desc bufs[5]; int rc = -1; - /* No keys computed unless privacy mode is in use */ - if ((skc->sc_flags & LGSS_SVC_PRIV) == 0) - return 0; - session_key->length = sk_crypt_types[kctx->skc_crypt_alg].sct_bytes; session_key->value = malloc(session_key->length); if (!session_key->value) { @@ -1061,42 +1118,84 @@ int sk_kdf(struct sk_cred *skc, lnet_nid_t client_nid, return rc; } - /* Use the HMAC algorithm provided by in the shared key file to derive - * a session key. eg: HMAC(key, msg) - * key: the shared key provided in the shared key file - * msg is the bytes in the following order: - * 1. big_endian(counter) - * 2. DH shared key - * 3. Clients NIDs - * 4. key_binding_input */ - bufs[0].value = &counter; - bufs[0].length = sizeof(counter); + /* Key binding info ordering + * 1. Reserved for counter + * 1. DH shared key + * 2. Client's NIDs + * 3. Client's token + * 4. 
Server's token */ + bufs[0].value = NULL; + bufs[0].length = 0; bufs[1] = skc->sc_dh_shared_key; bufs[2].value = &client_nid; bufs[2].length = sizeof(client_nid); - bufs[3] = *key_binding_input; + bufs[3] = *client_token; + bufs[4] = *server_token; - remain = session_key->length; - skp = session_key->value; - i = 0; - while (remain > 0) { - counter = be32toh(i++); - rc = sk_sign_bufs(&kctx->skc_shared_key, bufs, 4, - sk_hash_to_evp_md(kctx->skc_hmac_alg), &tmp_hash); - if (rc) { - free(tmp_hash.value); - return rc; - } + return sk_kdf(&kctx->skc_session_key, &kctx->skc_shared_key, bufs, + 5, kctx->skc_hmac_alg); +} - LASSERT(sk_hmac_types[kctx->skc_hmac_alg].sht_bytes == - tmp_hash.length); +/* Uses the session key to create an HMAC key and encryption key. In + * integrity mode the session key used to generate the HMAC key uses + * session information which is available on the wire but by creating + * a session based HMAC key we can prevent potential replay as both the + * client and server have random numbers used as part of the key creation. + * + * The keys used for integrity and privacy are formulated as below using + * the session key that is the output of the key derivation function. The + * HMAC algorithm is determined by the shared key algorithm selected in the + * key file. 
+ * + * For ski mode: + * Session HMAC Key = PBKDF2("Integrity", KDF derived Session Key) + * + * For skpi mode: + * Session HMAC Key = PBKDF2("Integrity", KDF derived Session Key) + * Session Encryption Key = PBKDF2("Encrypt", KDF derived Session Key) + * + * \param[in,out] skc Shared key credentials structure with + * + * \return -1 failure + * \return 0 success + */ +int sk_compute_keys(struct sk_cred *skc) +{ + struct sk_kernel_ctx *kctx = &skc->sc_kctx; + gss_buffer_desc *session_key = &kctx->skc_session_key; + gss_buffer_desc *hmac_key = &kctx->skc_hmac_key; + gss_buffer_desc *encrypt_key = &kctx->skc_encrypt_key; + char *encrypt = "Encrypt"; + char *integrity = "Integrity"; + int rc; - bytes = (remain < tmp_hash.length) ? remain : tmp_hash.length; - memcpy(skp, tmp_hash.value, bytes); - free(tmp_hash.value); - remain -= bytes; - skp += bytes; - } + hmac_key->length = sk_hmac_types[kctx->skc_hmac_alg].sht_bytes; + hmac_key->value = malloc(hmac_key->length); + if (!hmac_key->value) + return -ENOMEM; + + rc = PKCS5_PBKDF2_HMAC(integrity, -1, session_key->value, + session_key->length, SK_PBKDF2_ITERATIONS, + sk_hash_to_evp_md(kctx->skc_hmac_alg), + hmac_key->length, hmac_key->value); + if (rc == 0) + return -EINVAL; + + /* Encryption key is only populated in privacy mode */ + if ((skc->sc_flags & LGSS_SVC_PRIV) == 0) + return 0; + + encrypt_key->length = sk_crypt_types[kctx->skc_crypt_alg].sct_bytes; + encrypt_key->value = malloc(encrypt_key->length); + if (!encrypt_key->value) + return -ENOMEM; + + rc = PKCS5_PBKDF2_HMAC(encrypt, -1, session_key->value, + session_key->length, SK_PBKDF2_ITERATIONS, + sk_hash_to_evp_md(kctx->skc_hmac_alg), + encrypt_key->length, encrypt_key->value); + if (rc == 0) + return -EINVAL; return 0; } @@ -1112,7 +1211,7 @@ int sk_kdf(struct sk_cred *skc, lnet_nid_t client_nid, * \return gss error failure * \return GSS_S_COMPLETE success */ -uint32_t sk_compute_key(struct sk_cred *skc, const gss_buffer_desc *pub_key) +uint32_t 
sk_compute_dh_key(struct sk_cred *skc, const gss_buffer_desc *pub_key) { gss_buffer_desc *dh_shared = &skc->sc_dh_shared_key; BIGNUM *remote_pub_key; @@ -1175,8 +1274,8 @@ int sk_serialize_kctx(struct sk_cred *skc, gss_buffer_desc *ctx_token) char *p, *end; size_t bufsize; - bufsize = sizeof(*kctx) + kctx->skc_session_key.length + - kctx->skc_iv.length + kctx->skc_shared_key.length; + bufsize = sizeof(*kctx) + kctx->skc_hmac_key.length + + kctx->skc_encrypt_key.length; ctx_token->value = malloc(bufsize); if (!ctx_token->value) @@ -1194,11 +1293,13 @@ int sk_serialize_kctx(struct sk_cred *skc, gss_buffer_desc *ctx_token) return -1; if (WRITE_BYTES(&p, end, kctx->skc_expire)) return -1; - if (write_buffer(&p, end, &kctx->skc_shared_key)) + if (WRITE_BYTES(&p, end, kctx->skc_host_random)) + return -1; + if (WRITE_BYTES(&p, end, kctx->skc_peer_random)) return -1; - if (write_buffer(&p, end, &kctx->skc_iv)) + if (write_buffer(&p, end, &kctx->skc_hmac_key)) return -1; - if (write_buffer(&p, end, &kctx->skc_session_key)) + if (write_buffer(&p, end, &kctx->skc_encrypt_key)) return -1; printerr(2, "Serialized buffer of %zu bytes for kernel\n", bufsize); diff --git a/lustre/utils/gss/sk_utils.h b/lustre/utils/gss/sk_utils.h index e2c38f9..9644786 100644 --- a/lustre/utils/gss/sk_utils.h +++ b/lustre/utils/gss/sk_utils.h @@ -39,12 +39,33 @@ /* Some limits and defaults */ #define SK_CONF_VERSION 1 +#define SK_MSG_VERSION 1 #define SK_GENERATOR 2 #define SK_SESSION_MAX_KEYLEN_BYTES 1024 #define SK_MAX_KEYLEN_BYTES 128 -#define SK_IV_SIZE 16 +#define SK_NONCE_SIZE 4 #define MAX_MGSNIDS 16 +enum sk_ctx_init_buffers { + /* Initiator netstring buffer ordering */ + SK_INIT_VERSION = 0, + SK_INIT_RANDOM = 1, + SK_INIT_P = 2, + SK_INIT_PUB_KEY = 3, + SK_INIT_TARGET = 4, + SK_INIT_NODEMAP = 5, + SK_INIT_FLAGS = 6, + SK_INIT_HMAC = 7, + SK_INIT_BUFFERS = 8, + + /* Responder netstring buffer ordering */ + SK_RESP_VERSION = 0, + SK_RESP_RANDOM = 1, + SK_RESP_PUB_KEY = 2, + SK_RESP_HMAC = 
3, + SK_RESP_BUFFERS = 4, +}; + /* String consisting of "lustre:fsname:nodemap_hash" */ #define SK_DESCRIPTION_SIZE (9 + MTI_NAME_MAXLEN + LUSTRE_NODEMAP_NAME_LENGTH) @@ -90,8 +111,11 @@ struct sk_kernel_ctx { uint16_t skc_hmac_alg; uint16_t skc_crypt_alg; uint32_t skc_expire; + uint32_t skc_host_random; + uint32_t skc_peer_random; + gss_buffer_desc skc_hmac_key; + gss_buffer_desc skc_encrypt_key; gss_buffer_desc skc_shared_key; - gss_buffer_desc skc_iv; gss_buffer_desc skc_session_key; }; @@ -126,9 +150,10 @@ uint32_t sk_verify_hmac(struct sk_cred *skc, gss_buffer_desc *bufs, const int numbufs, const EVP_MD *hash_alg, gss_buffer_desc *hmac); void sk_free_cred(struct sk_cred *skc); -int sk_kdf(struct sk_cred *skc, lnet_nid_t client_nid, - gss_buffer_desc *key_binding_input); -uint32_t sk_compute_key(struct sk_cred *skc, const gss_buffer_desc *pub_key); +int sk_session_kdf(struct sk_cred *skc, lnet_nid_t client_nid, + gss_buffer_desc *client_token, gss_buffer_desc *server_token); +uint32_t sk_compute_dh_key(struct sk_cred *skc, const gss_buffer_desc *pub_key); +int sk_compute_keys(struct sk_cred *skc); int sk_serialize_kctx(struct sk_cred *skc, gss_buffer_desc *ctx_token); int sk_decode_netstring(gss_buffer_desc *bufs, int numbufs, gss_buffer_desc *ns); diff --git a/lustre/utils/gss/svcgssd_proc.c b/lustre/utils/gss/svcgssd_proc.c index 4b249c3..8b97498 100644 --- a/lustre/utils/gss/svcgssd_proc.c +++ b/lustre/utils/gss/svcgssd_proc.c @@ -362,57 +362,72 @@ int handle_sk(struct svc_nego_data *snd) #ifdef HAVE_OPENSSL_SSK struct sk_cred *skc = NULL; struct svc_cred cred; - gss_buffer_desc bufs[7]; + gss_buffer_desc bufs[SK_INIT_BUFFERS]; gss_buffer_desc remote_pub_key = GSS_C_EMPTY_BUFFER; char *target; - uint32_t rc = GSS_S_FAILURE; + uint32_t rc = GSS_S_DEFECTIVE_TOKEN; + uint32_t version; uint32_t flags; - int numbufs = 7; int i; printerr(3, "Handling sk request\n"); + memset(bufs, 0, sizeof(gss_buffer_desc) * SK_INIT_BUFFERS); - /* See lgss_sk_using_cred() for 
client side token - * bufs returned are in this order: - * bufs[0] - iv - * bufs[1] - p - * bufs[2] - remote_pub_key - * bufs[3] - target - * bufs[4] - nodemap_hash - * bufs[5] - flags - * bufs[6] - hmac */ - i = sk_decode_netstring(bufs, numbufs, &snd->in_tok); - if (i < numbufs) { + /* See lgss_sk_using_cred() for client side token formation. + * Decoding initiator buffers */ + i = sk_decode_netstring(bufs, SK_INIT_BUFFERS, &snd->in_tok); + if (i < SK_INIT_BUFFERS) { printerr(0, "Invalid netstring token received from peer\n"); - rc = GSS_S_DEFECTIVE_TOKEN; - goto out_err; + goto cleanup_buffers; + } + + /* Allowing for a larger length first buffer in the future */ + if (bufs[SK_INIT_VERSION].length < sizeof(version)) { + printerr(0, "Invalid version received (wrong size)\n"); + goto cleanup_buffers; } + memcpy(&version, bufs[SK_INIT_VERSION].value, sizeof(version)); + version = be32toh(version); + if (version != SK_MSG_VERSION) { + printerr(0, "Invalid version received: %d\n", version); + goto cleanup_buffers; + } + + rc = GSS_S_FAILURE; /* target must be a null terminated string */ - i = bufs[3].length - 1; - target = bufs[3].value; + i = bufs[SK_INIT_TARGET].length - 1; + target = bufs[SK_INIT_TARGET].value; if (i >= 0 && target[i] != '\0') { printerr(0, "Invalid target from netstring\n"); - for (i = 0; i < numbufs; i++) - free(bufs[i].value); - goto out_err; + goto cleanup_buffers; } - memcpy(&flags, bufs[5].value, sizeof(flags)); + if (bufs[SK_INIT_FLAGS].length != sizeof(flags)) { + printerr(0, "Invalid flags from netstring\n"); + goto cleanup_buffers; + } + memcpy(&flags, bufs[SK_INIT_FLAGS].value, sizeof(flags)); + skc = sk_create_cred(target, snd->nm_name, be32toh(flags)); if (!skc) { printerr(0, "Failed to create sk credentials\n"); - for (i = 0; i < numbufs; i++) - free(bufs[i].value); - goto out_err; + goto cleanup_buffers; } /* Take control of all the allocated buffers from decoding */ - skc->sc_kctx.skc_iv = bufs[0]; - skc->sc_p = bufs[1]; - 
remote_pub_key = bufs[2]; - skc->sc_nodemap_hash = bufs[4]; - skc->sc_hmac = bufs[6]; + if (bufs[SK_INIT_RANDOM].length != + sizeof(skc->sc_kctx.skc_peer_random)) { + printerr(0, "Invalid size for client random\n"); + goto cleanup_buffers; + } + + memcpy(&skc->sc_kctx.skc_peer_random, bufs[SK_INIT_RANDOM].value, + sizeof(skc->sc_kctx.skc_peer_random)); + skc->sc_p = bufs[SK_INIT_P]; + remote_pub_key = bufs[SK_INIT_PUB_KEY]; + skc->sc_nodemap_hash = bufs[SK_INIT_NODEMAP]; + skc->sc_hmac = bufs[SK_INIT_HMAC]; /* Verify that the peer has used a key size greater to or equal * the size specified by the key file */ @@ -420,62 +435,76 @@ int handle_sk(struct svc_nego_data *snd) skc->sc_p.length < skc->sc_session_keylen) { printerr(0, "Peer DH parameters do not meet the size required " "by keyfile\n"); - goto out_err; + goto cleanup_partial; } /* Verify HMAC from peer. Ideally this would happen before anything * else but we don't have enough information to lookup key without the - * token (fsname and cluster_hash) so it's done shortly after. */ - rc = sk_verify_hmac(skc, bufs, numbufs - 1, EVP_sha256(), + * token (fsname and cluster_hash) so it's done after. 
*/ + rc = sk_verify_hmac(skc, bufs, SK_INIT_BUFFERS - 1, EVP_sha256(), &skc->sc_hmac); - free(bufs[3].value); - free(bufs[5].value); if (rc != GSS_S_COMPLETE) { printerr(0, "HMAC verification error: 0x%x from peer %s\n", - rc, libcfs_nid2str((lnet_nid_t) snd->nid)); - goto out_err; + rc, libcfs_nid2str((lnet_nid_t)snd->nid)); + goto cleanup_partial; } /* Check that the cluster hash matches the hash of nodemap name */ rc = sk_verify_hash(snd->nm_name, EVP_sha256(), &skc->sc_nodemap_hash); if (rc != GSS_S_COMPLETE) { printerr(0, "Cluster hash failed validation: 0x%x\n", rc); - goto out_err; + goto cleanup_partial; } rc = sk_gen_params(skc, false); if (rc != GSS_S_COMPLETE) { printerr(0, "Failed to generate DH params for responder\n"); - goto out_err; + goto cleanup_partial; } - if (sk_compute_key(skc, &remote_pub_key)) { + if (sk_compute_dh_key(skc, &remote_pub_key)) { printerr(0, "Failed to compute session key from DH params\n"); + goto cleanup_partial; + } + + /* Cleanup init buffers we have copied or don't need anymore */ + free(bufs[SK_INIT_VERSION].value); + free(bufs[SK_INIT_RANDOM].value); + free(bufs[SK_INIT_TARGET].value); + free(bufs[SK_INIT_FLAGS].value); + + /* Server reply contains the servers public key, random, and HMAC */ + version = htobe32(SK_MSG_VERSION); + bufs[SK_RESP_VERSION].value = &version; + bufs[SK_RESP_VERSION].length = sizeof(version); + bufs[SK_RESP_RANDOM].value = &skc->sc_kctx.skc_host_random; + bufs[SK_RESP_RANDOM].length = sizeof(skc->sc_kctx.skc_host_random); + bufs[SK_RESP_PUB_KEY] = skc->sc_pub_key; + if (sk_sign_bufs(&skc->sc_kctx.skc_shared_key, bufs, + SK_RESP_BUFFERS - 1, EVP_sha256(), + &skc->sc_hmac)) { + printerr(0, "Failed to sign parameters\n"); goto out_err; } - if (sk_kdf(skc, snd->nid, &snd->in_tok)) { - printerr(0, "Failed to calulate derviced session key\n"); + bufs[SK_RESP_HMAC] = skc->sc_hmac; + if (sk_encode_netstring(bufs, SK_RESP_BUFFERS, &snd->out_tok)) { + printerr(0, "Failed to encode netstring for token\n"); 
goto out_err; } - if (sk_serialize_kctx(skc, &snd->ctx_token)) { - printerr(0, "Failed to serialize context for kernel\n"); + printerr(2, "Created netstring of %zd bytes\n", snd->out_tok.length); + + if (sk_session_kdf(skc, snd->nid, &snd->in_tok, &snd->out_tok)) { + printerr(0, "Failed to calculate derived session key\n"); goto out_err; } - - /* Server reply only contains the servers public key and HMAC */ - bufs[0] = skc->sc_pub_key; - if (sk_sign_bufs(&skc->sc_kctx.skc_shared_key, bufs, 1, EVP_sha256(), - &skc->sc_hmac)) { - printerr(0, "Failed to sign parameters\n"); + if (sk_compute_keys(skc)) { + printerr(0, "Failed to compute HMAC and encryption keys\n"); goto out_err; } - bufs[1] = skc->sc_hmac; - if (sk_encode_netstring(bufs, 2, &snd->out_tok)) { - printerr(0, "Failed to encode netstring for token\n"); + if (sk_serialize_kctx(skc, &snd->ctx_token)) { + printerr(0, "Failed to serialize context for kernel\n"); goto out_err; } - printerr(2, "Created netstring of %zd bytes\n", snd->out_tok.length); - snd->out_handle.length = sizeof(snd->handle_seq); memcpy(snd->out_handle.value, &snd->handle_seq, sizeof(snd->handle_seq)); @@ -502,16 +531,32 @@ int handle_sk(struct svc_nego_data *snd) printerr(3, "sk returning success\n"); return 0; +cleanup_buffers: + for (i = 0; i < SK_INIT_BUFFERS; i++) + free(bufs[i].value); + sk_free_cred(skc); + snd->maj_stat = rc; + return -1; + +cleanup_partial: + free(bufs[SK_INIT_VERSION].value); + free(bufs[SK_INIT_RANDOM].value); + free(bufs[SK_INIT_TARGET].value); + free(bufs[SK_INIT_FLAGS].value); + free(remote_pub_key.value); + sk_free_cred(skc); + snd->maj_stat = rc; + return -1; + out_err: snd->maj_stat = rc; - if (remote_pub_key.value) - free(remote_pub_key.value); - if (snd->ctx_token.value) + if (snd->ctx_token.value) { free(snd->ctx_token.value); - snd->ctx_token.length = 0; - - if (skc) - sk_free_cred(skc); + snd->ctx_token.value = 0; + snd->ctx_token.length = 0; + } + free(remote_pub_key.value); + sk_free_cred(skc);
printerr(3, "sk returning failure\n"); #else /* !HAVE_OPENSSL_SSK */ printerr(0, "ERROR: shared key subflavour is not enabled\n"); -- 1.8.3.1