#include <linux/mutex.h>
#include <crypto/ctr.h>
-#include <libcfs/libcfs_crypto.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
-#include <lustre/lustre_user.h>
#include "gss_err.h"
#include "gss_crypto.h"
#define SK_IV_REV_START (1ULL << 63)
struct sk_ctx {
- __u16 sc_pad;
- __u16 sc_crypt;
- __u32 sc_expire;
- __u32 sc_host_random;
- __u32 sc_peer_random;
- atomic64_t sc_iv;
- rawobj_t sc_hmac_key;
- struct gss_keyblock sc_session_kb;
- enum cfs_crypto_hash_alg sc_hmac;
+ enum cfs_crypto_crypt_alg sc_crypt;
+ enum cfs_crypto_hash_alg sc_hmac;
+ __u32 sc_expire;
+ __u32 sc_host_random;
+ __u32 sc_peer_random;
+ atomic64_t sc_iv;
+ rawobj_t sc_hmac_key;
+ struct gss_keyblock sc_session_kb;
};
/* Token header as seen on the wire.
 * NOTE(review): only an HMAC blob is visible here, and the skw_ prefix
 * matches the sk_wire wrapper used elsewhere in this file — this struct
 * looks truncated by the surrounding patch residue (upstream sk_hdr
 * carries version/IV fields). Confirm against the full file before
 * relying on this layout. */
struct sk_hdr {
	rawobj_t skw_hmac;
};
-static struct sk_crypt_type sk_crypt_types[] = {
- [SK_CRYPT_AES256_CTR] = {
- .cht_name = "aes256",
- .cht_key = 0,
- .cht_bytes = 32,
- },
-};
-
/*
 * Round \a len up to the next multiple of \a blocksize.
 *
 * \param len        byte count to round
 * \param blocksize  cipher block size; must be a power of two for the
 *                   mask arithmetic to be valid
 *
 * \retval smallest multiple of \a blocksize that is >= \a len
 *
 * The stray memcpy(iv, &ctr, ...) that followed the return was
 * unreachable hunk residue from another function (neither iv nor ctr
 * is in scope here) and has been removed.
 */
static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
	return (len + blocksize - 1) & (~(blocksize - 1));
}
-static int sk_init_keys(struct sk_ctx *skc)
-{
- return gss_keyblock_init(&skc->sc_session_kb,
- sk_crypt_types[skc->sc_crypt].cht_name, 0);
-}
-
static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc)
{
char *ptr = inbuf->data;
char *end = inbuf->data + inbuf->len;
- __u32 tmp;
+ char sk_hmac[CRYPTO_MAX_ALG_NAME];
+ char sk_crypt[CRYPTO_MAX_ALG_NAME];
+ u32 tmp;
/* see sk_serialize_kctx() for format from userspace side */
/* 1. Version */
if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
- CERROR("Failed to read shared key interface version");
+ CERROR("Failed to read shared key interface version\n");
return -1;
}
if (tmp != SK_INTERFACE_VERSION) {
}
/* 2. HMAC type */
- if (gss_get_bytes(&ptr, end, &skc->sc_hmac, sizeof(skc->sc_hmac))) {
- CERROR("Failed to read HMAC algorithm type");
+ if (gss_get_bytes(&ptr, end, &sk_hmac, sizeof(sk_hmac))) {
+ CERROR("Failed to read HMAC algorithm type\n");
return -1;
}
- if (skc->sc_hmac >= CFS_HASH_ALG_MAX) {
- CERROR("Invalid hmac type: %d\n", skc->sc_hmac);
+
+ skc->sc_hmac = cfs_crypto_hash_alg(sk_hmac);
+ if (skc->sc_hmac != CFS_HASH_ALG_NULL &&
+ skc->sc_hmac != CFS_HASH_ALG_SHA256 &&
+ skc->sc_hmac != CFS_HASH_ALG_SHA512) {
+ CERROR("Invalid hmac type: %s\n", sk_hmac);
return -1;
}
/* 3. crypt type */
- if (gss_get_bytes(&ptr, end, &skc->sc_crypt, sizeof(skc->sc_crypt))) {
- CERROR("Failed to read crypt algorithm type");
+ if (gss_get_bytes(&ptr, end, &sk_crypt, sizeof(sk_crypt))) {
+ CERROR("Failed to read crypt algorithm type\n");
return -1;
}
- if (skc->sc_crypt <= SK_CRYPT_EMPTY || skc->sc_crypt >= SK_CRYPT_MAX) {
- CERROR("Invalid crypt type: %d\n", skc->sc_crypt);
+
+ skc->sc_crypt = cfs_crypto_crypt_alg(sk_crypt);
+ if (skc->sc_crypt == CFS_CRYPT_ALG_UNKNOWN) {
+ CERROR("Invalid crypt type: %s\n", sk_crypt);
return -1;
}
/* 4. expiration time */
if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
- CERROR("Failed to read context expiration time");
+ CERROR("Failed to read context expiration time\n");
return -1;
}
- skc->sc_expire = tmp + cfs_time_current_sec();
+ skc->sc_expire = tmp + ktime_get_real_seconds();
/* 5. host random is used as nonce for encryption */
if (gss_get_bytes(&ptr, end, &skc->sc_host_random,
sizeof(skc->sc_host_random))) {
- CERROR("Failed to read host random ");
+ CERROR("Failed to read host random\n");
return -1;
}
/* 6. peer random is used as nonce for decryption */
if (gss_get_bytes(&ptr, end, &skc->sc_peer_random,
sizeof(skc->sc_peer_random))) {
- CERROR("Failed to read peer random ");
+ CERROR("Failed to read peer random\n");
return -1;
}
/* 7. HMAC key */
if (gss_get_rawobj(&ptr, end, &skc->sc_hmac_key)) {
- CERROR("Failed to read HMAC key");
+ CERROR("Failed to read HMAC key\n");
return -1;
}
if (skc->sc_hmac_key.len <= SK_MIN_SIZE) {
/* 8. Session key, can be empty if not using privacy mode */
if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
- CERROR("Failed to read session key");
+ CERROR("Failed to read session key\n");
return -1;
}
/* Only privacy mode needs to initialize keys */
if (skc->sc_session_kb.kb_key.len > 0) {
privacy = true;
- if (sk_init_keys(skc))
+ if (gss_keyblock_init(&skc->sc_session_kb,
+ cfs_crypto_crypt_name(skc->sc_crypt), 0))
goto out_err;
}
/* Only privacy mode needs to initialize keys */
if (skc_new->sc_session_kb.kb_key.len > 0)
- if (sk_init_keys(skc_new))
+ if (gss_keyblock_init(&skc_new->sc_session_kb,
+ cfs_crypto_crypt_name(skc_new->sc_crypt),
+ 0))
goto out_err;
gss_context_new->internal_ctx_id = skc_new;
static
__u32 gss_inquire_context_sk(struct gss_ctx *gss_context,
- unsigned long *endtime)
+ time64_t *endtime)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
}
static
-__u32 sk_make_hmac(const char *alg_name, rawobj_t *key, int msg_count,
- rawobj_t *msgs, int iov_count, lnet_kiov_t *iovs,
- rawobj_t *token)
+u32 sk_make_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key, int msg_count,
+ rawobj_t *msgs, int iov_count, lnet_kiov_t *iovs,
+ rawobj_t *token)
{
- struct crypto_ahash *tfm;
- int rc;
-
- tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- return GSS_S_FAILURE;
+ struct cfs_crypto_hash_desc *desc;
+ int rc2, rc;
- rc = GSS_S_FAILURE;
- LASSERT(token->len >= crypto_ahash_digestsize(tfm));
- if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,
- token))
- rc = GSS_S_COMPLETE;
+ desc = cfs_crypto_hash_init(algo, key->data, key->len);
+ if (IS_ERR(desc)) {
+ rc = PTR_ERR(desc);
+ goto out_init_failed;
+ }
- crypto_free_ahash(tfm);
- return rc;
+ rc2 = gss_digest_hash(desc, NULL, msg_count, msgs, iov_count, iovs,
+ token);
+ rc = cfs_crypto_hash_final(desc, key->data, &key->len);
+ if (!rc && rc2)
+ rc = rc2;
+out_init_failed:
+ return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
static
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- return sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac),
+
+ return sk_make_hmac(skc->sc_hmac,
&skc->sc_hmac_key, message_count, messages,
iov_count, iovs, token);
}
static
u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
- int message_count, rawobj_t *messages, int iov_count,
- lnet_kiov_t *iovs, rawobj_t *token)
+ int message_count, rawobj_t *messages,
+ int iov_count, lnet_kiov_t *iovs,
+ rawobj_t *token)
{
rawobj_t checksum = RAWOBJ_EMPTY;
__u32 rc = GSS_S_FAILURE;
checksum.len = cfs_crypto_hash_digestsize(algo);
- /* What about checksum.len == 0 ??? */
-
if (token->len < checksum.len) {
CDEBUG(D_SEC, "Token received too short, expected %d "
"received %d\n", token->len, checksum.len);
if (!checksum.data)
return rc;
- if (sk_make_hmac(cfs_crypto_hash_name(algo), key, message_count,
+ if (sk_make_hmac(algo, key, message_count,
messages, iov_count, iovs, &checksum)) {
CDEBUG(D_SEC, "Failed to create checksum to validate\n");
goto cleanup;
* to decrypt up to the number of bytes actually specified from the sender
* (bd_nob) otherwise the calulated HMAC will be incorrect. */
static
-__u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac,
- rawobj_t *key, int msgcnt, rawobj_t *msgs,
- int iovcnt, lnet_kiov_t *iovs, int iov_bytes,
- rawobj_t *token)
+u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac, rawobj_t *key,
+ int msgcnt, rawobj_t *msgs, int iovcnt,
+ lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token)
{
+ struct cfs_crypto_hash_desc *desc;
rawobj_t checksum = RAWOBJ_EMPTY;
- struct cfs_crypto_hash_desc *hdesc;
- int rc = GSS_S_FAILURE, i;
+ struct ahash_request *req;
+ struct scatterlist sg[1];
+ int rc = GSS_S_FAILURE;
+ struct sg_table sgt;
+ int bytes;
+ int i;
checksum.len = cfs_crypto_hash_digestsize(sc_hmac);
if (token->len < checksum.len) {
if (!checksum.data)
return rc;
+ desc = cfs_crypto_hash_init(sc_hmac, key->data, key->len);
+ if (IS_ERR(desc))
+ goto cleanup;
+
+ req = (struct ahash_request *) desc;
for (i = 0; i < msgcnt; i++) {
if (!msgs[i].len)
continue;
- rc = cfs_crypto_hash_digest(sc_hmac, msgs[i].data, msgs[i].len,
- key->data, key->len,
- checksum.data, &checksum.len);
- if (rc)
- goto cleanup;
- }
+ rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
+ if (rc != 0)
+ goto hash_cleanup;
- hdesc = cfs_crypto_hash_init(sc_hmac, key->data, key->len);
- if (IS_ERR(hdesc)) {
- rc = PTR_ERR(hdesc);
- goto cleanup;
+ ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
+ rc = crypto_ahash_update(req);
+ if (rc) {
+ gss_teardown_sgtable(&sgt);
+ goto hash_cleanup;
+ }
+
+ gss_teardown_sgtable(&sgt);
}
for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
- int bytes;
-
if (iovs[i].kiov_len == 0)
continue;
bytes = min_t(int, iov_bytes, iovs[i].kiov_len);
iov_bytes -= bytes;
- rc = cfs_crypto_hash_update_page(hdesc, iovs[i].kiov_page,
- iovs[i].kiov_offset, bytes);
+
+ sg_init_table(sg, 1);
+ sg_set_page(&sg[0], iovs[i].kiov_page, bytes,
+ iovs[i].kiov_offset);
+ ahash_request_set_crypt(req, sg, NULL, bytes);
+ rc = crypto_ahash_update(req);
if (rc)
- goto cleanup;
+ goto hash_cleanup;
}
- rc = cfs_crypto_hash_final(hdesc, checksum.data, &checksum.len);
- if (rc)
- goto cleanup;
-
if (memcmp(token->data, checksum.data, checksum.len)) {
rc = GSS_S_BAD_SIG;
- goto cleanup;
+ goto hash_cleanup;
}
rc = GSS_S_COMPLETE;
+
+hash_cleanup:
+ cfs_crypto_hash_final(desc, checksum.data, &checksum.len);
+
cleanup:
OBD_FREE_LARGE(checksum.data, checksum.len);
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
+
return sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key,
message_count, messages, iov_count, iovs, token);
}
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
skw.skw_hmac.len = sht_bytes;
- if (sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac), &skc->sc_hmac_key,
+ if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key,
3, msgbufs, 0, NULL, &skw.skw_hmac))
return GSS_S_FAILURE;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
skw.skw_hmac.len = sht_bytes;
- if (sk_make_hmac(cfs_crypto_hash_name(skc->sc_hmac), &skc->sc_hmac_key,
- 1, &skw.skw_cipher, desc->bd_iov_count,
- GET_ENC_KIOV(desc), &skw.skw_hmac))
+ if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1, &skw.skw_cipher,
+ desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac))
return GSS_S_FAILURE;
return GSS_S_COMPLETE;
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
- skw.skw_hmac.len = cfs_crypto_hash_digestsize(skc->sc_hmac);
+ skw.skw_hmac.len = sht_bytes;
skh = (struct sk_hdr *)skw.skw_header.data;
rc = sk_verify_header(skh);
if (rc != GSS_S_COMPLETE)
return rc;
- rc = sk_verify_bulk_hmac(skc->sc_hmac,
- &skc->sc_hmac_key, 1, &skw.skw_cipher,
- desc->bd_iov_count, GET_ENC_KIOV(desc),
- desc->bd_nob, &skw.skw_hmac);
+ rc = sk_verify_bulk_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1,
+ &skw.skw_cipher, desc->bd_iov_count,
+ GET_ENC_KIOV(desc), desc->bd_nob,
+ &skw.skw_hmac);
if (rc)
return rc;