unsigned int cht_size; /**< hash digest size */
};
+struct cfs_crypto_crypt_type {
+ char *cct_name; /**< crypto algorithm name, equal to
+ * format name for crypto api */
+ unsigned int cct_size; /**< crypto key size in bytes */
+};
+
enum cfs_crypto_hash_alg {
CFS_HASH_ALG_NULL = 0,
CFS_HASH_ALG_ADLER32,
CFS_HASH_ALG_UNKNOWN = 0xff
};
+/** Symmetric crypt algorithm IDs; each valid ID indexes crypt_types[] */
+enum cfs_crypto_crypt_alg {
+ CFS_CRYPT_ALG_NULL = 0, /**< no encryption */
+ CFS_CRYPT_ALG_AES256_CTR, /**< AES-256 in CTR mode ("ctr(aes)") */
+ CFS_CRYPT_ALG_MAX, /**< number of valid algorithm IDs */
+ CFS_CRYPT_ALG_UNKNOWN = 0xff
+};
+
static struct cfs_crypto_hash_type hash_types[] = {
[CFS_HASH_ALG_NULL] = {
.cht_name = "null",
}
};
+/* Table of crypt algorithm properties; keep indexes in sync with
+ * enum cfs_crypto_crypt_alg */
+static struct cfs_crypto_crypt_type crypt_types[] = {
+ [CFS_CRYPT_ALG_NULL] = {
+ .cct_name = "null",
+ .cct_size = 0
+ },
+ [CFS_CRYPT_ALG_AES256_CTR] = {
+ .cct_name = "ctr(aes)",
+ .cct_size = 32
+ }
+};
+
/* Maximum size of hash_types[].cht_size */
#define CFS_CRYPTO_HASH_DIGESTSIZE_MAX 64
return CFS_HASH_ALG_UNKNOWN;
}
+/**
+ * Return crypt algorithm information for the specified algorithm identifier
+ *
+ * Crypt information includes algorithm name, key size.
+ *
+ * \param[in] crypt_alg crypt algorithm id (CFS_CRYPT_ALG_*)
+ *
+ * \retval cfs_crypto_crypt_type for valid ID (CFS_CRYPT_ALG_*)
+ * \retval NULL for unknown algorithm identifier
+ */
+static inline const struct
+cfs_crypto_crypt_type *cfs_crypto_crypt_type(
+ enum cfs_crypto_crypt_alg crypt_alg)
+{
+ struct cfs_crypto_crypt_type *ct;
+
+ /* only table entries with a name are valid algorithms */
+ if (crypt_alg < CFS_CRYPT_ALG_MAX) {
+ ct = &crypt_types[crypt_alg];
+ if (ct->cct_name != NULL)
+ return ct;
+ }
+ return NULL;
+}
+
+/**
+ * Return crypt name for crypt algorithm identifier
+ *
+ * \param[in] crypt_alg crypt algorithm id (CFS_CRYPT_ALG_*)
+ *
+ * \retval string name of known crypt algorithm
+ * \retval "unknown" if crypt algorithm is unknown
+ */
+static inline const
+char *cfs_crypto_crypt_name(enum cfs_crypto_crypt_alg crypt_alg)
+{
+ const struct cfs_crypto_crypt_type *ct;
+
+ ct = cfs_crypto_crypt_type(crypt_alg);
+ if (ct)
+ return ct->cct_name;
+
+ return "unknown";
+}
+
+
+/**
+ * Return key size for crypto algorithm type
+ *
+ * \param[in] crypt_alg crypt algorithm id (CFS_CRYPT_ALG_*)
+ *
+ * \retval crypt algorithm key size in bytes
+ * \retval 0 if crypt algorithm type is unknown
+ */
+static inline
+unsigned int cfs_crypto_crypt_keysize(enum cfs_crypto_crypt_alg crypt_alg)
+{
+ const struct cfs_crypto_crypt_type *ct;
+
+ ct = cfs_crypto_crypt_type(crypt_alg);
+ if (ct != NULL)
+ return ct->cct_size;
+
+ return 0;
+}
+
+/**
+ * Find crypto algorithm ID for the specified algorithm name
+ *
+ * \param[in] algname crypt algorithm name, as used by the crypto api
+ *
+ * \retval crypto algorithm ID for valid ID (CFS_CRYPT_ALG_*)
+ * \retval CFS_CRYPT_ALG_UNKNOWN for unknown algorithm name
+ */
+static inline unsigned char cfs_crypto_crypt_alg(const char *algname)
+{
+ enum cfs_crypto_crypt_alg crypt_alg;
+
+ /* skip unnamed (invalid) table entries, matching the validity
+ * check done in cfs_crypto_crypt_type() */
+ for (crypt_alg = 0; crypt_alg < CFS_CRYPT_ALG_MAX; crypt_alg++)
+ if (crypt_types[crypt_alg].cct_name != NULL &&
+ strcmp(crypt_types[crypt_alg].cct_name, algname) == 0)
+ return crypt_alg;
+
+ return CFS_CRYPT_ALG_UNKNOWN;
+}
+
int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
const void *buf, unsigned int buf_len,
unsigned char *key, unsigned int key_len,
]) # LC_CONFIG_SUNRPC
#
-# LC_HAVE_CRYPTO_HASH
-#
-# 4.6 kernel commit 896545098777564212b9e91af4c973f094649aa7
-# removed crypto_hash support. Since GSS only works with
-# crypto_hash it has to be disabled for newer distros.
-#
-AC_DEFUN([LC_HAVE_CRYPTO_HASH], [
-LB_CHECK_COMPILE([if crypto_hash API is supported],
-crypto_hash, [
- #include <linux/crypto.h>
-],[
- crypto_hash_digestsize(NULL);
-], [], [enable_gss="no"])
-])
-]) # LC_HAVE_CRYPTO_HASH
-
-#
# LC_CONFIG_GSS (default 'auto' (tests for dependencies, if found, enables))
#
# Build gss and related tools of Lustre. Currently both kernel and user space
AS_IF([test "x$enable_gss" != xno], [
LC_CONFIG_GSS_KEYRING
- LC_HAVE_CRYPTO_HASH
LC_HAVE_CRED_TGCRED
LC_KEY_TYPE_INSTANTIATE_2ARGS
sunrpc_required=$enable_gss
]) # LC_VM_OPERATIONS_REMOVE_VMF_ARG
#
+# LC_HAVE_KEY_USAGE_REFCOUNT
+#
+# Kernel version 4.11 commit fff292914d3a2f1efd05ca71c2ba72a3c663201e
+# converted key.usage from atomic_t to refcount_t.
+# When HAVE_KEY_USAGE_REFCOUNT is not defined, key.usage is still
+# atomic_t and is read with atomic_read() instead.
+#
+AC_DEFUN([LC_HAVE_KEY_USAGE_REFCOUNT], [
+LB_CHECK_COMPILE([if 'key.usage' is refcount_t],
+key_usage_refcount, [
+ #include <linux/key.h>
+],[
+ struct key key = { };
+
+ refcount_read(&key.usage);
+],[
+ AC_DEFINE(HAVE_KEY_USAGE_REFCOUNT, 1, [key.usage is of type refcount_t])
+])
+]) # LC_HAVE_KEY_USAGE_REFCOUNT
+
+#
+# LC_HAVE_CRYPTO_MAX_ALG_NAME_128
+#
+# Kernel version 4.11 commit f437a3f477cce402dbec6537b29e9e33962c9f73
+# switched CRYPTO_MAX_ALG_NAME from 64 to 128.
+# The compile test below fails (via the #if) on kernels where the
+# macro is still 64.
+#
+AC_DEFUN([LC_HAVE_CRYPTO_MAX_ALG_NAME_128], [
+LB_CHECK_COMPILE([if 'CRYPTO_MAX_ALG_NAME' is 128],
+crypto_max_alg_name, [
+ #include <linux/crypto.h>
+],[
+ #if CRYPTO_MAX_ALG_NAME != 128
+ exit(1);
+ #endif
+],[
+ AC_DEFINE(HAVE_CRYPTO_MAX_ALG_NAME_128, 1,
+ ['CRYPTO_MAX_ALG_NAME' is 128])
+])
+]) # LC_HAVE_CRYPTO_MAX_ALG_NAME_128
+
+#
# Kernel version 4.12 commit 47f38c539e9a42344ff5a664942075bd4df93876
# CURRENT_TIME is not 64 bit time safe so it was replaced with
# current_time()
])
]) # LC_BI_BDEV
-
#
# LC_PROG_LINUX
#
# 4.11
LC_INODEOPS_ENHANCED_GETATTR
LC_VM_OPERATIONS_REMOVE_VMF_ARG
+ LC_HAVE_KEY_USAGE_REFCOUNT
+ LC_HAVE_CRYPTO_MAX_ALG_NAME_128
# 4.12
LC_CURRENT_TIME
SK_CRYPT_INVALID = -1,
SK_CRYPT_EMPTY = 0,
SK_CRYPT_AES256_CTR = 1,
- SK_CRYPT_MAX = 2,
};
enum sk_hmac_alg {
SK_HMAC_EMPTY = 0,
SK_HMAC_SHA256 = 1,
SK_HMAC_SHA512 = 2,
- SK_HMAC_MAX = 3,
};
struct sk_crypt_type {
- char *sct_name;
- size_t sct_bytes;
+ const char *sct_name;
+ int sct_type;
};
struct sk_hmac_type {
- char *sht_name;
- size_t sht_bytes;
+ const char *sht_name;
+ int sht_type;
};
enum lock_mode_user {
#include "gss_internal.h"
#include "gss_crypto.h"
-int gss_keyblock_init(struct gss_keyblock *kb, char *alg_name,
+int gss_keyblock_init(struct gss_keyblock *kb, const char *alg_name,
const int alg_mode)
{
int rc;
return ret;
}
-int gss_digest_hmac(struct crypto_hash *tfm,
- rawobj_t *key,
- rawobj_t *hdr,
- int msgcnt, rawobj_t *msgs,
+int gss_digest_hash(struct cfs_crypto_hash_desc *desc,
+ rawobj_t *hdr, int msgcnt, rawobj_t *msgs,
int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
- struct hash_desc desc = {
- .tfm = tfm,
- .flags = 0,
- };
+ struct ahash_request *req = (struct ahash_request *)desc;
struct scatterlist sg[1];
struct sg_table sgt;
+ int rc = 0;
int i;
- int rc;
-
- rc = crypto_hash_setkey(tfm, key->data, key->len);
- if (rc)
- return rc;
-
- rc = crypto_hash_init(&desc);
- if (rc)
- return rc;
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
- if (rc != 0)
- return rc;
- rc = crypto_hash_update(&desc, sg, msgs[i].len);
if (rc)
return rc;
+ ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
+ rc = crypto_ahash_update(req);
gss_teardown_sgtable(&sgt);
- }
-
- for (i = 0; i < iovcnt; i++) {
- if (iovs[i].kiov_len == 0)
- continue;
-
- sg_init_table(sg, 1);
- sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
- iovs[i].kiov_offset);
- rc = crypto_hash_update(&desc, sg, iovs[i].kiov_len);
if (rc)
return rc;
}
- if (hdr) {
- rc = gss_setup_sgtable(&sgt, sg, hdr, sizeof(*hdr));
- if (rc != 0)
- return rc;
- rc = crypto_hash_update(&desc, sg, sizeof(hdr->len));
- if (rc)
- return rc;
-
- gss_teardown_sgtable(&sgt);
- }
-
- return crypto_hash_final(&desc, cksum->data);
-}
-
-int gss_digest_norm(struct crypto_hash *tfm,
- struct gss_keyblock *kb,
- rawobj_t *hdr,
- int msgcnt, rawobj_t *msgs,
- int iovcnt, lnet_kiov_t *iovs,
- rawobj_t *cksum)
-{
- struct hash_desc desc;
- struct scatterlist sg[1];
- struct sg_table sgt;
- int i;
- int rc;
-
- LASSERT(kb->kb_tfm);
- desc.tfm = tfm;
- desc.flags = 0;
-
- rc = crypto_hash_init(&desc);
- if (rc)
- return rc;
-
- for (i = 0; i < msgcnt; i++) {
- if (msgs[i].len == 0)
- continue;
-
- rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
- if (rc != 0)
- return rc;
-
- rc = crypto_hash_update(&desc, sg, msgs[i].len);
- if (rc)
- return rc;
-
- gss_teardown_sgtable(&sgt);
- }
-
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
sg_init_table(sg, 1);
sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
iovs[i].kiov_offset);
- rc = crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+
+ ahash_request_set_crypt(req, sg, NULL, iovs[i].kiov_len);
+ rc = crypto_ahash_update(req);
if (rc)
return rc;
}
if (hdr) {
rc = gss_setup_sgtable(&sgt, sg, hdr, sizeof(*hdr));
- if (rc != 0)
- return rc;
-
- rc = crypto_hash_update(&desc, sg, sizeof(*hdr));
if (rc)
return rc;
+ ahash_request_set_crypt(req, sg, NULL, hdr->len);
+ rc = crypto_ahash_update(req);
gss_teardown_sgtable(&sgt);
+ if (rc)
+ return rc;
}
- rc = crypto_hash_final(&desc, cksum->data);
- if (rc)
- return rc;
-
- return gss_crypt_generic(kb->kb_tfm, 0, NULL, cksum->data,
- cksum->data, cksum->len);
+ return rc;
}
int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
struct crypto_blkcipher *kb_tfm;
};
-int gss_keyblock_init(struct gss_keyblock *kb, char *alg_name,
+int gss_keyblock_init(struct gss_keyblock *kb, const char *alg_name,
const int alg_mode);
void gss_keyblock_free(struct gss_keyblock *kb);
int gss_keyblock_dup(struct gss_keyblock *new, struct gss_keyblock *kb);
void gss_teardown_sgtable(struct sg_table *sgt);
int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv,
const void *in, void *out, size_t length);
-int gss_digest_hmac(struct crypto_hash *tfm, rawobj_t *key, rawobj_t *hdr,
+int gss_digest_hash(struct cfs_crypto_hash_desc *desc, rawobj_t *hdr,
int msgcnt, rawobj_t *msgs, int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum);
-int gss_digest_norm(struct crypto_hash *tfm, struct gss_keyblock *kb,
- rawobj_t *hdr, int msgcnt, rawobj_t *msgs, int iovcnt,
- lnet_kiov_t *iovs, rawobj_t *cksum);
int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize);
int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv,
int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj,
#ifndef __PTLRPC_GSS_GSS_INTERNAL_H_
#define __PTLRPC_GSS_GSS_INTERNAL_H_
-#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <libcfs/libcfs_crypto.h>
#include <lustre_sec.h>
/*
OBD_FREE(buf, bufsize);
}
+/*
+ * Read the reference count of a key.
+ *
+ * Kernel 4.11 changed key.usage from atomic_t to refcount_t, so
+ * select the matching accessor based on the configure-time check.
+ */
+static inline unsigned int ll_read_key_usage(struct key *key)
+{
+#ifdef HAVE_KEY_USAGE_REFCOUNT
+ return refcount_read(&key->usage);
+#else
+ return atomic_read(&key->usage);
+#endif
+}
+
#endif /* __PTLRPC_GSS_GSS_INTERNAL_H_ */
#define DUMP_KEY(key) \
{ \
- CWARN("DUMP KEY: %p(%d) ref %d u%u/g%u desc %s\n", \
- key, key->serial, atomic_read(&key->usage), \
- key->uid, key->gid, \
- key->description ? key->description : "n/a" \
- ); \
+ CWARN("DUMP KEY: %p(%d) ref %d u%u/g%u desc %s\n", \
+ key, key->serial, ll_read_key_usage(key), \
+ key->uid, key->gid, \
+ key->description ? key->description : "n/a" \
+ ); \
}
#define key_cred(tsk) ((tsk)->cred)
set_bit(KEY_FLAG_REVOKED, &key->flags);
}
-static void ctx_upcall_timeout_kr(unsigned long data)
+static void ctx_upcall_timeout_kr(cfs_timer_cb_arg_t data)
{
- struct ptlrpc_cli_ctx *ctx = (struct ptlrpc_cli_ctx *) data;
- struct key *key = ctx2gctx_keyring(ctx)->gck_key;
+ struct gss_cli_ctx_keyring *gctx_kr = cfs_from_timer(gctx_kr,
+ &data, gck_timer);
+ struct ptlrpc_cli_ctx *ctx = &(gctx_kr->gck_base.gc_base);
+ struct key *key = gctx_kr->gck_key;
CWARN("ctx %p, key %p\n", ctx, key);
CDEBUG(D_SEC, "ctx %p: start timer %llds\n", ctx, timeout);
- init_timer(timer);
+ cfs_timer_setup(timer, ctx_upcall_timeout_kr,
+ (unsigned long)gctx_kr, 0);
timer->expires = cfs_time_seconds(timeout) + jiffies;
- timer->data = (unsigned long ) ctx;
- timer->function = ctx_upcall_timeout_kr;
-
add_timer(timer);
}
struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec,
struct vfs_cred *vcred)
{
- struct ptlrpc_cli_ctx *ctx;
- struct gss_cli_ctx_keyring *gctx_kr;
+ struct ptlrpc_cli_ctx *ctx;
+ struct gss_cli_ctx_keyring *gctx_kr;
- OBD_ALLOC_PTR(gctx_kr);
- if (gctx_kr == NULL)
- return NULL;
+ OBD_ALLOC_PTR(gctx_kr);
+ if (gctx_kr == NULL)
+ return NULL;
- OBD_ALLOC_PTR(gctx_kr->gck_timer);
- if (gctx_kr->gck_timer == NULL) {
- OBD_FREE_PTR(gctx_kr);
- return NULL;
- }
- init_timer(gctx_kr->gck_timer);
+ OBD_ALLOC_PTR(gctx_kr->gck_timer);
+ if (gctx_kr->gck_timer == NULL) {
+ OBD_FREE_PTR(gctx_kr);
+ return NULL;
+ }
+ cfs_timer_setup(gctx_kr->gck_timer, NULL, 0, 0);
- ctx = &gctx_kr->gck_base.gc_base;
+ ctx = &gctx_kr->gck_base.gc_base;
- if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) {
- OBD_FREE_PTR(gctx_kr->gck_timer);
- OBD_FREE_PTR(gctx_kr);
- return NULL;
- }
+ if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) {
+ OBD_FREE_PTR(gctx_kr->gck_timer);
+ OBD_FREE_PTR(gctx_kr);
+ return NULL;
+ }
ctx->cc_expire = ktime_get_real_seconds() + KEYRING_UPCALL_TIMEOUT;
clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(atomic_read(&key->usage) > 0);
+ LASSERT(ll_read_key_usage(key) > 0);
LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
LASSERT(!key_get_payload(key, 0));
if (likely(ctx)) {
LASSERT(atomic_read(&ctx->cc_refcount) >= 1);
LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
- LASSERT(atomic_read(&key->usage) >= 2);
+ LASSERT(ll_read_key_usage(key) >= 2);
/* simply take a ref and return. it's upper layer's
* responsibility to detect & replace dead ctx. */
atomic_read(&gctx->gc_seq),
gctx->gc_win,
key ? key->serial : 0,
- key ? atomic_read(&key->usage) : 0,
+ key ? ll_read_key_usage(key) : 0,
gss_handle_to_u64(&gctx->gc_handle),
gss_handle_to_u64(&gctx->gc_svc_handle),
mech);
[ENCTYPE_DES3_CBC_RAW] = { /* des3-hmac-sha1 */
.ke_dispname = "des3-hmac-sha1",
.ke_enc_name = "cbc(des3_ede)",
- .ke_hash_name = "hmac(sha1)",
+ .ke_hash_name = "sha1",
.ke_hash_size = 20,
.ke_conf_size = 8,
.ke_hash_hmac = 1,
[ENCTYPE_AES128_CTS_HMAC_SHA1_96] = { /* aes128-cts */
.ke_dispname = "aes128-cts-hmac-sha1-96",
.ke_enc_name = "cbc(aes)",
- .ke_hash_name = "hmac(sha1)",
+ .ke_hash_name = "sha1",
.ke_hash_size = 12,
.ke_conf_size = 16,
.ke_hash_hmac = 1,
[ENCTYPE_AES256_CTS_HMAC_SHA1_96] = { /* aes256-cts */
.ke_dispname = "aes256-cts-hmac-sha1-96",
.ke_enc_name = "cbc(aes)",
- .ke_hash_name = "hmac(sha1)",
+ .ke_hash_name = "sha1",
.ke_hash_size = 12,
.ke_conf_size = 16,
.ke_hash_hmac = 1,
[ENCTYPE_ARCFOUR_HMAC] = { /* arcfour-hmac-md5 */
.ke_dispname = "arcfour-hmac-md5",
.ke_enc_name = "ecb(arc4)",
- .ke_hash_name = "hmac(md5)",
+ .ke_hash_name = "md5",
.ke_hash_size = 16,
.ke_conf_size = 8,
.ke_hash_hmac = 1,
}
};
-#define MAX_ENCTYPES sizeof(enctypes)/sizeof(struct krb5_enctype)
-
static const char * enctype2str(__u32 enctype)
{
- if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
- return enctypes[enctype].ke_dispname;
+ if (enctype < ARRAY_SIZE(enctypes) && enctypes[enctype].ke_dispname)
+ return enctypes[enctype].ke_dispname;
- return "unknown";
+ return "unknown";
}
static
int krb5_init_keys(struct krb5_ctx *kctx)
{
- struct krb5_enctype *ke;
+ struct krb5_enctype *ke;
- if (kctx->kc_enctype >= MAX_ENCTYPES ||
- enctypes[kctx->kc_enctype].ke_hash_size == 0) {
- CERROR("unsupported enctype %x\n", kctx->kc_enctype);
- return -1;
- }
+ if (kctx->kc_enctype >= ARRAY_SIZE(enctypes) ||
+ enctypes[kctx->kc_enctype].ke_hash_size == 0) {
+ CERROR("unsupported enctype %x\n", kctx->kc_enctype);
+ return -1;
+ }
ke = &enctypes[kctx->kc_enctype];
int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
- struct krb5_enctype *ke = &enctypes[enctype];
- struct crypto_hash *tfm;
- rawobj_t hdr;
- __u32 code = GSS_S_FAILURE;
- int rc;
-
- if (!(tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
- CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
- return GSS_S_FAILURE;
- }
+ struct krb5_enctype *ke = &enctypes[enctype];
+ struct cfs_crypto_hash_desc *desc = NULL;
+ enum cfs_crypto_hash_alg hash_algo;
+ rawobj_t hdr;
+ int rc;
+
+ hash_algo = cfs_crypto_hash_alg(ke->ke_hash_name);
+
+ /* For the cbc(des) case we want md5 instead of hmac(md5) */
+ if (strcmp(ke->ke_enc_name, "cbc(des)"))
+ desc = cfs_crypto_hash_init(hash_algo, kb->kb_key.data,
+ kb->kb_key.len);
+ else
+ desc = cfs_crypto_hash_init(hash_algo, NULL, 0);
+ if (IS_ERR(desc)) {
+ rc = PTR_ERR(desc);
+ CERROR("failed to alloc hash %s : rc = %d\n",
+ ke->ke_hash_name, rc);
+ goto out_no_hash;
+ }
- cksum->len = crypto_hash_digestsize(tfm);
- OBD_ALLOC_LARGE(cksum->data, cksum->len);
- if (!cksum->data) {
- cksum->len = 0;
- goto out_tfm;
- }
+ cksum->len = cfs_crypto_hash_digestsize(hash_algo);
+ OBD_ALLOC_LARGE(cksum->data, cksum->len);
+ if (!cksum->data) {
+ cksum->len = 0;
+ rc = -ENOMEM;
+ goto out_free_hash;
+ }
hdr.data = (__u8 *)khdr;
hdr.len = sizeof(*khdr);
- if (ke->ke_hash_hmac)
- rc = gss_digest_hmac(tfm, &kb->kb_key,
- &hdr, msgcnt, msgs, iovcnt, iovs, cksum);
- else
- rc = gss_digest_norm(tfm, kb,
- &hdr, msgcnt, msgs, iovcnt, iovs, cksum);
+ rc = gss_digest_hash(desc, &hdr, msgcnt, msgs,
+ iovcnt, iovs, cksum);
+ if (rc)
+ goto out_free_hash;
- if (rc == 0)
- code = GSS_S_COMPLETE;
-out_tfm:
- crypto_free_hash(tfm);
- return code;
+ if (!ke->ke_hash_hmac) {
+ LASSERT(kb->kb_tfm);
+
+ cfs_crypto_hash_final(desc, cksum->data, &cksum->len);
+ rc = gss_crypt_generic(kb->kb_tfm, 0, NULL,
+ cksum->data, cksum->data,
+ cksum->len);
+ goto out_no_hash;
+ }
+
+out_free_hash:
+ if (desc)
+ cfs_crypto_hash_final(desc, cksum->data, &cksum->len);
+out_no_hash:
+ return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
static void fill_krb5_header(struct krb5_ctx *kctx,
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
- rawobj_t cksum = RAWOBJ_EMPTY;
+ rawobj_t cksum = RAWOBJ_EMPTY;
+ u32 major;
/* fill krb5 header */
LASSERT(token->len >= sizeof(*khdr));
fill_krb5_header(kctx, khdr, 0);
/* checksum */
- if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
- khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
- return GSS_S_FAILURE;
+ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc, khdr,
+ msgcnt, msgs, iovcnt, iovs, &cksum))
+ GOTO(out_free_cksum, major = GSS_S_FAILURE);
LASSERT(cksum.len >= ke->ke_hash_size);
LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
ke->ke_hash_size);
- token->len = sizeof(*khdr) + ke->ke_hash_size;
- rawobj_free(&cksum);
- return GSS_S_COMPLETE;
+ token->len = sizeof(*khdr) + ke->ke_hash_size;
+ major = GSS_S_COMPLETE;
+out_free_cksum:
+ rawobj_free(&cksum);
+ return major;
}
static
lnet_kiov_t *iovs,
rawobj_t *token)
{
- struct krb5_ctx *kctx = gctx->internal_ctx_id;
- struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
- struct krb5_header *khdr;
- rawobj_t cksum = RAWOBJ_EMPTY;
- __u32 major;
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+ struct krb5_header *khdr;
+ rawobj_t cksum = RAWOBJ_EMPTY;
+ u32 major;
if (token->len < sizeof(*khdr)) {
CERROR("short signature: %u\n", token->len);
major = verify_krb5_header(kctx, khdr, 0);
if (major != GSS_S_COMPLETE) {
CERROR("bad krb5 header\n");
- return major;
+ goto out;
}
if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
CERROR("short signature: %u, require %d\n",
token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
- return GSS_S_FAILURE;
+ GOTO(out, major = GSS_S_FAILURE);
}
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
CERROR("failed to make checksum\n");
- return GSS_S_FAILURE;
+ GOTO(out_free_cksum, major = GSS_S_FAILURE);
}
LASSERT(cksum.len >= ke->ke_hash_size);
if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
ke->ke_hash_size)) {
CERROR("checksum mismatch\n");
- rawobj_free(&cksum);
- return GSS_S_BAD_SIG;
- }
-
- rawobj_free(&cksum);
- return GSS_S_COMPLETE;
+ GOTO(out_free_cksum, major = GSS_S_BAD_SIG);
+ }
+ major = GSS_S_COMPLETE;
+out_free_cksum:
+ rawobj_free(&cksum);
+out:
+ return major;
}
/*
rawobj_t data_desc[3], cipher;
__u8 conf[GSS_MAX_CIPHER_BLOCK];
__u8 local_iv[16] = {0};
+ u32 major;
int rc = 0;
LASSERT(ke);
/* compute checksum */
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
khdr, 3, data_desc, 0, NULL, &cksum))
- return GSS_S_FAILURE;
+ GOTO(out_free_cksum, major = GSS_S_FAILURE);
LASSERT(cksum.len >= ke->ke_hash_size);
/*
LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
- rawobj_t arc4_keye;
+ rawobj_t arc4_keye = RAWOBJ_EMPTY;
struct crypto_blkcipher *arc4_tfm;
if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
CERROR("failed to obtain arc4 enc key\n");
- GOTO(arc4_out, rc = -EACCES);
+ GOTO(arc4_out_key, rc = -EACCES);
}
arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
arc4_out_tfm:
crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
- rawobj_free(&arc4_keye);
-arc4_out:
- do {} while(0); /* just to avoid compile warning */
+ rawobj_free(&arc4_keye);
} else {
rc = gss_crypt_rawobjs(kctx->kc_keye.kb_tfm, local_iv, 3,
data_desc, &cipher, 1);
}
- if (rc != 0) {
- rawobj_free(&cksum);
- return GSS_S_FAILURE;
- }
+ if (rc)
+ GOTO(out_free_cksum, major = GSS_S_FAILURE);
/* fill in checksum */
LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
memcpy((char *)(khdr + 1) + cipher.len,
cksum.data + cksum.len - ke->ke_hash_size,
ke->ke_hash_size);
- rawobj_free(&cksum);
- /* final token length */
- token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
- return GSS_S_COMPLETE;
+ /* final token length */
+ token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
+ major = GSS_S_COMPLETE;
+out_free_cksum:
+ rawobj_free(&cksum);
+ return major;
}
static
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t data_desc[1], cipher;
__u8 conf[GSS_MAX_CIPHER_BLOCK];
- int rc = 0;
+ int rc = 0;
+ u32 major;
LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
LASSERT(ke);
khdr, 1, data_desc,
desc->bd_iov_count, GET_KIOV(desc),
&cksum))
- return GSS_S_FAILURE;
+ GOTO(out_free_cksum, major = GSS_S_FAILURE);
LASSERT(cksum.len >= ke->ke_hash_size);
/*
rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
conf, desc, &cipher, adj_nob);
}
-
- if (rc != 0) {
- rawobj_free(&cksum);
- return GSS_S_FAILURE;
- }
+ if (rc)
+ GOTO(out_free_cksum, major = GSS_S_FAILURE);
/* fill in checksum */
LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
memcpy((char *)(khdr + 1) + cipher.len,
cksum.data + cksum.len - ke->ke_hash_size,
ke->ke_hash_size);
- rawobj_free(&cksum);
- /* final token length */
- token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
- return GSS_S_COMPLETE;
+ /* final token length */
+ token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
+ major = GSS_S_COMPLETE;
+out_free_cksum:
+ rawobj_free(&cksum);
+ return major;
}
static
#define SK_IV_REV_START (1ULL << 63)
struct sk_ctx {
- __u16 sc_hmac;
- __u16 sc_crypt;
- __u32 sc_expire;
- __u32 sc_host_random;
- __u32 sc_peer_random;
- atomic64_t sc_iv;
- rawobj_t sc_hmac_key;
- struct gss_keyblock sc_session_kb;
+ enum cfs_crypto_crypt_alg sc_crypt;
+ enum cfs_crypto_hash_alg sc_hmac;
+ __u32 sc_expire;
+ __u32 sc_host_random;
+ __u32 sc_peer_random;
+ atomic64_t sc_iv;
+ rawobj_t sc_hmac_key;
+ struct gss_keyblock sc_session_kb;
};
struct sk_hdr {
rawobj_t skw_hmac;
};
-static struct sk_crypt_type sk_crypt_types[] = {
- [SK_CRYPT_AES256_CTR] = {
- .sct_name = "ctr(aes)",
- .sct_bytes = 32,
- },
-};
-
-static struct sk_hmac_type sk_hmac_types[] = {
- [SK_HMAC_SHA256] = {
- .sht_name = "hmac(sha256)",
- .sht_bytes = 32,
- },
- [SK_HMAC_SHA512] = {
- .sht_name = "hmac(sha512)",
- .sht_bytes = 64,
- },
-};
-
static inline unsigned long sk_block_mask(unsigned long len, int blocksize)
{
return (len + blocksize - 1) & (~(blocksize - 1));
memcpy(iv, &ctr, sizeof(ctr));
}
-static int sk_init_keys(struct sk_ctx *skc)
-{
- return gss_keyblock_init(&skc->sc_session_kb,
- sk_crypt_types[skc->sc_crypt].sct_name, 0);
-}
-
static int sk_fill_context(rawobj_t *inbuf, struct sk_ctx *skc)
{
char *ptr = inbuf->data;
char *end = inbuf->data + inbuf->len;
- __u32 tmp;
+ char sk_hmac[CRYPTO_MAX_ALG_NAME];
+ char sk_crypt[CRYPTO_MAX_ALG_NAME];
+ u32 tmp;
/* see sk_serialize_kctx() for format from userspace side */
/* 1. Version */
if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
- CERROR("Failed to read shared key interface version");
+ CERROR("Failed to read shared key interface version\n");
return -1;
}
if (tmp != SK_INTERFACE_VERSION) {
}
/* 2. HMAC type */
- if (gss_get_bytes(&ptr, end, &skc->sc_hmac, sizeof(skc->sc_hmac))) {
- CERROR("Failed to read HMAC algorithm type");
+ if (gss_get_bytes(&ptr, end, &sk_hmac, sizeof(sk_hmac))) {
+ CERROR("Failed to read HMAC algorithm type\n");
return -1;
}
- if (skc->sc_hmac <= SK_HMAC_EMPTY || skc->sc_hmac >= SK_HMAC_MAX) {
- CERROR("Invalid hmac type: %d\n", skc->sc_hmac);
+
+ skc->sc_hmac = cfs_crypto_hash_alg(sk_hmac);
+ if (skc->sc_hmac != CFS_HASH_ALG_NULL &&
+ skc->sc_hmac != CFS_HASH_ALG_SHA256 &&
+ skc->sc_hmac != CFS_HASH_ALG_SHA512) {
+ CERROR("Invalid hmac type: %s\n", sk_hmac);
return -1;
}
/* 3. crypt type */
- if (gss_get_bytes(&ptr, end, &skc->sc_crypt, sizeof(skc->sc_crypt))) {
- CERROR("Failed to read crypt algorithm type");
+ if (gss_get_bytes(&ptr, end, &sk_crypt, sizeof(sk_crypt))) {
+ CERROR("Failed to read crypt algorithm type\n");
return -1;
}
- if (skc->sc_crypt <= SK_CRYPT_EMPTY || skc->sc_crypt >= SK_CRYPT_MAX) {
- CERROR("Invalid crypt type: %d\n", skc->sc_crypt);
+
+ skc->sc_crypt = cfs_crypto_crypt_alg(sk_crypt);
+ if (skc->sc_crypt == CFS_CRYPT_ALG_UNKNOWN) {
+ CERROR("Invalid crypt type: %s\n", sk_crypt);
return -1;
}
/* 4. expiration time */
if (gss_get_bytes(&ptr, end, &tmp, sizeof(tmp))) {
- CERROR("Failed to read context expiration time");
+ CERROR("Failed to read context expiration time\n");
return -1;
}
skc->sc_expire = tmp + ktime_get_real_seconds();
/* 5. host random is used as nonce for encryption */
if (gss_get_bytes(&ptr, end, &skc->sc_host_random,
sizeof(skc->sc_host_random))) {
- CERROR("Failed to read host random ");
+ CERROR("Failed to read host random\n");
return -1;
}
/* 6. peer random is used as nonce for decryption */
if (gss_get_bytes(&ptr, end, &skc->sc_peer_random,
sizeof(skc->sc_peer_random))) {
- CERROR("Failed to read peer random ");
+ CERROR("Failed to read peer random\n");
return -1;
}
/* 7. HMAC key */
if (gss_get_rawobj(&ptr, end, &skc->sc_hmac_key)) {
- CERROR("Failed to read HMAC key");
+ CERROR("Failed to read HMAC key\n");
return -1;
}
if (skc->sc_hmac_key.len <= SK_MIN_SIZE) {
/* 8. Session key, can be empty if not using privacy mode */
if (gss_get_rawobj(&ptr, end, &skc->sc_session_kb.kb_key)) {
- CERROR("Failed to read session key");
+ CERROR("Failed to read session key\n");
return -1;
}
/* Only privacy mode needs to initialize keys */
if (skc->sc_session_kb.kb_key.len > 0) {
privacy = true;
- if (sk_init_keys(skc))
+ if (gss_keyblock_init(&skc->sc_session_kb,
+ cfs_crypto_crypt_name(skc->sc_crypt), 0))
goto out_err;
}
/* Only privacy mode needs to initialize keys */
if (skc_new->sc_session_kb.kb_key.len > 0)
- if (sk_init_keys(skc_new))
+ if (gss_keyblock_init(&skc_new->sc_session_kb,
+ cfs_crypto_crypt_name(skc_new->sc_crypt),
+ 0))
goto out_err;
gss_context_new->internal_ctx_id = skc_new;
}
static
-__u32 sk_make_hmac(char *alg_name, rawobj_t *key, int msg_count, rawobj_t *msgs,
- int iov_count, lnet_kiov_t *iovs, rawobj_t *token)
+u32 sk_make_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key, int msg_count,
+ rawobj_t *msgs, int iov_count, lnet_kiov_t *iovs,
+ rawobj_t *token)
{
- struct crypto_hash *tfm;
- int rc;
-
- tfm = crypto_alloc_hash(alg_name, 0, 0);
- if (IS_ERR(tfm))
- return GSS_S_FAILURE;
+ struct cfs_crypto_hash_desc *desc;
+ int rc2, rc;
- rc = GSS_S_FAILURE;
- LASSERT(token->len >= crypto_hash_digestsize(tfm));
- if (!gss_digest_hmac(tfm, key, NULL, msg_count, msgs, iov_count, iovs,
- token))
- rc = GSS_S_COMPLETE;
+ desc = cfs_crypto_hash_init(algo, key->data, key->len);
+ if (IS_ERR(desc)) {
+ rc = PTR_ERR(desc);
+ goto out_init_failed;
+ }
- crypto_free_hash(tfm);
- return rc;
+ rc2 = gss_digest_hash(desc, NULL, msg_count, msgs, iov_count, iovs,
+ token);
+ /* the digest must be written to the caller's output token, not back
+ * over the HMAC key (which would corrupt the key and leave the token
+ * unset for callers such as sk_verify_hmac) */
+ rc = cfs_crypto_hash_final(desc, token->data, &token->len);
+ if (!rc && rc2)
+ rc = rc2;
+out_init_failed:
+ return rc ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
static
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- return sk_make_hmac(sk_hmac_types[skc->sc_hmac].sht_name,
+
+ return sk_make_hmac(skc->sc_hmac,
&skc->sc_hmac_key, message_count, messages,
iov_count, iovs, token);
}
static
-__u32 sk_verify_hmac(struct sk_hmac_type *sht, rawobj_t *key, int message_count,
- rawobj_t *messages, int iov_count, lnet_kiov_t *iovs,
- rawobj_t *token)
+u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
+ int message_count, rawobj_t *messages,
+ int iov_count, lnet_kiov_t *iovs,
+ rawobj_t *token)
{
rawobj_t checksum = RAWOBJ_EMPTY;
__u32 rc = GSS_S_FAILURE;
- checksum.len = sht->sht_bytes;
+ checksum.len = cfs_crypto_hash_digestsize(algo);
if (token->len < checksum.len) {
CDEBUG(D_SEC, "Token received too short, expected %d "
"received %d\n", token->len, checksum.len);
if (!checksum.data)
return rc;
- if (sk_make_hmac(sht->sht_name, key, message_count, messages,
- iov_count, iovs, &checksum)) {
+ if (sk_make_hmac(algo, key, message_count,
+ messages, iov_count, iovs, &checksum)) {
CDEBUG(D_SEC, "Failed to create checksum to validate\n");
goto cleanup;
}
* to decrypt up to the number of bytes actually specified from the sender
* (bd_nob) otherwise the calulated HMAC will be incorrect. */
static
-__u32 sk_verify_bulk_hmac(struct sk_hmac_type *sht, rawobj_t *key,
- int msgcnt, rawobj_t *msgs, int iovcnt,
- lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token)
+u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac, rawobj_t *key,
+ int msgcnt, rawobj_t *msgs, int iovcnt,
+ lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token)
{
+ struct cfs_crypto_hash_desc *desc;
rawobj_t checksum = RAWOBJ_EMPTY;
- struct crypto_hash *tfm;
- struct hash_desc desc = {
- .tfm = NULL,
- .flags = 0,
- };
+ struct ahash_request *req;
struct scatterlist sg[1];
+ int rc = GSS_S_FAILURE;
struct sg_table sgt;
int bytes;
int i;
- int rc = GSS_S_FAILURE;
- checksum.len = sht->sht_bytes;
+ checksum.len = cfs_crypto_hash_digestsize(sc_hmac);
if (token->len < checksum.len) {
CDEBUG(D_SEC, "Token received too short, expected %d "
"received %d\n", token->len, checksum.len);
if (!checksum.data)
return rc;
- tfm = crypto_alloc_hash(sht->sht_name, 0, 0);
- if (IS_ERR(tfm))
+ desc = cfs_crypto_hash_init(sc_hmac, key->data, key->len);
+ if (IS_ERR(desc))
goto cleanup;
- desc.tfm = tfm;
-
- LASSERT(token->len >= crypto_hash_digestsize(tfm));
-
- rc = crypto_hash_setkey(tfm, key->data, key->len);
- if (rc)
- goto hash_cleanup;
-
- rc = crypto_hash_init(&desc);
- if (rc)
- goto hash_cleanup;
-
+ req = (struct ahash_request *) desc;
for (i = 0; i < msgcnt; i++) {
- if (msgs[i].len == 0)
+ if (!msgs[i].len)
continue;
rc = gss_setup_sgtable(&sgt, sg, msgs[i].data, msgs[i].len);
if (rc != 0)
goto hash_cleanup;
- rc = crypto_hash_update(&desc, sg, msgs[i].len);
+ ahash_request_set_crypt(req, sg, NULL, msgs[i].len);
+ rc = crypto_ahash_update(req);
if (rc) {
gss_teardown_sgtable(&sgt);
goto hash_cleanup;
sg_init_table(sg, 1);
sg_set_page(&sg[0], iovs[i].kiov_page, bytes,
iovs[i].kiov_offset);
- rc = crypto_hash_update(&desc, sg, bytes);
+ ahash_request_set_crypt(req, sg, NULL, bytes);
+ rc = crypto_ahash_update(req);
if (rc)
goto hash_cleanup;
}
- crypto_hash_final(&desc, checksum.data);
-
if (memcmp(token->data, checksum.data, checksum.len)) {
rc = GSS_S_BAD_SIG;
goto hash_cleanup;
rc = GSS_S_COMPLETE;
hash_cleanup:
- crypto_free_hash(tfm);
+ cfs_crypto_hash_final(desc, checksum.data, &checksum.len);
cleanup:
OBD_FREE_LARGE(checksum.data, checksum.len);
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- return sk_verify_hmac(&sk_hmac_types[skc->sc_hmac], &skc->sc_hmac_key,
+
+ return sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key,
message_count, messages, iov_count, iovs, token);
}
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
+ size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
struct sk_wire skw;
struct sk_hdr skh;
rawobj_t msgbufs[3];
sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
- skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
+ skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
if (gss_crypt_rawobjs(skc->sc_session_kb.kb_tfm, local_iv, 1, message,
&skw.skw_cipher, 1))
return GSS_S_FAILURE;
msgbufs[2] = skw.skw_cipher;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
- skw.skw_hmac.len = sht->sht_bytes;
- if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 3, msgbufs, 0,
- NULL, &skw.skw_hmac))
+ skw.skw_hmac.len = sht_bytes;
+ if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key,
+ 3, msgbufs, 0, NULL, &skw.skw_hmac))
return GSS_S_FAILURE;
token->len = skw.skw_header.len + skw.skw_cipher.len + skw.skw_hmac.len;
rawobj_t *token, rawobj_t *message)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
+ size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
struct sk_wire skw;
struct sk_hdr *skh;
rawobj_t msgbufs[3];
LASSERT(skc->sc_session_kb.kb_tfm);
+ /* NOTE(review): sizeof(skh) is the size of the pointer, not of
+ * struct sk_hdr; sizeof(*skh) looks intended. Pre-existing issue,
+ * also present in the removed line — worth fixing while here. */
- if (token->len < sizeof(skh) + sht->sht_bytes)
+ if (token->len < sizeof(skh) + sht_bytes)
return GSS_S_DEFECTIVE_TOKEN;
skw.skw_header.data = token->data;
skw.skw_header.len = sizeof(struct sk_hdr);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
- skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
+ skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
- skw.skw_hmac.len = sht->sht_bytes;
+ skw.skw_hmac.len = sht_bytes;
blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
if (skw.skw_cipher.len % blocksize != 0)
msgbufs[0] = skw.skw_header;
msgbufs[1] = *gss_header;
msgbufs[2] = skw.skw_cipher;
- rc = sk_verify_hmac(sht, &skc->sc_hmac_key, 3, msgbufs, 0, NULL,
- &skw.skw_hmac);
+ rc = sk_verify_hmac(skc->sc_hmac, &skc->sc_hmac_key, 3, msgbufs,
+ 0, NULL, &skw.skw_hmac);
if (rc)
return rc;
int adj_nob)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
+ size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
struct sk_wire skw;
struct sk_hdr skh;
__u8 local_iv[SK_IV_SIZE];
sk_construct_rfc3686_iv(local_iv, skc->sc_host_random, skh.skh_iv);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
- skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
+ skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
if (sk_encrypt_bulk(skc->sc_session_kb.kb_tfm, local_iv,
desc, &skw.skw_cipher, adj_nob))
return GSS_S_FAILURE;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
- skw.skw_hmac.len = sht->sht_bytes;
- if (sk_make_hmac(sht->sht_name, &skc->sc_hmac_key, 1, &skw.skw_cipher,
+ skw.skw_hmac.len = sht_bytes;
+ if (sk_make_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1, &skw.skw_cipher,
desc->bd_iov_count, GET_ENC_KIOV(desc), &skw.skw_hmac))
return GSS_S_FAILURE;
rawobj_t *token, int adj_nob)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
- struct sk_hmac_type *sht = &sk_hmac_types[skc->sc_hmac];
+ size_t sht_bytes = cfs_crypto_hash_digestsize(skc->sc_hmac);
struct sk_wire skw;
struct sk_hdr *skh;
__u8 local_iv[SK_IV_SIZE];
LASSERT(skc->sc_session_kb.kb_tfm);
+ /* NOTE(review): sizeof(skh) is the pointer size, not
+ * sizeof(struct sk_hdr) — same pre-existing issue as in the
+ * non-bulk unwrap path; sizeof(*skh) was probably intended. */
- if (token->len < sizeof(skh) + sht->sht_bytes)
+ if (token->len < sizeof(skh) + sht_bytes)
return GSS_S_DEFECTIVE_TOKEN;
skw.skw_header.data = token->data;
skw.skw_header.len = sizeof(struct sk_hdr);
skw.skw_cipher.data = skw.skw_header.data + skw.skw_header.len;
- skw.skw_cipher.len = token->len - skw.skw_header.len - sht->sht_bytes;
+ skw.skw_cipher.len = token->len - skw.skw_header.len - sht_bytes;
skw.skw_hmac.data = skw.skw_cipher.data + skw.skw_cipher.len;
- skw.skw_hmac.len = sht->sht_bytes;
+ skw.skw_hmac.len = sht_bytes;
skh = (struct sk_hdr *)skw.skw_header.data;
rc = sk_verify_header(skh);
if (rc != GSS_S_COMPLETE)
return rc;
- rc = sk_verify_bulk_hmac(&sk_hmac_types[skc->sc_hmac],
- &skc->sc_hmac_key, 1, &skw.skw_cipher,
- desc->bd_iov_count, GET_ENC_KIOV(desc),
- desc->bd_nob, &skw.skw_hmac);
+ rc = sk_verify_bulk_hmac(skc->sc_hmac, &skc->sc_hmac_key, 1,
+ &skw.skw_cipher, desc->bd_iov_count,
+ GET_ENC_KIOV(desc), desc->bd_nob,
+ &skw.skw_hmac);
if (rc)
return rc;
#define SK_DEFAULT_PRIME_BITS 2048
#define SK_DEFAULT_NODEMAP "default"
-/* Names match up with openssl enc and dgst commands */
-char *sk_crypt2name[] = {
- [SK_CRYPT_EMPTY] = "NONE",
- [SK_CRYPT_AES256_CTR] = "AES-256-CTR",
-};
-
-char *sk_hmac2name[] = {
- [SK_HMAC_EMPTY] = "NONE",
- [SK_HMAC_SHA256] = "SHA256",
- [SK_HMAC_SHA512] = "SHA512",
-};
-
-static int sk_name2crypt(char *name)
-{
- int i;
-
- for (i = 0; i < SK_CRYPT_MAX; i++) {
- if (strcasecmp(name, sk_crypt2name[i]) == 0)
- return i;
- }
-
- return SK_CRYPT_INVALID;
-}
-
-static int sk_name2hmac(char *name)
-{
- int i;
-
- for (i = 0; i < SK_HMAC_MAX; i++) {
- if (strcasecmp(name, sk_hmac2name[i]) == 0)
- return i;
- }
-
- return SK_HMAC_INVALID;
-}
-
static void usage(FILE *fp, char *program)
{
int i;
fprintf(fp, "Modify/Write Options:\n");
fprintf(fp, "-c|--crypt <num> Cipher for encryption "
"(Default: AES Counter mode)\n");
- for (i = 1; i < SK_CRYPT_MAX; i++)
- fprintf(fp, " %s\n", sk_crypt2name[i]);
-
+ for (i = 1; i < ARRAY_SIZE(sk_crypt_algs); i++)
+ fprintf(fp, " %s\n",
+ sk_crypt_algs[i].sct_name);
fprintf(fp, "-i|--hmac <num> Hash algorithm for integrity "
"(Default: SHA256)\n");
- for (i = 1; i < SK_HMAC_MAX; i++)
- fprintf(fp, " %s\n", sk_hmac2name[i]);
-
+ for (i = 1; i < ARRAY_SIZE(sk_hmac_algs); i++)
+ fprintf(fp, " %s\n",
+ sk_hmac_algs[i].sht_name);
fprintf(fp, "-e|--expire <num> Seconds before contexts from "
"key expire (Default: %d seconds (%.3g days))\n",
SK_DEFAULT_EXPIRE, (double)SK_DEFAULT_EXPIRE / 3600 / 24);
"client)\n");
fprintf(fp, "-k|--key-bits <len> Shared key length in bits "
"(Default: %d)\n", SK_DEFAULT_SK_KEYLEN);
- fprintf(fp, "-d|--data <file> Key random data source "
- "(Default: /dev/random)\n\n");
+ fprintf(fp, "-d|--data <file> Key data source for new keys "
+ "(Default: /dev/random)\n");
+ fprintf(fp, " Not a seed value. "
+ "This is the actual key value.\n\n");
fprintf(fp, "Other Options:\n");
fprintf(fp, "-v|--verbose Increase verbosity for errors\n");
exit(EXIT_FAILURE);
if (config->skc_type & SK_TYPE_CLIENT)
printf(" client");
printf("\n");
- printf("HMAC alg: %s\n", sk_hmac2name[config->skc_hmac_alg]);
- printf("Crypto alg: %s\n", sk_crypt2name[config->skc_crypt_alg]);
+ printf("HMAC alg: %s\n", sk_hmac2name(config->skc_hmac_alg));
+ printf("Crypto alg: %s\n", sk_crypt2name(config->skc_crypt_alg));
printf("Ctx Expiration: %u seconds\n", config->skc_expire);
printf("Shared keylen: %u bits\n", config->skc_shared_keylen);
printf("Prime length: %u bits\n", config->skc_prime_bits);
int verbose = 0;
int i;
int opt;
- enum sk_key_type type = SK_TYPE_INVALID;
+ enum sk_key_type type = SK_TYPE_INVALID;
bool generate_prime = false;
DH *dh;
return EXIT_FAILURE;
}
+ if (modify && datafile) {
+ fprintf(stderr,
+ "error: data file option not valid in key modify\n");
+ return EXIT_FAILURE;
+ }
+
if (modify) {
config = sk_read_file(modify);
if (!config)
#define SK_PBKDF2_ITERATIONS 10000
-static struct sk_crypt_type sk_crypt_types[] = {
- [SK_CRYPT_AES256_CTR] = {
- .sct_name = "ctr(aes)",
- .sct_bytes = 32,
- },
-};
-
-static struct sk_hmac_type sk_hmac_types[] = {
- [SK_HMAC_SHA256] = {
- .sht_name = "hmac(sha256)",
- .sht_bytes = 32,
- },
- [SK_HMAC_SHA512] = {
- .sht_name = "hmac(sha512)",
- .sht_bytes = 64,
- },
-};
-
#ifdef _NEW_BUILD_
# include "lgss_utils.h"
#else
printerr(0, "Null configuration passed\n");
return -1;
}
+
if (config->skc_version != SK_CONF_VERSION) {
printerr(0, "Invalid version\n");
return -1;
}
- if (config->skc_hmac_alg >= SK_HMAC_MAX) {
+
+ if (config->skc_hmac_alg == SK_HMAC_INVALID) {
printerr(0, "Invalid HMAC algorithm\n");
return -1;
}
- if (config->skc_crypt_alg >= SK_CRYPT_MAX) {
+
+ if (config->skc_crypt_alg == SK_CRYPT_INVALID) {
printerr(0, "Invalid crypt algorithm\n");
return -1;
}
+
if (config->skc_expire < 60 || config->skc_expire > INT_MAX) {
/* Try to limit key expiration to some reasonable minimum and
* also prevent values over INT_MAX because there appears
kctx = &skc->sc_kctx;
kctx->skc_version = config->skc_version;
- kctx->skc_hmac_alg = config->skc_hmac_alg;
- kctx->skc_crypt_alg = config->skc_crypt_alg;
+ /* NOTE(review): sk_hmac2name()/sk_crypt2name() return NULL for an
+ * unknown type; the config is validated earlier, but a NULL here
+ * would crash strcpy() — consider a defensive check. Also verify
+ * the returned names fit skc_*_alg[LL_CRYPTO_MAX_NAME]. */
+ strcpy(kctx->skc_hmac_alg, sk_hmac2name(config->skc_hmac_alg));
+ strcpy(kctx->skc_crypt_alg, sk_crypt2name(config->skc_crypt_alg));
kctx->skc_expire = config->skc_expire;
/* key payload format is in bits, convert to bytes */
*
* \retval EVP_MD
*/
-static inline const EVP_MD *sk_hash_to_evp_md(enum sk_hmac_alg alg)
+static inline const EVP_MD *sk_hash_to_evp_md(enum cfs_crypto_hash_alg alg)
{
switch (alg) {
- case SK_HMAC_SHA256:
+ case CFS_HASH_ALG_SHA256:
return EVP_sha256();
- case SK_HMAC_SHA512:
+ case CFS_HASH_ALG_SHA512:
return EVP_sha512();
default:
return EVP_md_null();
* If the size is smaller it will take copy the first N bytes necessary to
* fill the derived key. */
int sk_kdf(gss_buffer_desc *derived_key , gss_buffer_desc *origin_key,
- gss_buffer_desc *key_binding_bufs, int numbufs, int hmac_alg)
+ gss_buffer_desc *key_binding_bufs, int numbufs,
+ enum cfs_crypto_hash_alg hmac_alg)
{
size_t remain;
size_t bytes;
return rc;
}
- if (sk_hmac_types[hmac_alg].sht_bytes != tmp_hash.length) {
+ if (cfs_crypto_hash_digestsize(hmac_alg) != tmp_hash.length) {
free(tmp_hash.value);
return -EINVAL;
}
struct sk_kernel_ctx *kctx = &skc->sc_kctx;
gss_buffer_desc *session_key = &kctx->skc_session_key;
gss_buffer_desc bufs[5];
+ enum cfs_crypto_crypt_alg crypt_alg;
int rc = -1;
- session_key->length = sk_crypt_types[kctx->skc_crypt_alg].sct_bytes;
+ crypt_alg = cfs_crypto_crypt_alg(kctx->skc_crypt_alg);
+ session_key->length = cfs_crypto_crypt_keysize(crypt_alg);
session_key->value = malloc(session_key->length);
if (!session_key->value) {
printerr(0, "Failed to allocate memory for session key\n");
bufs[4] = *server_token;
return sk_kdf(&kctx->skc_session_key, &kctx->skc_shared_key, bufs,
- 5, kctx->skc_hmac_alg);
+ 5, cfs_crypto_hash_alg(kctx->skc_hmac_alg));
}
/* Uses the session key to create an HMAC key and encryption key. In
gss_buffer_desc *session_key = &kctx->skc_session_key;
gss_buffer_desc *hmac_key = &kctx->skc_hmac_key;
gss_buffer_desc *encrypt_key = &kctx->skc_encrypt_key;
+ enum cfs_crypto_hash_alg hmac_alg;
+ enum cfs_crypto_crypt_alg crypt_alg;
char *encrypt = "Encrypt";
char *integrity = "Integrity";
int rc;
- hmac_key->length = sk_hmac_types[kctx->skc_hmac_alg].sht_bytes;
+ hmac_alg = cfs_crypto_hash_alg(kctx->skc_hmac_alg);
+ hmac_key->length = cfs_crypto_hash_digestsize(hmac_alg);
hmac_key->value = malloc(hmac_key->length);
if (!hmac_key->value)
return -ENOMEM;
rc = PKCS5_PBKDF2_HMAC(integrity, -1, session_key->value,
session_key->length, SK_PBKDF2_ITERATIONS,
- sk_hash_to_evp_md(kctx->skc_hmac_alg),
+ sk_hash_to_evp_md(hmac_alg),
hmac_key->length, hmac_key->value);
if (rc == 0)
return -EINVAL;
if ((skc->sc_flags & LGSS_SVC_PRIV) == 0)
return 0;
- encrypt_key->length = sk_crypt_types[kctx->skc_crypt_alg].sct_bytes;
+ crypt_alg = cfs_crypto_crypt_alg(kctx->skc_crypt_alg);
+ encrypt_key->length = cfs_crypto_crypt_keysize(crypt_alg);
encrypt_key->value = malloc(encrypt_key->length);
if (!encrypt_key->value)
return -ENOMEM;
rc = PKCS5_PBKDF2_HMAC(encrypt, -1, session_key->value,
session_key->length, SK_PBKDF2_ITERATIONS,
- sk_hash_to_evp_md(kctx->skc_hmac_alg),
+ sk_hash_to_evp_md(hmac_alg),
encrypt_key->length, encrypt_key->value);
if (rc == 0)
return -EINVAL;
#include <openssl/evp.h>
#include <sys/types.h>
+#include <libcfs/libcfs_crypto.h>
#include "lsupport.h"
+#ifndef ARRAY_SIZE
+# define ARRAY_SIZE(a) ((sizeof(a)) / (sizeof((a)[0])))
+#endif /* !ARRAY_SIZE */
+
+/* LL_CRYPTO_MAX_NAME value must match value of
+ * CRYPTO_MAX_ALG_NAME in include/linux/crypto.h
+ */
+#ifdef HAVE_CRYPTO_MAX_ALG_NAME_128
+#define LL_CRYPTO_MAX_NAME 128
+#else
+#define LL_CRYPTO_MAX_NAME 64
+#endif
+
/* Some limits and defaults */
#define SK_CONF_VERSION 1
#define SK_MSG_VERSION 1
/* Format passed to the kernel from userspace */
struct sk_kernel_ctx {
uint32_t skc_version;
- uint16_t skc_hmac_alg;
- uint16_t skc_crypt_alg;
+ char skc_hmac_alg[LL_CRYPTO_MAX_NAME];
+ char skc_crypt_alg[LL_CRYPTO_MAX_NAME];
uint32_t skc_expire;
uint32_t skc_host_random;
uint32_t skc_peer_random;
DH *sc_params;
};
+/* Names match up with openssl enc and dgst commands */
+/* When adding new alg types, make sure the first occurrence's name
+ * matches the kernel crypto name libcfs uses (cct_name in the
+ * crypt_types array); the second spelling is the openssl alias.
+ */
+static const struct sk_crypt_type sk_crypt_algs[] = {
+ {
+ .sct_name = "null",
+ .sct_type = SK_CRYPT_EMPTY
+ },
+ {
+ .sct_name = "NONE",
+ .sct_type = SK_CRYPT_EMPTY
+ },
+ {
+ .sct_name = "ctr(aes)",
+ .sct_type = SK_CRYPT_AES256_CTR
+ },
+ {
+ .sct_name = "AES-256-CTR",
+ .sct_type = SK_CRYPT_AES256_CTR
+ }
+};
+/* HMAC algorithms: the first (lowercase) spelling of each pair should
+ * match cht_name in the libcfs hash_types table; the uppercase alias
+ * matches the openssl dgst command name.
+ */
+static const struct sk_hmac_type sk_hmac_algs[] = {
+ {
+ .sht_name = "null",
+ .sht_type = SK_HMAC_EMPTY
+ },
+ {
+ .sht_name = "NONE",
+ .sht_type = SK_HMAC_EMPTY
+ },
+ {
+ .sht_name = "sha256",
+ .sht_type = SK_HMAC_SHA256
+ },
+ {
+ .sht_name = "SHA256",
+ .sht_type = SK_HMAC_SHA256
+ },
+ {
+ .sht_name = "sha512",
+ .sht_type = SK_HMAC_SHA512
+ },
+ {
+ .sht_name = "SHA512",
+ .sht_type = SK_HMAC_SHA512
+ }
+};
+
+/**
+ * Map a cipher name to its SK_CRYPT_* identifier.
+ *
+ * Accepts either the kernel crypto spelling ("ctr(aes)") or the
+ * openssl spelling ("AES-256-CTR"); comparison is case-insensitive.
+ *
+ * Returns the matching sct_type, or SK_CRYPT_INVALID if the name is
+ * not recognized.
+ */
+static inline int sk_name2crypt(char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sk_crypt_algs); i++) {
+ if (strcasecmp(name, sk_crypt_algs[i].sct_name) == 0)
+ return sk_crypt_algs[i].sct_type;
+ }
+
+ return SK_CRYPT_INVALID;
+}
+
+/**
+ * Map an HMAC hash name to its SK_HMAC_* identifier.
+ *
+ * Accepts either the kernel/libcfs spelling ("sha256") or the openssl
+ * dgst spelling ("SHA256"); comparison is case-insensitive.
+ *
+ * Returns the matching sht_type, or SK_HMAC_INVALID if the name is
+ * not recognized.
+ */
+static inline int sk_name2hmac(char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sk_hmac_algs); i++) {
+ if (strcasecmp(name, sk_hmac_algs[i].sht_name) == 0)
+ return sk_hmac_algs[i].sht_type;
+ }
+
+ return SK_HMAC_INVALID;
+}
+
+/**
+ * Map an SK_CRYPT_* identifier back to its canonical name.
+ *
+ * Returns the first matching sct_name (the kernel crypto spelling),
+ * or NULL for an unknown type. NOTE(review): callers appear to pass
+ * the result straight to printf("%s") and strcpy() — verify they
+ * never hand in an unvalidated type, since NULL would be returned.
+ */
+static inline const char *sk_crypt2name(enum sk_crypt_alg type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sk_crypt_algs); i++) {
+ if (type == sk_crypt_algs[i].sct_type)
+ return sk_crypt_algs[i].sct_name;
+ }
+
+ return NULL;
+}
+
+/**
+ * Map an SK_HMAC_* identifier back to its canonical name.
+ *
+ * Returns the first matching sht_name (the lowercase kernel/libcfs
+ * spelling), or NULL for an unknown type. NOTE(review): callers
+ * appear to pass the result straight to printf("%s") and strcpy() —
+ * verify they never hand in an unvalidated type.
+ */
+static inline const char *sk_hmac2name(enum sk_hmac_alg type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sk_hmac_algs); i++) {
+ if (type == sk_hmac_algs[i].sht_type)
+ return sk_hmac_algs[i].sht_name;
+ }
+
+ return NULL;
+}
+
void sk_init_logging(char *program, int verbose, int fg);
struct sk_keyfile_config *sk_read_file(char *filename);
int sk_load_keyfile(char *path);