*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
- * Copyright (c) 2011, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*
* Author: Eric Mei <ericm@clusterfs.com>
*/
#include "gss_asn1.h"
#include "gss_krb5.h"
-static cfs_spinlock_t krb5_seq_lock;
+static spinlock_t krb5_seq_lock;
struct krb5_enctype {
char *ke_dispname;
static
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
{
- kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
+ kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
if (IS_ERR(kb->kb_tfm)) {
CERROR("failed to alloc tfm: %s, mode %d\n",
alg_name, alg_mode);
return -1;
}
- if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
+ if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
CERROR("failed to set %s key, len %d\n",
alg_name, kb->kb_key.len);
return -1;
{
rawobj_free(&kb->kb_key);
if (kb->kb_tfm)
- ll_crypto_free_blkcipher(kb->kb_tfm);
+ crypto_free_blkcipher(kb->kb_tfm);
}
static
static
void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
- sg->page = virt_to_page(ptr);
- sg->offset = offset_in_page(ptr);
- sg->length = len;
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, ptr, len);
}
static
-__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
+__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
int decrypt,
void * iv,
void * in,
desc.info = local_iv;
desc.flags= 0;
- if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
+ if (length % crypto_blkcipher_blocksize(tfm) != 0) {
CERROR("output length %d mismatch blocksize %d\n",
- length, ll_crypto_blkcipher_blocksize(tfm));
+ length, crypto_blkcipher_blocksize(tfm));
goto out;
}
- if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
- CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
+ if (crypto_blkcipher_ivsize(tfm) > 16) {
+ CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
goto out;
}
if (iv)
- memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
+ memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
memcpy(out, in, length);
buf_to_sg(&sg, out, length);
if (decrypt)
- ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
+ ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
else
- ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
+ ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
out:
return(ret);
}
-#ifdef HAVE_ASYNC_BLOCK_CIPHER
-
static inline
-int krb5_digest_hmac(struct ll_crypto_hash *tfm,
+int krb5_digest_hmac(struct crypto_hash *tfm,
rawobj_t *key,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
struct scatterlist sg[1];
int i;
- ll_crypto_hash_setkey(tfm, key->data, key->len);
+ crypto_hash_setkey(tfm, key->data, key->len);
desc.tfm = tfm;
desc.flags= 0;
- ll_crypto_hash_init(&desc);
+ crypto_hash_init(&desc);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- ll_crypto_hash_update(&desc, sg, msgs[i].len);
+ crypto_hash_update(&desc, sg, msgs[i].len);
}
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
- sg[0].page = iovs[i].kiov_page;
- sg[0].offset = iovs[i].kiov_offset;
- sg[0].length = iovs[i].kiov_len;
- ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
- }
- if (khdr) {
- buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
- }
-
- return ll_crypto_hash_final(&desc, cksum->data);
-}
-
-#else /* ! HAVE_ASYNC_BLOCK_CIPHER */
-
-static inline
-int krb5_digest_hmac(struct ll_crypto_hash *tfm,
- rawobj_t *key,
- struct krb5_header *khdr,
- int msgcnt, rawobj_t *msgs,
- int iovcnt, lnet_kiov_t *iovs,
- rawobj_t *cksum)
-{
- struct scatterlist sg[1];
- __u32 keylen = key->len, i;
-
- crypto_hmac_init(tfm, key->data, &keylen);
-
- for (i = 0; i < msgcnt; i++) {
- if (msgs[i].len == 0)
- continue;
- buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- crypto_hmac_update(tfm, sg, 1);
- }
-
- for (i = 0; i < iovcnt; i++) {
- if (iovs[i].kiov_len == 0)
- continue;
- sg[0].page = iovs[i].kiov_page;
- sg[0].offset = iovs[i].kiov_offset;
- sg[0].length = iovs[i].kiov_len;
- crypto_hmac_update(tfm, sg, 1);
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
+ crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- crypto_hmac_update(tfm, sg, 1);
+ crypto_hash_update(&desc, sg, sizeof(*khdr));
}
- crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
- return 0;
+ return crypto_hash_final(&desc, cksum->data);
}
-#endif /* HAVE_ASYNC_BLOCK_CIPHER */
-
static inline
-int krb5_digest_norm(struct ll_crypto_hash *tfm,
+int krb5_digest_norm(struct crypto_hash *tfm,
struct krb5_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
desc.tfm = tfm;
desc.flags= 0;
- ll_crypto_hash_init(&desc);
+ crypto_hash_init(&desc);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- ll_crypto_hash_update(&desc, sg, msgs[i].len);
+ crypto_hash_update(&desc, sg, msgs[i].len);
}
for (i = 0; i < iovcnt; i++) {
if (iovs[i].kiov_len == 0)
continue;
- sg[0].page = iovs[i].kiov_page;
- sg[0].offset = iovs[i].kiov_offset;
- sg[0].length = iovs[i].kiov_len;
- ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
+ crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+ crypto_hash_update(&desc, sg, sizeof(*khdr));
}
- ll_crypto_hash_final(&desc, cksum->data);
+ crypto_hash_final(&desc, cksum->data);
return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
cksum->data, cksum->len);
rawobj_t *cksum)
{
struct krb5_enctype *ke = &enctypes[enctype];
- struct ll_crypto_hash *tfm;
+ struct crypto_hash *tfm;
__u32 code = GSS_S_FAILURE;
int rc;
- if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
+ tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0);
+ if (IS_ERR(tfm)) {
CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
return GSS_S_FAILURE;
}
- cksum->len = ll_crypto_hash_digestsize(tfm);
+ cksum->len = crypto_hash_digestsize(tfm);
OBD_ALLOC_LARGE(cksum->data, cksum->len);
if (!cksum->data) {
cksum->len = 0;
if (rc == 0)
code = GSS_S_COMPLETE;
out_tfm:
- ll_crypto_free_hash(tfm);
+ crypto_free_hash(tfm);
return code;
}
}
khdr->kh_filler = 0xff;
- cfs_spin_lock(&krb5_seq_lock);
- khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
- cfs_spin_unlock(&krb5_seq_lock);
+ spin_lock(&krb5_seq_lock);
+ khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
+ spin_unlock(&krb5_seq_lock);
}
static __u32 verify_krb5_header(struct krb5_ctx *kctx,
}
static
-int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
int mode_ecb,
int inobj_cnt,
rawobj_t *inobjs,
if (mode_ecb) {
if (enc)
- rc = ll_crypto_blkcipher_encrypt(
+ rc = crypto_blkcipher_encrypt(
&desc, &dst, &src, src.length);
else
- rc = ll_crypto_blkcipher_decrypt(
+ rc = crypto_blkcipher_decrypt(
&desc, &dst, &src, src.length);
} else {
if (enc)
- rc = ll_crypto_blkcipher_encrypt_iv(
+ rc = crypto_blkcipher_encrypt_iv(
&desc, &dst, &src, src.length);
else
- rc = ll_crypto_blkcipher_decrypt_iv(
+ rc = crypto_blkcipher_decrypt_iv(
&desc, &dst, &src, src.length);
}
* if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
*/
static
-int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
struct krb5_header *khdr,
char *confounder,
struct ptlrpc_bulk_desc *desc,
LASSERT(desc->bd_iov_count);
LASSERT(desc->bd_enc_iov);
- blocksize = ll_crypto_blkcipher_blocksize(tfm);
+ blocksize = crypto_blkcipher_blocksize(tfm);
LASSERT(blocksize > 1);
LASSERT(cipher->len == blocksize + sizeof(*khdr));
buf_to_sg(&src, confounder, blocksize);
buf_to_sg(&dst, cipher->data, blocksize);
- rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
if (rc) {
CERROR("error to encrypt confounder: %d\n", rc);
return rc;
/* encrypt clear pages */
for (i = 0; i < desc->bd_iov_count; i++) {
- src.page = desc->bd_iov[i].kiov_page;
- src.offset = desc->bd_iov[i].kiov_offset;
- src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
- (~(blocksize - 1));
-
- if (adj_nob)
- nob += src.length;
-
- dst.page = desc->bd_enc_iov[i].kiov_page;
- dst.offset = src.offset;
- dst.length = src.length;
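+ /* map the clear page, rounding its length up to a multiple of the cipher block size */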
+ sg_set_page(&src, desc->bd_iov[i].kiov_page,
+ (desc->bd_iov[i].kiov_len + blocksize - 1) &
+ (~(blocksize - 1)),
+ desc->bd_iov[i].kiov_offset);
+ if (adj_nob)
+ nob += src.length;
+ sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
+ src.offset);
desc->bd_enc_iov[i].kiov_offset = dst.offset;
desc->bd_enc_iov[i].kiov_len = dst.length;
- rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
src.length);
if (rc) {
CERROR("error to encrypt page: %d\n", rc);
buf_to_sg(&src, khdr, sizeof(*khdr));
buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
- rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
- &dst, &src, sizeof(*khdr));
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+ sizeof(*khdr));
if (rc) {
CERROR("error to encrypt krb5 header: %d\n", rc);
return rc;
* should have been done by prep_bulk().
*/
static
-int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
struct krb5_header *khdr,
struct ptlrpc_bulk_desc *desc,
rawobj_t *cipher,
LASSERT(desc->bd_enc_iov);
LASSERT(desc->bd_nob_transferred);
- blocksize = ll_crypto_blkcipher_blocksize(tfm);
+ blocksize = crypto_blkcipher_blocksize(tfm);
LASSERT(blocksize > 1);
LASSERT(cipher->len == blocksize + sizeof(*khdr));
buf_to_sg(&src, cipher->data, blocksize);
buf_to_sg(&dst, plain->data, blocksize);
- rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
if (rc) {
CERROR("error to decrypt confounder: %d\n", rc);
return rc;
if (desc->bd_enc_iov[i].kiov_len == 0)
continue;
- src.page = desc->bd_enc_iov[i].kiov_page;
- src.offset = desc->bd_enc_iov[i].kiov_offset;
- src.length = desc->bd_enc_iov[i].kiov_len;
-
- dst = src;
- if (desc->bd_iov[i].kiov_len % blocksize == 0)
- dst.page = desc->bd_iov[i].kiov_page;
+ sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
+ desc->bd_enc_iov[i].kiov_len,
+ desc->bd_enc_iov[i].kiov_offset);
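+ /* decrypt into the clear page only when its length is block-aligned; otherwise decrypt in the bounce page and copy back below */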
+ dst = src;
+ if (desc->bd_iov[i].kiov_len % blocksize == 0)
+ sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
- rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
- src.length);
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+ src.length);
if (rc) {
CERROR("error to decrypt page: %d\n", rc);
return rc;
}
if (desc->bd_iov[i].kiov_len % blocksize != 0) {
- memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
+ memcpy(page_address(desc->bd_iov[i].kiov_page) +
desc->bd_iov[i].kiov_offset,
- cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
+ page_address(desc->bd_enc_iov[i].kiov_page) +
desc->bd_iov[i].kiov_offset,
desc->bd_iov[i].kiov_len);
}
buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
- rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
- &dst, &src, sizeof(*khdr));
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+ sizeof(*khdr));
if (rc) {
CERROR("error to decrypt tail: %d\n", rc);
return rc;
LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
LASSERT(kctx->kc_keye.kb_tfm == NULL ||
ke->ke_conf_size >=
- ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+ crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
/*
* final token format:
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
LASSERT(blocksize <= ke->ke_conf_size);
cipher.len = token->len - sizeof(*khdr);
LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
- if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
- rawobj_t arc4_keye;
- struct ll_crypto_cipher *arc4_tfm;
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ rawobj_t arc4_keye;
+ struct crypto_blkcipher *arc4_tfm;
- if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
- NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
- CERROR("failed to obtain arc4 enc key\n");
- GOTO(arc4_out, rc = -EACCES);
- }
+ if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+ NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+ CERROR("failed to obtain arc4 enc key\n");
+ GOTO(arc4_out, rc = -EACCES);
+ }
- arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
}
- if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
+ if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
3, data_desc, &cipher, 1);
arc4_out_tfm:
- ll_crypto_free_blkcipher(arc4_tfm);
+ crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
LASSERT(desc->bd_enc_iov);
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
for (i = 0; i < desc->bd_iov_count; i++) {
LASSERT(desc->bd_enc_iov[i].kiov_page);
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/*
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/* expected token layout:
plain_out.data = tmpbuf;
plain_out.len = bodysize;
- if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
- rawobj_t arc4_keye;
- struct ll_crypto_cipher *arc4_tfm;
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ rawobj_t arc4_keye;
+ struct crypto_blkcipher *arc4_tfm;
- cksum.data = token->data + token->len - ke->ke_hash_size;
- cksum.len = ke->ke_hash_size;
+ cksum.data = token->data + token->len - ke->ke_hash_size;
+ cksum.len = ke->ke_hash_size;
- if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
- NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
- CERROR("failed to obtain arc4 enc key\n");
- GOTO(arc4_out, rc = -EACCES);
- }
+ if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+ NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+ CERROR("failed to obtain arc4 enc key\n");
+ GOTO(arc4_out, rc = -EACCES);
+ }
- arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
}
- if (ll_crypto_blkcipher_setkey(arc4_tfm,
+ if (crypto_blkcipher_setkey(arc4_tfm,
arc4_keye.data, arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1, &cipher_in, &plain_out, 0);
arc4_out_tfm:
- ll_crypto_free_blkcipher(arc4_tfm);
+ crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
LBUG();
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
int __init init_kerberos_module(void)
{
- int status;
+ int status;
- cfs_spin_lock_init(&krb5_seq_lock);
+ spin_lock_init(&krb5_seq_lock);
- status = lgss_mech_register(&gss_kerberos_mech);
- if (status)
- CERROR("Failed to register kerberos gss mechanism!\n");
- return status;
+ status = lgss_mech_register(&gss_kerberos_mech);
+ if (status)
+ CERROR("Failed to register kerberos gss mechanism!\n");
+ return status;
}
-void __exit cleanup_kerberos_module(void)
+void cleanup_kerberos_module(void)
{
lgss_mech_unregister(&gss_kerberos_mech);
}