-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* Modifications for Lustre
- * Copyright 2004 - 2006, Cluster File Systems, Inc.
- * All rights reserved
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2013, Intel Corporation.
+ *
* Author: Eric Mei <ericm@clusterfs.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
-#include <linux/random.h>
#include <linux/mutex.h>
#else
#include <liblustre.h>
#include "gss_asn1.h"
#include "gss_krb5.h"
-spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t krb5_seq_lock;
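+
+/*
+ * note on this port: SPIN_LOCK_UNLOCKED is gone from recent kernels, so
+ * krb5_seq_lock is now initialized with spin_lock_init() in
+ * init_kerberos_module() below.  likewise the cipher names in enctypes[]
+ * use the crypto API template syntax (e.g. "cbc(des3_ede)", "hmac(sha1)")
+ * and the old CRYPTO_TFM_MODE_* flag is always passed as 0.
+ */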
struct krb5_enctype {
char *ke_dispname;
static struct krb5_enctype enctypes[] = {
[ENCTYPE_DES_CBC_RAW] = { /* des-cbc-md5 */
"des-cbc-md5",
- "des",
+ "cbc(des)",
"md5",
- CRYPTO_TFM_MODE_CBC,
+ 0,
16,
8,
0,
},
[ENCTYPE_DES3_CBC_RAW] = { /* des3-hmac-sha1 */
"des3-hmac-sha1",
- "des3_ede",
- "sha1",
- CRYPTO_TFM_MODE_CBC,
+ "cbc(des3_ede)",
+ "hmac(sha1)",
+ 0,
20,
8,
1,
},
[ENCTYPE_AES128_CTS_HMAC_SHA1_96] = { /* aes128-cts */
"aes128-cts-hmac-sha1-96",
- "aes",
- "sha1",
- CRYPTO_TFM_MODE_CBC,
+ "cbc(aes)",
+ "hmac(sha1)",
+ 0,
12,
16,
1,
},
[ENCTYPE_AES256_CTS_HMAC_SHA1_96] = { /* aes256-cts */
"aes256-cts-hmac-sha1-96",
- "aes",
- "sha1",
- CRYPTO_TFM_MODE_CBC,
+ "cbc(aes)",
+ "hmac(sha1)",
+ 0,
12,
16,
1,
},
[ENCTYPE_ARCFOUR_HMAC] = { /* arcfour-hmac-md5 */
"arcfour-hmac-md5",
- "arc4",
- "md5",
- CRYPTO_TFM_MODE_ECB,
+ "ecb(arc4)",
+ "hmac(md5)",
+ 0,
16,
8,
1,
static
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
{
- kb->kb_tfm = crypto_alloc_tfm(alg_name, alg_mode);
- if (kb->kb_tfm == NULL) {
- CERROR("failed to alloc tfm: %s, mode %d\n",
- alg_name, alg_mode);
- return -1;
- }
-
- if (crypto_cipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
+ kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
+ if (IS_ERR(kb->kb_tfm)) {
+ CERROR("failed to alloc tfm: %s, mode %d\n",
+ alg_name, alg_mode);
+ /* don't leave an ERR_PTR behind for keyblock_destroy() */
+ kb->kb_tfm = NULL;
+ return -1;
+ }
+
+ if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
CERROR("failed to set %s key, len %d\n",
alg_name, kb->kb_key.len);
return -1;
{
rawobj_free(&kb->kb_key);
if (kb->kb_tfm)
- crypto_free_tfm(kb->kb_tfm);
+ crypto_free_blkcipher(kb->kb_tfm);
}
static
if (q > end || q < p)
return -1;
- OBD_ALLOC(res->data, len);
+ OBD_ALLOC_LARGE(res->data, len);
if (!res->data)
return -1;
{
char *buf;
- OBD_ALLOC(buf, keysize);
+ OBD_ALLOC_LARGE(buf, keysize);
if (buf == NULL)
return -1;
if (get_bytes(ptr, end, buf, keysize)) {
- OBD_FREE(buf, keysize);
+ OBD_FREE_LARGE(buf, keysize);
return -1;
}
knew->kc_cfx = kctx->kc_cfx;
knew->kc_seed_init = kctx->kc_seed_init;
knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
-#if 0
knew->kc_endtime = kctx->kc_endtime;
-#else
- /* FIXME reverse context don't expire for now */
- knew->kc_endtime = INT_MAX;
-#endif
+
memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
knew->kc_seq_send = kctx->kc_seq_recv;
knew->kc_seq_recv = kctx->kc_seq_send;
}
static
-void buf_to_sg(struct scatterlist *sg, char *ptr, int len)
+void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
- sg->page = virt_to_page(ptr);
- sg->offset = offset_in_page(ptr);
- sg->length = len;
+ sg_set_buf(sg, ptr, len);
}
static
-__u32 krb5_encrypt(struct crypto_tfm *tfm,
+__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
int decrypt,
void * iv,
void * in,
void * out,
int length)
{
- struct scatterlist sg;
+ struct blkcipher_desc desc;
+ struct scatterlist sg;
__u8 local_iv[16] = {0};
__u32 ret = -EINVAL;
LASSERT(tfm);
+ desc.tfm = tfm;
+ desc.info = local_iv;
+ desc.flags = 0;
- if (length % crypto_tfm_alg_blocksize(tfm) != 0) {
+ if (length % crypto_blkcipher_blocksize(tfm) != 0) {
CERROR("output length %d mismatch blocksize %d\n",
- length, crypto_tfm_alg_blocksize(tfm));
+ length, crypto_blkcipher_blocksize(tfm));
goto out;
}
- if (crypto_tfm_alg_ivsize(tfm) > 16) {
- CERROR("iv size too large %d\n", crypto_tfm_alg_ivsize(tfm));
+ if (crypto_blkcipher_ivsize(tfm) > 16) {
+ CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
goto out;
}
if (iv)
- memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm));
+ memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
memcpy(out, in, length);
buf_to_sg(&sg, out, length);
if (decrypt)
- ret = crypto_cipher_decrypt_iv(tfm, &sg, &sg, length, local_iv);
+ ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
else
- ret = crypto_cipher_encrypt_iv(tfm, &sg, &sg, length, local_iv);
+ ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
out:
return(ret);
}
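+
+/*
+ * the two digest helpers below hash their input in a fixed order: the
+ * plain message fragments (msgs), then any bulk data pages (iovs), and
+ * finally the krb5 header itself.  krb5_digest_hmac() keys the hash with
+ * kb_key, while krb5_digest_norm() computes a plain digest and then
+ * encrypts it with the keyblock's tfm.
+ */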
static inline
-int krb5_digest_hmac(struct crypto_tfm *tfm,
+int krb5_digest_hmac(struct crypto_hash *tfm,
rawobj_t *key,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
+ int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
+ struct hash_desc desc;
struct scatterlist sg[1];
- __u32 keylen = key->len, i;
+ int i;
+
+ crypto_hash_setkey(tfm, key->data, key->len);
+ desc.tfm = tfm;
+ desc.flags = 0;
- crypto_hmac_init(tfm, key->data, &keylen);
+ crypto_hash_init(&desc);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- crypto_hmac_update(tfm, sg, 1);
+ crypto_hash_update(&desc, sg, msgs[i].len);
+ }
+
+ for (i = 0; i < iovcnt; i++) {
+ if (iovs[i].kiov_len == 0)
+ continue;
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
+ crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- crypto_hmac_update(tfm, sg, 1);
+ crypto_hash_update(&desc, sg, sizeof(*khdr));
}
- crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
- return 0;
+ return crypto_hash_final(&desc, cksum->data);
}
static inline
-int krb5_digest_norm(struct crypto_tfm *tfm,
+int krb5_digest_norm(struct crypto_hash *tfm,
struct krb5_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
+ int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
+ struct hash_desc desc;
struct scatterlist sg[1];
int i;
LASSERT(kb->kb_tfm);
+ desc.tfm = tfm;
+ desc.flags = 0;
- crypto_digest_init(tfm);
+ crypto_hash_init(&desc);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- crypto_digest_update(tfm, sg, 1);
+ crypto_hash_update(&desc, sg, msgs[i].len);
+ }
+
+ for (i = 0; i < iovcnt; i++) {
+ if (iovs[i].kiov_len == 0)
+ continue;
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
+ crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- crypto_digest_update(tfm, sg, 1);
+ crypto_hash_update(&desc, sg, sizeof(*khdr));
}
- crypto_digest_final(tfm, cksum->data);
+ crypto_hash_final(&desc, cksum->data);
return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
cksum->data, cksum->len);
struct krb5_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
+ int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
- struct krb5_enctype *ke = &enctypes[enctype];
- struct crypto_tfm *tfm;
- __u32 code = GSS_S_FAILURE;
- int rc;
+ struct krb5_enctype *ke = &enctypes[enctype];
+ struct crypto_hash *tfm;
+ __u32 code = GSS_S_FAILURE;
+ int rc;
- if (!(tfm = crypto_alloc_tfm(ke->ke_hash_name, 0))) {
+ tfm = crypto_alloc_hash(ke->ke_hash_name, 0, 0);
+ if (IS_ERR(tfm)) {
CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
return GSS_S_FAILURE;
}
- cksum->len = crypto_tfm_alg_digestsize(tfm);
- OBD_ALLOC(cksum->data, cksum->len);
+ cksum->len = crypto_hash_digestsize(tfm);
+ OBD_ALLOC_LARGE(cksum->data, cksum->len);
if (!cksum->data) {
cksum->len = 0;
goto out_tfm;
if (ke->ke_hash_hmac)
rc = krb5_digest_hmac(tfm, &kb->kb_key,
- khdr, msgcnt, msgs, cksum);
+ khdr, msgcnt, msgs, iovcnt, iovs, cksum);
else
rc = krb5_digest_norm(tfm, kb,
- khdr, msgcnt, msgs, cksum);
+ khdr, msgcnt, msgs, iovcnt, iovs, cksum);
if (rc == 0)
code = GSS_S_COMPLETE;
out_tfm:
- crypto_free_tfm(tfm);
+ crypto_free_hash(tfm);
return code;
}
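+
+/*
+ * fill in a krb5 v2 token header: wrap (privacy) tokens get EC/RRC of 0,
+ * while MIC tokens carry 0xffff in those fields.  the send sequence
+ * number is serialized under krb5_seq_lock.
+ */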
+static void fill_krb5_header(struct krb5_ctx *kctx,
+ struct krb5_header *khdr,
+ int privacy)
+{
+ unsigned char acceptor_flag;
+
+ acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
+
+ if (privacy) {
+ khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
+ khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
+ khdr->kh_ec = cpu_to_be16(0);
+ khdr->kh_rrc = cpu_to_be16(0);
+ } else {
+ khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
+ khdr->kh_flags = acceptor_flag;
+ khdr->kh_ec = cpu_to_be16(0xffff);
+ khdr->kh_rrc = cpu_to_be16(0xffff);
+ }
+
+ khdr->kh_filler = 0xff;
+ spin_lock(&krb5_seq_lock);
+ khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
+ spin_unlock(&krb5_seq_lock);
+}
+
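+/*
+ * sanity-check a received krb5 token header against what the peer's
+ * fill_krb5_header() should have produced: token id, direction flag,
+ * confidential flag (for wrap tokens), filler byte and the EC/RRC values.
+ */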
+static __u32 verify_krb5_header(struct krb5_ctx *kctx,
+ struct krb5_header *khdr,
+ int privacy)
+{
+ unsigned char acceptor_flag;
+ __u16 tok_id, ec_rrc;
+
+ acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
+
+ if (privacy) {
+ tok_id = KG_TOK_WRAP_MSG;
+ ec_rrc = 0x0;
+ } else {
+ tok_id = KG_TOK_MIC_MSG;
+ ec_rrc = 0xffff;
+ }
+
+ /* sanity checks */
+ if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
+ CERROR("bad token id\n");
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
+ CERROR("bad direction flag\n");
+ return GSS_S_BAD_SIG;
+ }
+ if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
+ CERROR("missing confidential flag\n");
+ return GSS_S_BAD_SIG;
+ }
+ if (khdr->kh_filler != 0xff) {
+ CERROR("bad filler\n");
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
+ be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
+ CERROR("bad EC or RRC\n");
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ return GSS_S_COMPLETE;
+}
+
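+/*
+ * MIC token layout produced below:
+ * ------------------------------
+ * | krb5 header | checksum     |
+ * ------------------------------
+ * the checksum field holds the trailing ke_hash_size bytes of the
+ * computed digest.
+ */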
static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
int msgcnt,
rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
rawobj_t *token)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
- unsigned char acceptor_flag;
rawobj_t cksum = RAWOBJ_EMPTY;
- __u32 rc = GSS_S_FAILURE;
-
- acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
/* fill krb5 header */
LASSERT(token->len >= sizeof(*khdr));
khdr = (struct krb5_header *) token->data;
-
- khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
- khdr->kh_flags = acceptor_flag;
- khdr->kh_filler = 0xff;
- khdr->kh_ec = cpu_to_be16(0xffff);
- khdr->kh_rrc = cpu_to_be16(0xffff);
- spin_lock(&krb5_seq_lock);
- khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
- spin_unlock(&krb5_seq_lock);
+ fill_krb5_header(kctx, khdr, 0);
/* checksum */
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
- khdr, msgcnt, msgs, &cksum))
- goto out_err;
+ khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
+ return GSS_S_FAILURE;
LASSERT(cksum.len >= ke->ke_hash_size);
LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
ke->ke_hash_size);
token->len = sizeof(*khdr) + ke->ke_hash_size;
- rc = GSS_S_COMPLETE;
-out_err:
rawobj_free(&cksum);
- return rc;
+ return GSS_S_COMPLETE;
}
static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
int msgcnt,
rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
rawobj_t *token)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
- unsigned char acceptor_flag;
rawobj_t cksum = RAWOBJ_EMPTY;
- __u32 rc = GSS_S_FAILURE;
-
- acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
+ __u32 major;
if (token->len < sizeof(*khdr)) {
CERROR("short signature: %u\n", token->len);
khdr = (struct krb5_header *) token->data;
- /* sanity checks */
- if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_MIC_MSG) {
- CERROR("bad token id\n");
- return GSS_S_DEFECTIVE_TOKEN;
- }
- if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
- CERROR("bad direction flag\n");
- return GSS_S_BAD_SIG;
- }
- if (khdr->kh_filler != 0xff) {
- CERROR("bad filler\n");
- return GSS_S_DEFECTIVE_TOKEN;
- }
- if (be16_to_cpu(khdr->kh_ec) != 0xffff ||
- be16_to_cpu(khdr->kh_rrc) != 0xffff) {
- CERROR("bad EC or RRC\n");
- return GSS_S_DEFECTIVE_TOKEN;
+ major = verify_krb5_header(kctx, khdr, 0);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("bad krb5 header\n");
+ return major;
}
if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
CERROR("short signature: %u, require %d\n",
token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
- goto out;
+ return GSS_S_FAILURE;
}
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
- khdr, msgcnt, msgs, &cksum))
+ khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
+ CERROR("failed to make checksum\n");
return GSS_S_FAILURE;
+ }
LASSERT(cksum.len >= ke->ke_hash_size);
if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
ke->ke_hash_size)) {
CERROR("checksum mismatch\n");
- rc = GSS_S_BAD_SIG;
- goto out;
+ rawobj_free(&cksum);
+ return GSS_S_BAD_SIG;
}
- rc = GSS_S_COMPLETE;
-out:
rawobj_free(&cksum);
- return rc;
+ return GSS_S_COMPLETE;
}
static
}
static
-int krb5_encrypt_rawobjs(struct crypto_tfm *tfm,
+int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
int mode_ecb,
int inobj_cnt,
rawobj_t *inobjs,
rawobj_t *outobj,
int enc)
{
- struct scatterlist src, dst;
- __u8 local_iv[16] = {0}, *buf;
- __u32 datalen = 0;
- int i, rc;
+ struct blkcipher_desc desc;
+ struct scatterlist src, dst;
+ __u8 local_iv[16] = {0}, *buf;
+ __u32 datalen = 0;
+ int i, rc;
ENTRY;
buf = outobj->data;
+ desc.tfm = tfm;
+ desc.info = local_iv;
+ desc.flags = 0;
for (i = 0; i < inobj_cnt; i++) {
LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
if (mode_ecb) {
if (enc)
- rc = crypto_cipher_encrypt(
- tfm, &dst, &src, src.length);
+ rc = crypto_blkcipher_encrypt(
+ &desc, &dst, &src, src.length);
else
- rc = crypto_cipher_decrypt(
- tfm, &dst, &src, src.length);
+ rc = crypto_blkcipher_decrypt(
+ &desc, &dst, &src, src.length);
} else {
if (enc)
- rc = crypto_cipher_encrypt_iv(
- tfm, &dst, &src, src.length, local_iv);
+ rc = crypto_blkcipher_encrypt_iv(
+ &desc, &dst, &src, src.length);
else
- rc = crypto_cipher_decrypt_iv(
- tfm, &dst, &src, src.length, local_iv);
+ rc = crypto_blkcipher_decrypt_iv(
+ &desc, &dst, &src, src.length);
}
if (rc) {
RETURN(0);
}
+/*
+ * encrypt a bulk descriptor in place: the confounder and the krb5 header
+ * are encrypted into the head/tail cipher buffer, while each data page,
+ * rounded up to the cipher blocksize, is encrypted into the matching
+ * bd_enc_iov page.
+ * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
+ */
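+/*
+ * for illustration: with the aes enctypes (blocksize 16) the head/tail
+ * cipher buffer is 16 (confounder) + 16 (krb5 header) = 32 bytes, and a
+ * 1000-byte page is encrypted as 1008 bytes (rounded up to the
+ * blocksize) into its bd_enc_iov page.
+ */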
+static
+int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
+ struct krb5_header *khdr,
+ char *confounder,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *cipher,
+ int adj_nob)
+{
+ struct blkcipher_desc ciph_desc;
+ __u8 local_iv[16] = {0};
+ struct scatterlist src, dst;
+ int blocksize, i, rc, nob = 0;
+
+ LASSERT(desc->bd_iov_count);
+ LASSERT(desc->bd_enc_iov);
+
+ blocksize = crypto_blkcipher_blocksize(tfm);
+ LASSERT(blocksize > 1);
+ LASSERT(cipher->len == blocksize + sizeof(*khdr));
+
+ ciph_desc.tfm = tfm;
+ ciph_desc.info = local_iv;
+ ciph_desc.flags = 0;
+
+ /* encrypt confounder */
+ buf_to_sg(&src, confounder, blocksize);
+ buf_to_sg(&dst, cipher->data, blocksize);
+
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ if (rc) {
+ CERROR("error to encrypt confounder: %d\n", rc);
+ return rc;
+ }
+
+ /* encrypt clear pages */
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ sg_set_page(&src, desc->bd_iov[i].kiov_page,
+ (desc->bd_iov[i].kiov_len + blocksize - 1) &
+ (~(blocksize - 1)),
+ desc->bd_iov[i].kiov_offset);
+ if (adj_nob)
+ nob += src.length;
+ sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
+ src.offset);
+
+ desc->bd_enc_iov[i].kiov_offset = dst.offset;
+ desc->bd_enc_iov[i].kiov_len = dst.length;
+
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+ src.length);
+ if (rc) {
+ CERROR("error to encrypt page: %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* encrypt krb5 header */
+ buf_to_sg(&src, khdr, sizeof(*khdr));
+ buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+ sizeof(*khdr));
+ if (rc) {
+ CERROR("error to encrypt krb5 header: %d\n", rc);
+ return rc;
+ }
+
+ if (adj_nob)
+ desc->bd_nob = nob;
+
+ return 0;
+}
+
+/*
+ * desc->bd_nob_transferred is the size of the cipher text received.
+ * desc->bd_nob is the target size of the plain text.
+ *
+ * if adj_nob != 0, we adjust each page's kiov_len to the actual
+ * plain text size.
+ * - for client read: we don't know the data size of each page, so
+ *   bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
+ *   might be smaller, so we adjust it according to bd_enc_iov[]->kiov_len.
+ *   this means we DO NOT support a server sending an odd-size chunk of
+ *   data in any page other than the last one.
+ * - for server write: we know exactly the expected data size of each page,
+ *   so kiov_len is already accurate and must not be adjusted at all.
+ *   bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len), which
+ *   should have been done by prep_bulk().
+ */
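+/*
+ * for illustration, again assuming blocksize 16: a client read of
+ * bd_nob = 1000 bytes into a single page arrives as 1008 bytes of
+ * cipher text, so with adj_nob the page's kiov_len is first taken from
+ * bd_enc_iov[0].kiov_len (1008) and then clamped to bd_nob (1000).
+ */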
+static
+int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
+ struct krb5_header *khdr,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *cipher,
+ rawobj_t *plain,
+ int adj_nob)
+{
+ struct blkcipher_desc ciph_desc;
+ __u8 local_iv[16] = {0};
+ struct scatterlist src, dst;
+ int ct_nob = 0, pt_nob = 0;
+ int blocksize, i, rc;
+
+ LASSERT(desc->bd_iov_count);
+ LASSERT(desc->bd_enc_iov);
+ LASSERT(desc->bd_nob_transferred);
+
+ blocksize = crypto_blkcipher_blocksize(tfm);
+ LASSERT(blocksize > 1);
+ LASSERT(cipher->len == blocksize + sizeof(*khdr));
+
+ ciph_desc.tfm = tfm;
+ ciph_desc.info = local_iv;
+ ciph_desc.flags = 0;
+
+ if (desc->bd_nob_transferred % blocksize) {
+ CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
+ return -EPROTO;
+ }
+
+ /* decrypt head (confounder) */
+ buf_to_sg(&src, cipher->data, blocksize);
+ buf_to_sg(&dst, plain->data, blocksize);
+
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ if (rc) {
+ CERROR("error to decrypt confounder: %d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
+ i++) {
+ if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
+ desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
+ CERROR("page %d: odd offset %u len %u, blocksize %d\n",
+ i, desc->bd_enc_iov[i].kiov_offset,
+ desc->bd_enc_iov[i].kiov_len, blocksize);
+ return -EFAULT;
+ }
+
+ if (adj_nob) {
+ if (ct_nob + desc->bd_enc_iov[i].kiov_len >
+ desc->bd_nob_transferred)
+ desc->bd_enc_iov[i].kiov_len =
+ desc->bd_nob_transferred - ct_nob;
+
+ desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
+ if (pt_nob + desc->bd_enc_iov[i].kiov_len > desc->bd_nob)
+ desc->bd_iov[i].kiov_len = desc->bd_nob - pt_nob;
+ } else {
+ /* this should be guaranteed by LNET */
+ LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
+ desc->bd_nob_transferred);
+ LASSERT(desc->bd_iov[i].kiov_len <=
+ desc->bd_enc_iov[i].kiov_len);
+ }
+
+ if (desc->bd_enc_iov[i].kiov_len == 0)
+ continue;
+
+ sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
+ desc->bd_enc_iov[i].kiov_len,
+ desc->bd_enc_iov[i].kiov_offset);
+ dst = src;
+ if (desc->bd_iov[i].kiov_len % blocksize == 0)
+ sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
+
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+ src.length);
+ if (rc) {
+ CERROR("error to decrypt page: %d\n", rc);
+ return rc;
+ }
+
+ if (desc->bd_iov[i].kiov_len % blocksize != 0) {
+ memcpy(page_address(desc->bd_iov[i].kiov_page) +
+ desc->bd_iov[i].kiov_offset,
+ page_address(desc->bd_enc_iov[i].kiov_page) +
+ desc->bd_iov[i].kiov_offset,
+ desc->bd_iov[i].kiov_len);
+ }
+
+ ct_nob += desc->bd_enc_iov[i].kiov_len;
+ pt_nob += desc->bd_iov[i].kiov_len;
+ }
+
+ if (unlikely(ct_nob != desc->bd_nob_transferred)) {
+ CERROR("%d cipher text transferred but only %d decrypted\n",
+ desc->bd_nob_transferred, ct_nob);
+ return -EFAULT;
+ }
+
+ if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
+ CERROR("%d plain text expected but only %d received\n",
+ desc->bd_nob, pt_nob);
+ return -EFAULT;
+ }
+
+ /* if needed, mark the remaining unused iovs as empty */
+ if (adj_nob)
+ while (i < desc->bd_iov_count)
+ desc->bd_iov[i++].kiov_len = 0;
+
+ /* decrypt tail (krb5 header) */
+ buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
+ buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+ sizeof(*khdr));
+ if (rc) {
+ CERROR("error to decrypt tail: %d\n", rc);
+ return rc;
+ }
+
+ if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
+ CERROR("krb5 header doesn't match\n");
+ return -EACCES;
+ }
+
+ return 0;
+}
+
static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
+ rawobj_t *gsshdr,
rawobj_t *msg,
int msg_buflen,
rawobj_t *token)
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
- unsigned char acceptor_flag;
int blocksize;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t data_desc[3], cipher;
__u8 conf[GSS_MAX_CIPHER_BLOCK];
- int enc_rc = 0;
+ int rc = 0;
LASSERT(ke);
LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
LASSERT(kctx->kc_keye.kb_tfm == NULL ||
ke->ke_conf_size >=
- crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm));
+ crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
- acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
+ /*
+ * final token format:
+ * ---------------------------------------------------
+ * | krb5 header | cipher text | checksum (16 bytes) |
+ * ---------------------------------------------------
+ */
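+ /*
+ * unlike the MIC path, the wrap checksum below also covers the gss
+ * header, binding this token to the request it protects;
+ * gss_unwrap_kerberos() rebuilds the same layout to verify it.
+ */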
/* fill krb5 header */
LASSERT(token->len >= sizeof(*khdr));
khdr = (struct krb5_header *) token->data;
-
- khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
- khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
- khdr->kh_filler = 0xff;
- khdr->kh_ec = cpu_to_be16(0);
- khdr->kh_rrc = cpu_to_be16(0);
- spin_lock(&krb5_seq_lock);
- khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
- spin_unlock(&krb5_seq_lock);
+ fill_krb5_header(kctx, khdr, 1);
/* generate confounder */
- get_random_bytes(conf, ke->ke_conf_size);
+ cfs_get_random_bytes(conf, ke->ke_conf_size);
/* get encryption blocksize. note kc_keye might not be associated with
- * a tfm, currently only for arcfour-hmac
- */
+ * a tfm; currently that is only the case for arcfour-hmac */
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LASSERT(kctx->kc_keye.kb_tfm == NULL);
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
LASSERT(blocksize <= ke->ke_conf_size);
return GSS_S_FAILURE;
/*
- * clear text layout, same for both checksum & encryption:
+ * clear text layout for checksum:
+ * ------------------------------------------------------
+ * | confounder | gss header | clear msgs | krb5 header |
+ * ------------------------------------------------------
+ */
+ data_desc[0].data = conf;
+ data_desc[0].len = ke->ke_conf_size;
+ data_desc[1].data = gsshdr->data;
+ data_desc[1].len = gsshdr->len;
+ data_desc[2].data = msg->data;
+ data_desc[2].len = msg->len;
+
+ /* compute checksum */
+ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+ khdr, 3, data_desc, 0, NULL, &cksum))
+ return GSS_S_FAILURE;
+ LASSERT(cksum.len >= ke->ke_hash_size);
+
+ /*
+ * clear text layout for encryption:
* -----------------------------------------
* | confounder | clear msgs | krb5 header |
* -----------------------------------------
data_desc[2].data = (__u8 *) khdr;
data_desc[2].len = sizeof(*khdr);
- /* compute checksum */
- if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
- khdr, 3, data_desc, &cksum))
- return GSS_S_FAILURE;
- LASSERT(cksum.len >= ke->ke_hash_size);
-
- /* encrypting, cipher text will be directly inplace */
+ /* cipher text will be directly inplace */
cipher.data = (__u8 *) (khdr + 1);
cipher.len = token->len - sizeof(*khdr);
LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
- if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
- rawobj_t arc4_keye;
- struct crypto_tfm *arc4_tfm;
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ rawobj_t arc4_keye;
+ struct crypto_blkcipher *arc4_tfm;
- if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
- NULL, 1, &cksum, &arc4_keye)) {
- CERROR("failed to obtain arc4 enc key\n");
- GOTO(arc4_out, enc_rc = -EACCES);
- }
+ if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+ NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+ CERROR("failed to obtain arc4 enc key\n");
+ GOTO(arc4_out, rc = -EACCES);
+ }
- arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB);
- if (arc4_tfm == NULL) {
- CERROR("failed to alloc tfm arc4 in ECB mode\n");
- GOTO(arc4_out_key, enc_rc = -EACCES);
- }
+ arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ if (IS_ERR(arc4_tfm)) {
+ CERROR("failed to alloc tfm arc4 in ECB mode\n");
+ GOTO(arc4_out_key, rc = -EACCES);
+ }
- if (crypto_cipher_setkey(arc4_tfm,
- arc4_keye.data, arc4_keye.len)) {
+ if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
+ arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
- GOTO(arc4_out_tfm, enc_rc = -EACCES);
+ GOTO(arc4_out_tfm, rc = -EACCES);
}
- enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
- 3, data_desc, &cipher, 1);
+ rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
+ 3, data_desc, &cipher, 1);
arc4_out_tfm:
- crypto_free_tfm(arc4_tfm);
+ crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
do {} while(0); /* just to avoid compile warning */
} else {
- enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
- 3, data_desc, &cipher, 1);
+ rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
+ 3, data_desc, &cipher, 1);
+ }
+
+ if (rc != 0) {
+ rawobj_free(&cksum);
+ return GSS_S_FAILURE;
+ }
+
+ /* fill in checksum */
+ LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
+ memcpy((char *)(khdr + 1) + cipher.len,
+ cksum.data + cksum.len - ke->ke_hash_size,
+ ke->ke_hash_size);
+ rawobj_free(&cksum);
+
+ /* final token length */
+ token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
+ return GSS_S_COMPLETE;
+}
+
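+/*
+ * prepare a bulk descriptor for encryption: mirror each page's offset
+ * into bd_enc_iov[] and round its length up to the cipher blocksize so
+ * that krb5_encrypt_bulk() can encrypt whole blocks in place.
+ */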
+static
+__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ int blocksize, i;
+
+ LASSERT(desc->bd_iov_count);
+ LASSERT(desc->bd_enc_iov);
+ LASSERT(kctx->kc_keye.kb_tfm);
+
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ LASSERT(desc->bd_enc_iov[i].kiov_page);
+ /*
+ * the offset should always start at a page boundary on either
+ * the client or the server side.
+ */
+ if (desc->bd_iov[i].kiov_offset & (blocksize - 1)) {
+ CERROR("odd offset %d in page %d\n",
+ desc->bd_iov[i].kiov_offset, i);
+ return GSS_S_FAILURE;
+ }
+
+ desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
+ desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
+ blocksize - 1) & (~(blocksize - 1));
+ }
+
+ return GSS_S_COMPLETE;
+}
+
+static
+__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token, int adj_nob)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+ struct krb5_header *khdr;
+ int blocksize;
+ rawobj_t cksum = RAWOBJ_EMPTY;
+ rawobj_t data_desc[1], cipher;
+ __u8 conf[GSS_MAX_CIPHER_BLOCK];
+ int rc = 0;
+
+ LASSERT(ke);
+ LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
+
+ /*
+ * final token format:
+ * --------------------------------------------------
+ * | krb5 header | head/tail cipher text | checksum |
+ * --------------------------------------------------
+ */
+
+ /* fill krb5 header */
+ LASSERT(token->len >= sizeof(*khdr));
+ khdr = (struct krb5_header *) token->data;
+ fill_krb5_header(kctx, khdr, 1);
+
+ /* generate confounder */
+ cfs_get_random_bytes(conf, ke->ke_conf_size);
+
+ /* get encryption blocksize. note kc_keye might not be associated with
+ * a tfm; currently that is only the case for arcfour-hmac */
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ LASSERT(kctx->kc_keye.kb_tfm == NULL);
+ blocksize = 1;
+ } else {
+ LASSERT(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ }
+
+ /*
+ * we assume sizeof(krb5_header) (16 bytes) is a multiple of blocksize,
+ * so the bulk token size is exactly (sizeof(krb5_header) +
+ * blocksize + sizeof(krb5_header) + hashsize)
+ */
+ LASSERT(blocksize <= ke->ke_conf_size);
+ LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+ LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
+
+ /*
+ * clear text layout for checksum:
+ * ------------------------------------------
+ * | confounder | clear pages | krb5 header |
+ * ------------------------------------------
+ */
+ data_desc[0].data = conf;
+ data_desc[0].len = ke->ke_conf_size;
+
+ /* compute checksum */
+ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+ khdr, 1, data_desc,
+ desc->bd_iov_count, desc->bd_iov,
+ &cksum))
+ return GSS_S_FAILURE;
+ LASSERT(cksum.len >= ke->ke_hash_size);
+
+ /*
+ * clear text layout for encryption:
+ * ------------------------------------------
+ * | confounder | clear pages | krb5 header |
+ * ------------------------------------------
+ * | | |
+ * ---------- (cipher pages) |
+ * result token: | |
+ * -------------------------------------------
+ * | krb5 header | cipher text | cipher text |
+ * -------------------------------------------
+ */
+ data_desc[0].data = conf;
+ data_desc[0].len = ke->ke_conf_size;
+
+ cipher.data = (__u8 *) (khdr + 1);
+ cipher.len = blocksize + sizeof(*khdr);
+
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ LBUG();
+ rc = 0;
+ } else {
+ rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
+ conf, desc, &cipher, adj_nob);
}
- if (enc_rc != 0) {
+ if (rc != 0) {
rawobj_free(&cksum);
return GSS_S_FAILURE;
}
static
__u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
+ rawobj_t *gsshdr,
rawobj_t *token,
rawobj_t *msg)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
- unsigned char acceptor_flag;
unsigned char *tmpbuf;
int blocksize, bodysize;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t cipher_in, plain_out;
- __u32 rc = GSS_S_FAILURE, enc_rc = 0;
+ rawobj_t hash_objs[3];
+ int rc = 0;
+ __u32 major;
LASSERT(ke);
- acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
-
if (token->len < sizeof(*khdr)) {
CERROR("short signature: %u\n", token->len);
return GSS_S_DEFECTIVE_TOKEN;
khdr = (struct krb5_header *) token->data;
- /* sanity check header */
- if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_WRAP_MSG) {
- CERROR("bad token id\n");
- return GSS_S_DEFECTIVE_TOKEN;
- }
- if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
- CERROR("bad direction flag\n");
- return GSS_S_BAD_SIG;
- }
- if ((khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
- CERROR("missing confidential flag\n");
- return GSS_S_BAD_SIG;
- }
- if (khdr->kh_filler != 0xff) {
- CERROR("bad filler\n");
- return GSS_S_DEFECTIVE_TOKEN;
- }
- if (be16_to_cpu(khdr->kh_ec) != 0x0 ||
- be16_to_cpu(khdr->kh_rrc) != 0x0) {
- CERROR("bad EC or RRC\n");
- return GSS_S_DEFECTIVE_TOKEN;
+ major = verify_krb5_header(kctx, khdr, 1);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("bad krb5 header\n");
+ return major;
}
/* block size */
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = crypto_tfm_alg_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/* expected token layout:
}
/* decrypting */
- OBD_ALLOC(tmpbuf, bodysize);
+ OBD_ALLOC_LARGE(tmpbuf, bodysize);
if (!tmpbuf)
return GSS_S_FAILURE;
+ major = GSS_S_FAILURE;
+
cipher_in.data = (__u8 *) (khdr + 1);
cipher_in.len = bodysize;
plain_out.data = tmpbuf;
plain_out.len = bodysize;
- if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
- rawobj_t arc4_keye;
- struct crypto_tfm *arc4_tfm;
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ rawobj_t arc4_keye;
+ struct crypto_blkcipher *arc4_tfm;
- cksum.data = token->data + token->len - ke->ke_hash_size;
- cksum.len = ke->ke_hash_size;
+ cksum.data = token->data + token->len - ke->ke_hash_size;
+ cksum.len = ke->ke_hash_size;
- if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
- NULL, 1, &cksum, &arc4_keye)) {
- CERROR("failed to obtain arc4 enc key\n");
- GOTO(arc4_out, enc_rc = -EACCES);
- }
+ if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+ NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+ CERROR("failed to obtain arc4 enc key\n");
+ GOTO(arc4_out, rc = -EACCES);
+ }
- arc4_tfm = crypto_alloc_tfm("arc4", CRYPTO_TFM_MODE_ECB);
- if (arc4_tfm == NULL) {
- CERROR("failed to alloc tfm arc4 in ECB mode\n");
- GOTO(arc4_out_key, enc_rc = -EACCES);
- }
+ arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ if (IS_ERR(arc4_tfm)) {
+ CERROR("failed to alloc tfm arc4 in ECB mode\n");
+ GOTO(arc4_out_key, rc = -EACCES);
+ }
- if (crypto_cipher_setkey(arc4_tfm,
+ if (crypto_blkcipher_setkey(arc4_tfm,
arc4_keye.data, arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
- GOTO(arc4_out_tfm, enc_rc = -EACCES);
+ GOTO(arc4_out_tfm, rc = -EACCES);
}
- enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
- 1, &cipher_in, &plain_out, 0);
+ rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
+ 1, &cipher_in, &plain_out, 0);
arc4_out_tfm:
- crypto_free_tfm(arc4_tfm);
+ crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
cksum = RAWOBJ_EMPTY;
} else {
- enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
- 1, &cipher_in, &plain_out, 0);
+ rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
+ 1, &cipher_in, &plain_out, 0);
}
- if (enc_rc != 0) {
+ if (rc != 0) {
CERROR("error decrypt\n");
goto out_free;
}
* -----------------------------------------
*/
- /* last part must be identical to the krb5 header */
+ /* verify krb5 header in token is not modified */
if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
sizeof(*khdr))) {
- CERROR("decrypted header mismatch\n");
+ CERROR("decrypted krb5 header mismatch\n");
goto out_free;
}
- /* verify checksum */
+ /* verify checksum; it is computed over the clear text laid out as:
+ * ------------------------------------------------------
+ * | confounder | gss header | clear msgs | krb5 header |
+ * ------------------------------------------------------
+ */
+ hash_objs[0].len = ke->ke_conf_size;
+ hash_objs[0].data = plain_out.data;
+ hash_objs[1].len = gsshdr->len;
+ hash_objs[1].data = gsshdr->data;
+ hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
+ hash_objs[2].data = plain_out.data + ke->ke_conf_size;
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
- khdr, 1, &plain_out, &cksum))
+ khdr, 3, hash_objs, 0, NULL, &cksum))
goto out_free;
LASSERT(cksum.len >= ke->ke_hash_size);
if (memcmp((char *)(khdr + 1) + bodysize,
cksum.data + cksum.len - ke->ke_hash_size,
ke->ke_hash_size)) {
- CERROR("cksum mismatch\n");
+ CERROR("checksum mismatch\n");
goto out_free;
}
msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
- rc = GSS_S_COMPLETE;
+ major = GSS_S_COMPLETE;
out_free:
- OBD_FREE(tmpbuf, bodysize);
+ OBD_FREE_LARGE(tmpbuf, bodysize);
rawobj_free(&cksum);
- return rc;
+ return major;
}
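+/*
+ * the bulk unwrap path below replaces the old gss_plain_encrypt
+ * interface.  only block-cipher enctypes are supported here; the
+ * arcfour-hmac branch is a deliberate LBUG().
+ */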
static
-__u32 gss_plain_encrypt_kerberos(struct gss_ctx *ctx,
- int length,
- void *in_buf,
- void *out_buf)
+__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token, int adj_nob)
{
- struct krb5_ctx *kctx = ctx->internal_ctx_id;
- __u32 rc;
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+ struct krb5_header *khdr;
+ int blocksize;
+ rawobj_t cksum = RAWOBJ_EMPTY;
+ rawobj_t cipher, plain;
+ rawobj_t data_desc[1];
+ int rc;
+ __u32 major;
- rc = krb5_encrypt(kctx->kc_keye.kb_tfm, 0,
- NULL, in_buf, out_buf, length);
+ LASSERT(ke);
+
+ if (token->len < sizeof(*khdr)) {
+ CERROR("short signature: %u\n", token->len);
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ khdr = (struct krb5_header *) token->data;
+
+ major = verify_krb5_header(kctx, khdr, 1);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("bad krb5 header\n");
+ return major;
+ }
+
+ /* block size */
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ LASSERT(kctx->kc_keye.kb_tfm == NULL);
+ blocksize = 1;
+ LBUG();
+ } else {
+ LASSERT(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ }
+ LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+
+ /*
+ * token format is expected as:
+ * -----------------------------------------------
+ * | krb5 header | head/tail cipher text | cksum |
+ * -----------------------------------------------
+ */
+ if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
+ ke->ke_hash_size) {
+ CERROR("short token size: %u\n", token->len);
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ cipher.data = (__u8 *) (khdr + 1);
+ cipher.len = blocksize + sizeof(*khdr);
+ plain.data = cipher.data;
+ plain.len = cipher.len;
+
+ rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
+ desc, &cipher, &plain, adj_nob);
if (rc)
- CERROR("plain encrypt error: %d\n", rc);
+ return GSS_S_DEFECTIVE_TOKEN;
- return rc;
+ /*
+ * verify checksum; it is computed over the clear text laid out as:
+ * ------------------------------------------
+ * | confounder | clear pages | krb5 header |
+ * ------------------------------------------
+ */
+ data_desc[0].data = plain.data;
+ data_desc[0].len = blocksize;
+
+ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+ khdr, 1, data_desc,
+ desc->bd_iov_count, desc->bd_iov,
+ &cksum))
+ return GSS_S_FAILURE;
+ LASSERT(cksum.len >= ke->ke_hash_size);
+
+ if (memcmp(plain.data + blocksize + sizeof(*khdr),
+ cksum.data + cksum.len - ke->ke_hash_size,
+ ke->ke_hash_size)) {
+ CERROR("checksum mismatch\n");
+ rawobj_free(&cksum);
+ return GSS_S_BAD_SIG;
+ }
+
+ rawobj_free(&cksum);
+ return GSS_S_COMPLETE;
}
int gss_display_kerberos(struct gss_ctx *ctx,
struct krb5_ctx *kctx = ctx->internal_ctx_id;
int written;
- written = snprintf(buf, bufsize,
- " mech: krb5\n"
- " enctype: %s\n",
- enctype2str(kctx->kc_enctype));
+ written = snprintf(buf, bufsize, "krb5 (%s)",
+ enctype2str(kctx->kc_enctype));
return written;
}
.gss_verify_mic = gss_verify_mic_kerberos,
.gss_wrap = gss_wrap_kerberos,
.gss_unwrap = gss_unwrap_kerberos,
- .gss_plain_encrypt = gss_plain_encrypt_kerberos,
+ .gss_prep_bulk = gss_prep_bulk_kerberos,
+ .gss_wrap_bulk = gss_wrap_bulk_kerberos,
+ .gss_unwrap_bulk = gss_unwrap_bulk_kerberos,
.gss_delete_sec_context = gss_delete_sec_context_kerberos,
.gss_display = gss_display_kerberos,
};
static struct subflavor_desc gss_kerberos_sfs[] = {
{
- .sf_subflavor = SPTLRPC_SUBFLVR_KRB5,
+ .sf_subflavor = SPTLRPC_SUBFLVR_KRB5N,
.sf_qop = 0,
- .sf_service = SPTLRPC_SVC_NONE,
- .sf_name = "krb5"
+ .sf_service = SPTLRPC_SVC_NULL,
+ .sf_name = "krb5n"
},
{
- .sf_subflavor = SPTLRPC_SUBFLVR_KRB5I,
+ .sf_subflavor = SPTLRPC_SUBFLVR_KRB5A,
.sf_qop = 0,
.sf_service = SPTLRPC_SVC_AUTH,
+ .sf_name = "krb5a"
+ },
+ {
+ .sf_subflavor = SPTLRPC_SUBFLVR_KRB5I,
+ .sf_qop = 0,
+ .sf_service = SPTLRPC_SVC_INTG,
.sf_name = "krb5i"
},
{
.gm_oid = (rawobj_t)
{9, "\052\206\110\206\367\022\001\002\002"},
.gm_ops = &gss_kerberos_ops,
- .gm_sf_num = 3,
+ .gm_sf_num = 4,
.gm_sfs = gss_kerberos_sfs,
};
int __init init_kerberos_module(void)
{
- int status;
+ int status;
+
+ spin_lock_init(&krb5_seq_lock);
- status = lgss_mech_register(&gss_kerberos_mech);
- if (status)
- CERROR("Failed to register kerberos gss mechanism!\n");
- return status;
+ status = lgss_mech_register(&gss_kerberos_mech);
+ if (status)
+ CERROR("Failed to register kerberos gss mechanism!\n");
+ return status;
}
void __exit cleanup_kerberos_module(void)