X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fgss%2Fgss_krb5_mech.c;h=09895ba43487e9e2f2a31d5ffed9ba98dbd44a42;hb=671c1b0c705640d63a1d3be7016c79afd10bc8df;hp=c6d85d0b317afb83f51161c6e4fc3d7173a001f5;hpb=2b294992edce5af7b79d4300ed3aa1ea6a8db850;p=fs%2Flustre-release.git

diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index c6d85d0..09895ba 100644
--- a/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  *
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
  *
  * Author: Eric Mei
  */
@@ -341,7 +341,7 @@ __u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
         if (p != end)
                 goto out_err;

-        CDEBUG(D_SEC, "succesfully imported rfc1964 context\n");
+        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
         return 0;
 out_err:
         return GSS_S_FAILURE;
@@ -403,7 +403,7 @@ __u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
         if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                 goto out_err;

-        CDEBUG(D_SEC, "succesfully imported v2 context\n");
+        CDEBUG(D_SEC, "successfully imported v2 context\n");
         return 0;
 out_err:
         return GSS_S_FAILURE;
@@ -494,7 +494,7 @@ __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                 goto out_err;

         gctx_new->internal_ctx_id = knew;
-        CDEBUG(D_SEC, "succesfully copied reverse context\n");
+        CDEBUG(D_SEC, "successfully copied reverse context\n");
         return GSS_S_COMPLETE;

 out_err:
@@ -602,6 +602,7 @@ int krb5_digest_hmac(struct crypto_hash *tfm,
                 if (iovs[i].kiov_len == 0)
                         continue;

+                sg_init_table(sg, 1);
                 sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
                             iovs[i].kiov_offset);
                 crypto_hash_update(&desc, sg, iovs[i].kiov_len);
@@ -644,6 +645,7 @@ int krb5_digest_norm(struct crypto_hash *tfm,
                 if (iovs[i].kiov_len == 0)
                         continue;

+                sg_init_table(sg, 1);
                 sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
                             iovs[i].kiov_offset);
                 crypto_hash_update(&desc, sg, iovs[i].kiov_len);
@@ -947,8 +949,9 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
         struct scatterlist src, dst;
         int blocksize, i, rc, nob = 0;

+        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
         LASSERT(desc->bd_iov_count);
-        LASSERT(desc->bd_enc_iov);
+        LASSERT(GET_ENC_KIOV(desc));

         blocksize = crypto_blkcipher_blocksize(tfm);
         LASSERT(blocksize > 1);
@@ -970,17 +973,20 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,

         /* encrypt clear pages */
         for (i = 0; i < desc->bd_iov_count; i++) {
-                sg_set_page(&src, desc->bd_iov[i].kiov_page,
-                            (desc->bd_iov[i].kiov_len + blocksize - 1) &
+                sg_init_table(&src, 1);
+                sg_set_page(&src, BD_GET_KIOV(desc, i).kiov_page,
+                            (BD_GET_KIOV(desc, i).kiov_len +
+                             blocksize - 1) &
                             (~(blocksize - 1)),
-                            desc->bd_iov[i].kiov_offset);
+                            BD_GET_KIOV(desc, i).kiov_offset);
                 if (adj_nob)
                         nob += src.length;
-                sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
-                            src.offset);
+                sg_init_table(&dst, 1);
+                sg_set_page(&dst, BD_GET_ENC_KIOV(desc, i).kiov_page,
+                            src.length, src.offset);

-                desc->bd_enc_iov[i].kiov_offset = dst.offset;
-                desc->bd_enc_iov[i].kiov_len = dst.length;
+                BD_GET_ENC_KIOV(desc, i).kiov_offset = dst.offset;
+                BD_GET_ENC_KIOV(desc, i).kiov_len = dst.length;

                 rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                  src.length);
@@ -1015,12 +1021,14 @@ int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
  * plain text size.
  * - for client read: we don't know data size for each page, so
  *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
- *   be smaller, so we need to adjust it according to bd_enc_iov[]->kiov_len.
+ *   be smaller, so we need to adjust it according to
+ *   bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
  * this means we DO NOT support the situation that server send an odd size
  * data in a page which is not the last one.
  * - for server write: we knows exactly data size for each page being expected,
  *   thus kiov_len is accurate already, so we should not adjust it at all.
- *   and bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len) which
+ *   and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
+ *   round_up(bd_iov[]->kiov_len) which
  *   should have been done by prep_bulk().
  */
 static
@@ -1037,8 +1045,9 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
         int ct_nob = 0, pt_nob = 0;
         int blocksize, i, rc;

+        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
         LASSERT(desc->bd_iov_count);
-        LASSERT(desc->bd_enc_iov);
+        LASSERT(GET_ENC_KIOV(desc));
         LASSERT(desc->bd_nob_transferred);

         blocksize = crypto_blkcipher_blocksize(tfm);
@@ -1064,42 +1073,51 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                 return rc;
         }

-        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
-             i++) {
-                if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
-                    desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
-                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
-                               i, desc->bd_enc_iov[i].kiov_offset,
-                               desc->bd_enc_iov[i].kiov_len, blocksize);
-                        return -EFAULT;
-                }
-
-                if (adj_nob) {
-                        if (ct_nob + desc->bd_enc_iov[i].kiov_len >
-                            desc->bd_nob_transferred)
-                                desc->bd_enc_iov[i].kiov_len =
-                                        desc->bd_nob_transferred - ct_nob;
+        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
+             i++) {
+                if (BD_GET_ENC_KIOV(desc, i).kiov_offset % blocksize
+                    != 0 ||
+                    BD_GET_ENC_KIOV(desc, i).kiov_len % blocksize
+                    != 0) {
+                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
+                               i, BD_GET_ENC_KIOV(desc, i).kiov_offset,
+                               BD_GET_ENC_KIOV(desc, i).kiov_len,
+                               blocksize);
+                        return -EFAULT;
+                }

-                desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
-                if (pt_nob + desc->bd_enc_iov[i].kiov_len >desc->bd_nob)
-                        desc->bd_iov[i].kiov_len = desc->bd_nob -pt_nob;
-        } else {
-                /* this should be guaranteed by LNET */
-                LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
-                        desc->bd_nob_transferred);
-                LASSERT(desc->bd_iov[i].kiov_len <=
-                        desc->bd_enc_iov[i].kiov_len);
-        }
+                if (adj_nob) {
+                        if (ct_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+                            desc->bd_nob_transferred)
+                                BD_GET_ENC_KIOV(desc, i).kiov_len =
+                                        desc->bd_nob_transferred - ct_nob;
+
+                        BD_GET_KIOV(desc, i).kiov_len =
+                                BD_GET_ENC_KIOV(desc, i).kiov_len;
+                        if (pt_nob + BD_GET_ENC_KIOV(desc, i).kiov_len >
+                            desc->bd_nob)
+                                BD_GET_KIOV(desc, i).kiov_len =
+                                        desc->bd_nob - pt_nob;
+                } else {
+                        /* this should be guaranteed by LNET */
+                        LASSERT(ct_nob + BD_GET_ENC_KIOV(desc, i).
+                                kiov_len <=
+                                desc->bd_nob_transferred);
+                        LASSERT(BD_GET_KIOV(desc, i).kiov_len <=
+                                BD_GET_ENC_KIOV(desc, i).kiov_len);
+                }

-                if (desc->bd_enc_iov[i].kiov_len == 0)
-                        continue;
+                if (BD_GET_ENC_KIOV(desc, i).kiov_len == 0)
+                        continue;

-                sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
-                            desc->bd_enc_iov[i].kiov_len,
-                            desc->bd_enc_iov[i].kiov_offset);
+                sg_init_table(&src, 1);
+                sg_set_page(&src, BD_GET_ENC_KIOV(desc, i).kiov_page,
+                            BD_GET_ENC_KIOV(desc, i).kiov_len,
+                            BD_GET_ENC_KIOV(desc, i).kiov_offset);
                 dst = src;
-                if (desc->bd_iov[i].kiov_len % blocksize == 0)
-                        sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
+                if (BD_GET_KIOV(desc, i).kiov_len % blocksize == 0)
+                        sg_assign_page(&dst,
+                                       BD_GET_KIOV(desc, i).kiov_page);

                 rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                  src.length);
@@ -1108,17 +1126,18 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                         return rc;
                 }

-                if (desc->bd_iov[i].kiov_len % blocksize != 0) {
-                        memcpy(page_address(desc->bd_iov[i].kiov_page) +
-                               desc->bd_iov[i].kiov_offset,
-                               page_address(desc->bd_enc_iov[i].kiov_page) +
-                               desc->bd_iov[i].kiov_offset,
-                               desc->bd_iov[i].kiov_len);
-                }
+                if (BD_GET_KIOV(desc, i).kiov_len % blocksize != 0) {
+                        memcpy(page_address(BD_GET_KIOV(desc, i).kiov_page) +
+                               BD_GET_KIOV(desc, i).kiov_offset,
+                               page_address(BD_GET_ENC_KIOV(desc, i).
+                                            kiov_page) +
+                               BD_GET_KIOV(desc, i).kiov_offset,
+                               BD_GET_KIOV(desc, i).kiov_len);
+                }

-                ct_nob += desc->bd_enc_iov[i].kiov_len;
-                pt_nob += desc->bd_iov[i].kiov_len;
-        }
+                ct_nob += BD_GET_ENC_KIOV(desc, i).kiov_len;
+                pt_nob += BD_GET_KIOV(desc, i).kiov_len;
+        }

         if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                 CERROR("%d cipher text transferred but only %d decrypted\n",
@@ -1132,10 +1151,10 @@ int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                 return -EFAULT;
         }

-        /* if needed, clear up the rest unused iovs */
-        if (adj_nob)
-                while (i < desc->bd_iov_count)
-                        desc->bd_iov[i++].kiov_len = 0;
+        /* if needed, clear up the rest unused iovs */
+        if (adj_nob)
+                while (i < desc->bd_iov_count)
+                        BD_GET_KIOV(desc, i++).kiov_len = 0;

         /* decrypt tail (krb5 header) */
         buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
@@ -1300,35 +1319,38 @@ arc4_out:

 static
 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
-                             struct ptlrpc_bulk_desc *desc)
+                             struct ptlrpc_bulk_desc *desc)
 {
-        struct krb5_ctx *kctx = gctx->internal_ctx_id;
-        int blocksize, i;
+        struct krb5_ctx *kctx = gctx->internal_ctx_id;
+        int blocksize, i;

-        LASSERT(desc->bd_iov_count);
-        LASSERT(desc->bd_enc_iov);
-        LASSERT(kctx->kc_keye.kb_tfm);
+        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+        LASSERT(desc->bd_iov_count);
+        LASSERT(GET_ENC_KIOV(desc));
+        LASSERT(kctx->kc_keye.kb_tfm);

         blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);

-        for (i = 0; i < desc->bd_iov_count; i++) {
-                LASSERT(desc->bd_enc_iov[i].kiov_page);
-                /*
-                 * offset should always start at page boundary of either
-                 * client or server side.
-                 */
-                if (desc->bd_iov[i].kiov_offset & blocksize) {
-                        CERROR("odd offset %d in page %d\n",
-                               desc->bd_iov[i].kiov_offset, i);
-                        return GSS_S_FAILURE;
-                }
+        for (i = 0; i < desc->bd_iov_count; i++) {
+                LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page);
+                /*
+                 * offset should always start at page boundary of either
+                 * client or server side.
+                 */
+                if (BD_GET_KIOV(desc, i).kiov_offset & blocksize) {
+                        CERROR("odd offset %d in page %d\n",
+                               BD_GET_KIOV(desc, i).kiov_offset, i);
+                        return GSS_S_FAILURE;
+                }

-                desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
-                desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
-                                                blocksize - 1) & (~(blocksize - 1));
-        }
+                BD_GET_ENC_KIOV(desc, i).kiov_offset =
+                        BD_GET_KIOV(desc, i).kiov_offset;
+                BD_GET_ENC_KIOV(desc, i).kiov_len =
+                        (BD_GET_KIOV(desc, i).kiov_len +
+                         blocksize - 1) & (~(blocksize - 1));
+        }

-        return GSS_S_COMPLETE;
+        return GSS_S_COMPLETE;
 }

 static
@@ -1345,6 +1367,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         __u8 conf[GSS_MAX_CIPHER_BLOCK];
         int rc = 0;

+        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
         LASSERT(ke);
         LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

@@ -1391,13 +1414,13 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
         data_desc[0].data = conf;
         data_desc[0].len = ke->ke_conf_size;

-        /* compute checksum */
-        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
-                               khdr, 1, data_desc,
-                               desc->bd_iov_count, desc->bd_iov,
-                               &cksum))
-                return GSS_S_FAILURE;
-        LASSERT(cksum.len >= ke->ke_hash_size);
+        /* compute checksum */
+        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+                               khdr, 1, data_desc,
+                               desc->bd_iov_count, GET_KIOV(desc),
+                               &cksum))
+                return GSS_S_FAILURE;
+        LASSERT(cksum.len >= ke->ke_hash_size);

         /*
          * clear text layout for encryption:
@@ -1624,6 +1647,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         int rc;
         __u32 major;

+        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
         LASSERT(ke);

         if (token->len < sizeof(*khdr)) {
@@ -1681,12 +1705,13 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
         data_desc[0].data = plain.data;
         data_desc[0].len = blocksize;

-        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
-                               khdr, 1, data_desc,
-                               desc->bd_iov_count, desc->bd_iov,
-                               &cksum))
-                return GSS_S_FAILURE;
-        LASSERT(cksum.len >= ke->ke_hash_size);
+        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+                               khdr, 1, data_desc,
+                               desc->bd_iov_count,
+                               GET_KIOV(desc),
+                               &cksum))
+                return GSS_S_FAILURE;
+        LASSERT(cksum.len >= ke->ke_hash_size);

         if (memcmp(plain.data + blocksize + sizeof(*khdr),
                    cksum.data + cksum.len - ke->ke_hash_size,
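
Note on the length fixup the patch converts to the BD_GET_KIOV()/BD_GET_ENC_KIOV() accessors: per the comment block above krb5_decrypt_bulk(), each page's cipher-text fragment is rounded up to the cipher blocksize, and on the client read path (adj_nob) the per-page lengths are then clipped against the bytes actually transferred (bd_nob_transferred) and the expected payload size (bd_nob). The standalone sketch below illustrates only that arithmetic, outside the kernel; struct page_frag, round_up_block() and fixup_lengths() are made-up names for this example and are not part of the Lustre or Linux crypto API.

#include <stdio.h>

struct page_frag {
        unsigned int len;       /* plain-text bytes carried by this page */
        unsigned int enc_len;   /* cipher-text bytes carried by this page */
};

/* Round len up to a multiple of blocksize (a power of two) -- the same
 * "(len + blocksize - 1) & ~(blocksize - 1)" idiom used in the patch by
 * gss_prep_bulk_kerberos() and krb5_encrypt_bulk(). */
static unsigned int round_up_block(unsigned int len, unsigned int blocksize)
{
        return (len + blocksize - 1) & ~(blocksize - 1);
}

/* Clip per-page lengths so the cipher-text total matches the bytes that
 * were transferred and the plain-text total matches the expected payload,
 * mirroring the shape of the adj_nob branch in krb5_decrypt_bulk(). */
static void fixup_lengths(struct page_frag *frags, int count,
                          unsigned int nob_transferred, unsigned int nob)
{
        unsigned int ct_nob = 0, pt_nob = 0;
        int i;

        for (i = 0; i < count && ct_nob < nob_transferred; i++) {
                if (ct_nob + frags[i].enc_len > nob_transferred)
                        frags[i].enc_len = nob_transferred - ct_nob;

                frags[i].len = frags[i].enc_len;
                if (pt_nob + frags[i].len > nob)
                        frags[i].len = nob - pt_nob;

                ct_nob += frags[i].enc_len;
                pt_nob += frags[i].len;
        }

        /* pages past the transferred data carry no payload */
        for (; i < count; i++)
                frags[i].len = 0;
}

int main(void)
{
        /* client read: both pages start out assuming a full 4096-byte page,
         * rounded up to a hypothetical 16-byte cipher block (already aligned) */
        struct page_frag frags[2] = {
                { .enc_len = round_up_block(4096, 16) },
                { .enc_len = round_up_block(4096, 16) },
        };

        /* pretend the server sent 5000 payload bytes, padded on the wire to
         * 4096 + round_up_block(904, 16) = 5008 cipher-text bytes */
        fixup_lengths(frags, 2, 5008, 5000);

        printf("page0: len=%u enc_len=%u\n", frags[0].len, frags[0].enc_len);
        printf("page1: len=%u enc_len=%u\n", frags[1].len, frags[1].enc_len);
        return 0;
}

Running this prints len=4096/enc_len=4096 for the first page and len=904/enc_len=912 for the second, which is the behaviour the comment describes: the block cipher works on whole blocks, so the on-the-wire length of each page is its plain-text length rounded up to the blocksize, and the receiver recovers the true plain-text length by clipping against the transferred and expected byte counts.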