X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fgss%2Fgss_bulk.c;h=edc5928902a45a80bfc36d9140e0e3d8ffcd8684;hb=b2bb3b247d1dc75e25f1b5c14a333905909b5e70;hp=03fd0ce2a428dce27c59de0868929c2fc51e0598;hpb=6869932b552ac705f411de3362f01bd50c1f6f7d;p=fs%2Flustre-release.git
diff --git a/lustre/ptlrpc/gss/gss_bulk.c b/lustre/ptlrpc/gss/gss_bulk.c
index 03fd0ce..edc59289 100644
--- a/lustre/ptlrpc/gss/gss_bulk.c
+++ b/lustre/ptlrpc/gss/gss_bulk.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -26,7 +24,7 @@
 * GPL HEADER END
 */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
 /*
@@ -38,9 +36,6 @@
 * Author: Eric Mei
 */
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_SEC
 #ifdef __KERNEL__
 #include
@@ -48,7 +43,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #else
 #include
 #endif
@@ -67,391 +61,26 @@
 #include "gss_internal.h"
 #include "gss_api.h"
-static __u8 zero_iv[CIPHER_MAX_BLKSIZE] = { 0, };
-
-static void buf_to_sl(struct scatterlist *sl,
- void *buf, unsigned int len)
-{
- sl->page = virt_to_page(buf);
- sl->offset = offset_in_page(buf);
- sl->length = len;
-}
-
-/*
- * CTS CBC encryption:
- * 1. X(n-1) = P(n-1)
- * 2. E(n-1) = Encrypt(K, X(n-1))
- * 3. C(n) = HEAD(E(n-1))
- * 4. P = P(n) | 0
- * 5. D(n) = E(n-1) XOR P
- * 6. C(n-1) = Encrypt(K, D(n))
- *
- * CTS encryption using standard CBC interface:
- * 1. pad the last partial block with 0.
- * 2. do CBC encryption.
- * 3. swap the last two ciphertext blocks.
- * 4. truncate to original plaintext size.
- */
-static int cbc_cts_encrypt(struct ll_crypto_cipher *tfm,
- struct scatterlist *sld,
- struct scatterlist *sls)
-{
- struct scatterlist slst, sldt;
- struct blkcipher_desc desc;
- void *data;
- __u8 sbuf[CIPHER_MAX_BLKSIZE];
- __u8 dbuf[CIPHER_MAX_BLKSIZE];
- unsigned int blksize, blks, tail;
- int rc;
-
- blksize = ll_crypto_blkcipher_blocksize(tfm);
- blks = sls->length / blksize;
- tail = sls->length % blksize;
- LASSERT(blks > 0 && tail > 0);
-
- /* pad tail block with 0, copy to sbuf */
- data = cfs_kmap(sls->page);
- memcpy(sbuf, data + sls->offset + blks * blksize, tail);
- memset(sbuf + tail, 0, blksize - tail);
- cfs_kunmap(sls->page);
-
- buf_to_sl(&slst, sbuf, blksize);
- buf_to_sl(&sldt, dbuf, blksize);
- desc.tfm = tfm;
- desc.flags = 0;
-
- /* encrypt head */
- rc = ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length - tail);
- if (unlikely(rc)) {
- CERROR("encrypt head (%u) data: %d\n", sls->length - tail, rc);
- return rc;
- }
- /* encrypt tail */
- rc = ll_crypto_blkcipher_encrypt(&desc, &sldt, &slst, blksize);
- if (unlikely(rc)) {
- CERROR("encrypt tail (%u) data: %d\n", slst.length, rc);
- return rc;
- }
-
- /* swab C(n) and C(n-1), if n == 1, then C(n-1) is the IV */
- data = cfs_kmap(sld->page);
-
- memcpy(data + sld->offset + blks * blksize,
- data + sld->offset + (blks - 1) * blksize, tail);
- memcpy(data + sld->offset + (blks - 1) * blksize, dbuf, blksize);
- cfs_kunmap(sld->page);
-
- return 0;
-}
-
-/*
- * CTS CBC decryption:
- * 1. D(n) = Decrypt(K, C(n-1))
- * 2. C = C(n) | 0
- * 3. X(n) = D(n) XOR C
- * 4. P(n) = HEAD(X(n))
- * 5. E(n-1) = C(n) | TAIL(X(n))
- * 6. X(n-1) = Decrypt(K, E(n-1))
- * 7. P(n-1) = X(n-1) XOR C(n-2)
- *
- * CTS decryption using standard CBC interface:
- * 1. D(n) = Decrypt(K, C(n-1))
- * 2. C(n) = C(n) | TAIL(D(n))
- * 3. swap the last two ciphertext blocks.
- * 4. do CBC decryption.
- * 5. truncate to original ciphertext size.
- */
-static int cbc_cts_decrypt(struct ll_crypto_cipher *tfm,
- struct scatterlist *sld,
- struct scatterlist *sls)
-{
- struct blkcipher_desc desc;
- struct scatterlist slst, sldt;
- void *data;
- __u8 sbuf[CIPHER_MAX_BLKSIZE];
- __u8 dbuf[CIPHER_MAX_BLKSIZE];
- unsigned int blksize, blks, tail;
- int rc;
-
- blksize = ll_crypto_blkcipher_blocksize(tfm);
- blks = sls->length / blksize;
- tail = sls->length % blksize;
- LASSERT(blks > 0 && tail > 0);
-
- /* save current IV, and set IV to zero */
- ll_crypto_blkcipher_get_iv(tfm, sbuf, blksize);
- ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);
-
- /* D(n) = Decrypt(K, C(n-1)) */
- slst = *sls;
- slst.offset += (blks - 1) * blksize;
- slst.length = blksize;
-
- buf_to_sl(&sldt, dbuf, blksize);
- desc.tfm = tfm;
- desc.flags = 0;
-
- rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
- if (unlikely(rc)) {
- CERROR("decrypt C(n-1) (%u): %d\n", slst.length, rc);
- return rc;
- }
-
- /* restore IV */
- ll_crypto_blkcipher_set_iv(tfm, sbuf, blksize);
-
- data = cfs_kmap(sls->page);
- /* C(n) = C(n) | TAIL(D(n)) */
- memcpy(dbuf, data + sls->offset + blks * blksize, tail);
- /* swab C(n) and C(n-1) */
- memcpy(sbuf, data + sls->offset + (blks - 1) * blksize, blksize);
- memcpy(data + sls->offset + (blks - 1) * blksize, dbuf, blksize);
- cfs_kunmap(sls->page);
-
- /* do cbc decrypt */
- buf_to_sl(&slst, sbuf, blksize);
- buf_to_sl(&sldt, dbuf, blksize);
-
- /* decrypt head */
- rc = ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length - tail);
- if (unlikely(rc)) {
- CERROR("decrypt head (%u) data: %d\n", sls->length - tail, rc);
- return rc;
- }
- /* decrypt tail */
- rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
- if (unlikely(rc)) {
- CERROR("decrypt tail (%u) data: %d\n", slst.length, rc);
- return rc;
- }
-
- /* truncate to original ciphertext size */
- data = cfs_kmap(sld->page);
- memcpy(data + sld->offset + blks * blksize, dbuf, tail);
- cfs_kunmap(sld->page);
-
- return 0;
-}
-
-static inline int do_cts_tfm(struct ll_crypto_cipher *tfm,
- int encrypt,
- struct scatterlist *sld,
- struct scatterlist *sls)
-{
-#ifndef HAVE_ASYNC_BLOCK_CIPHER
- LASSERT(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC);
-#endif
-
- if (encrypt)
- return cbc_cts_encrypt(tfm, sld, sls);
- else
- return cbc_cts_decrypt(tfm, sld, sls);
-}
-
-/*
- * normal encrypt/decrypt of data of even blocksize
- */
-static inline int do_cipher_tfm(struct ll_crypto_cipher *tfm,
- int encrypt,
- struct scatterlist *sld,
- struct scatterlist *sls)
-{
- struct blkcipher_desc desc;
- desc.tfm = tfm;
- desc.flags = 0;
- if (encrypt)
- return ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length);
- else
- return ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length);
-}
-
-static struct ll_crypto_cipher *get_stream_cipher(__u8 *key, unsigned int keylen)
-{
- const struct sptlrpc_ciph_type *ct;
- struct ll_crypto_cipher *tfm;
- int rc;
-
- /* using ARC4, the only stream cipher in linux for now */
- ct = sptlrpc_get_ciph_type(BULK_CIPH_ALG_ARC4);
- LASSERT(ct);
-
- tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0);
- if (tfm == NULL) {
- CERROR("Failed to allocate stream TFM %s\n", ct->sct_name);
- return NULL;
- }
- LASSERT(ll_crypto_blkcipher_blocksize(tfm));
-
- if (keylen > ct->sct_keysize)
- keylen = ct->sct_keysize;
-
- LASSERT(keylen >= crypto_tfm_alg_min_keysize(tfm));
- LASSERT(keylen <= crypto_tfm_alg_max_keysize(tfm));
-
- rc = ll_crypto_blkcipher_setkey(tfm, key, keylen);
- if (rc) {
- CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
- ll_crypto_free_blkcipher(tfm);
- return NULL;
- }
-
- return tfm;
-}
-
-static int do_bulk_privacy(struct gss_ctx *gctx,
- struct ptlrpc_bulk_desc *desc,
- int encrypt, __u32 alg,
- struct ptlrpc_bulk_sec_desc *bsd)
-{
- const struct sptlrpc_ciph_type *ct = sptlrpc_get_ciph_type(alg);
- struct ll_crypto_cipher *tfm;
- struct ll_crypto_cipher *stfm = NULL; /* backup stream cipher */
- struct scatterlist sls, sld, *sldp;
- unsigned int blksize, keygen_size;
- int i, rc;
- __u8 key[CIPHER_MAX_KEYSIZE];
-
- LASSERT(ct);
-
- if (encrypt)
- bsd->bsd_ciph_alg = BULK_CIPH_ALG_NULL;
-
- if (alg == BULK_CIPH_ALG_NULL)
- return 0;
-
- if (desc->bd_iov_count <= 0) {
- if (encrypt)
- bsd->bsd_ciph_alg = alg;
- return 0;
- }
-
- tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0 );
- if (tfm == NULL) {
- CERROR("Failed to allocate TFM %s\n", ct->sct_name);
- return -ENOMEM;
- }
- blksize = ll_crypto_blkcipher_blocksize(tfm);
-
- LASSERT(crypto_tfm_alg_max_keysize(tfm) >= ct->sct_keysize);
- LASSERT(crypto_tfm_alg_min_keysize(tfm) <= ct->sct_keysize);
- LASSERT(ct->sct_ivsize == 0 ||
- ll_crypto_blkcipher_ivsize(tfm) == ct->sct_ivsize);
- LASSERT(ct->sct_keysize <= CIPHER_MAX_KEYSIZE);
- LASSERT(blksize <= CIPHER_MAX_BLKSIZE);
-
- /* generate ramdom key seed and compute the secret key based on it.
- * note determined by algorithm which lgss_plain_encrypt use, it
- * might require the key size be its (blocksize * n). so here for
- * simplicity, we force it's be n * MAX_BLKSIZE by padding 0 */
- keygen_size = (ct->sct_keysize + CIPHER_MAX_BLKSIZE - 1) &
- ~(CIPHER_MAX_BLKSIZE - 1);
- if (encrypt) {
- get_random_bytes(bsd->bsd_key, ct->sct_keysize);
- if (ct->sct_keysize < keygen_size)
- memset(bsd->bsd_key + ct->sct_keysize, 0,
- keygen_size - ct->sct_keysize);
- }
-
- rc = lgss_plain_encrypt(gctx, 0, keygen_size, bsd->bsd_key, key);
- if (rc) {
- CERROR("failed to compute secret key: %d\n", rc);
- goto out;
- }
-
- rc = ll_crypto_blkcipher_setkey(tfm, key, ct->sct_keysize);
- if (rc) {
- CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
- goto out;
- }
-
- /* stream cipher doesn't need iv */
- if (blksize > 1)
- ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);
-
- for (i = 0; i < desc->bd_iov_count; i++) {
- sls.page = desc->bd_iov[i].kiov_page;
- sls.offset = desc->bd_iov[i].kiov_offset;
- sls.length = desc->bd_iov[i].kiov_len;
-
- if (unlikely(sls.length == 0)) {
- CWARN("page %d with 0 length data?\n", i);
- continue;
- }
-
- if (unlikely(sls.offset % blksize)) {
- CERROR("page %d with odd offset %u, TFM %s\n",
- i, sls.offset, ct->sct_name);
- rc = -EINVAL;
- goto out;
- }
-
- if (desc->bd_enc_pages) {
- sld.page = desc->bd_enc_pages[i];
- sld.offset = desc->bd_iov[i].kiov_offset;
- sld.length = desc->bd_iov[i].kiov_len;
-
- sldp = &sld;
- } else {
- sldp = &sls;
- }
-
- if (likely(sls.length % blksize == 0)) {
- /* data length is n * blocksize, do the normal tfm */
- rc = do_cipher_tfm(tfm, encrypt, sldp, &sls);
- } else if (sls.length < blksize) {
- /* odd data length, and smaller than 1 block, CTS
- * doesn't work in this case because it requires
- * transfer a modified IV to peer. here we use a
- * "backup" stream cipher to do the tfm */
- if (stfm == NULL) {
- stfm = get_stream_cipher(key, ct->sct_keysize);
- if (tfm == NULL) {
- rc = -ENOMEM;
- goto out;
- }
- }
- rc = do_cipher_tfm(stfm, encrypt, sldp, &sls);
- } else {
- /* odd data length but > 1 block, do CTS tfm */
- rc = do_cts_tfm(tfm, encrypt, sldp, &sls);
- }
-
- if (unlikely(rc)) {
- CERROR("error %s page %d/%d: %d\n",
- encrypt ? "encrypt" : "decrypt",
- i + 1, desc->bd_iov_count, rc);
- goto out;
- }
-
- if (desc->bd_enc_pages)
- desc->bd_iov[i].kiov_page = desc->bd_enc_pages[i];
- }
-
- if (encrypt)
- bsd->bsd_ciph_alg = alg;
-
-out:
- if (stfm)
- ll_crypto_free_blkcipher(stfm);
-
- ll_crypto_free_blkcipher(tfm);
- return rc;
-}
-
 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
 struct ptlrpc_request *req,
 struct ptlrpc_bulk_desc *desc)
 {
 struct gss_cli_ctx *gctx;
 struct lustre_msg *msg;
- struct ptlrpc_bulk_sec_desc *bsdr;
- int offset, rc;
+ struct ptlrpc_bulk_sec_desc *bsd;
+ rawobj_t token;
+ __u32 maj;
+ int offset;
+ int rc;
 ENTRY;
 
 LASSERT(req->rq_pack_bulk);
 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
 
- switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
+ gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+ LASSERT(gctx->gc_mechctx);
+
+ switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
 case SPTLRPC_SVC_NULL:
 LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
 msg = req->rq_reqbuf;
@@ -472,42 +101,68 @@ int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
 LBUG();
 }
 
- /* make checksum */
- rc = bulk_csum_cli_request(desc, req->rq_bulk_read,
- req->rq_flvr.sf_bulk_hash, msg, offset);
- if (rc) {
- CERROR("client bulk %s: failed to generate checksum: %d\n",
- req->rq_bulk_read ? "read" : "write", rc);
- RETURN(rc);
- }
+ bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
+ bsd->bsd_version = 0;
+ bsd->bsd_flags = 0;
+ bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
 
- if (req->rq_flvr.sf_bulk_ciph == BULK_CIPH_ALG_NULL)
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
 RETURN(0);
 
- /* previous bulk_csum_cli_request() has verified bsdr is good */
- bsdr = lustre_msg_buf(msg, offset, 0);
+ LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
+ bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
 
 if (req->rq_bulk_read) {
- bsdr->bsd_ciph_alg = req->rq_flvr.sf_bulk_ciph;
- RETURN(0);
- }
+ /*
+ * bulk read: prepare receiving pages only for privacy mode.
+ */
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
+ return gss_cli_prep_bulk(req, desc);
+ } else {
+ /*
+ * bulk write: sign or encrypt bulk pages.
+ */
+ bsd->bsd_nob = desc->bd_nob;
+
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
+ /* integrity mode */
+ token.data = bsd->bsd_data;
+ token.len = lustre_msg_buflen(msg, offset) -
+ sizeof(*bsd);
+
+ maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
+ desc->bd_iov_count, desc->bd_iov,
+ &token);
+ if (maj != GSS_S_COMPLETE) {
+ CWARN("failed to sign bulk data: %x\n", maj);
+ RETURN(-EACCES);
+ }
+ } else {
+ /* privacy mode */
+ if (desc->bd_iov_count == 0)
+ RETURN(0);
+
+ rc = sptlrpc_enc_pool_get_pages(desc);
+ if (rc) {
+ CERROR("bulk write: failed to allocate "
+ "encryption pages: %d\n", rc);
+ RETURN(rc);
+ }
 
- /* it turn out to be bulk write */
- rc = sptlrpc_enc_pool_get_pages(desc);
- if (rc) {
- CERROR("bulk write: failed to allocate encryption pages\n");
- RETURN(rc);
- }
+ token.data = bsd->bsd_data;
+ token.len = lustre_msg_buflen(msg, offset) -
+ sizeof(*bsd);
 
- gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
- LASSERT(gctx->gc_mechctx);
-
- rc = do_bulk_privacy(gctx->gc_mechctx, desc, 1,
- req->rq_flvr.sf_bulk_ciph, bsdr);
- if (rc)
- CERROR("bulk write: client failed to encrypt pages\n");
+ maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
+ if (maj != GSS_S_COMPLETE) {
+ CWARN("failed to encrypt bulk data: %x\n", maj);
+ RETURN(-EACCES);
+ }
+ }
+ }
 
- RETURN(rc);
+ RETURN(0);
 }
 
 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
@@ -517,73 +172,200 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 struct gss_cli_ctx *gctx;
 struct lustre_msg *rmsg, *vmsg;
 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
- int roff, voff, rc;
+ rawobj_t token;
+ __u32 maj;
+ int roff, voff;
 ENTRY;
 
 LASSERT(req->rq_pack_bulk);
 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
 
- switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
+ switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
 case SPTLRPC_SVC_NULL:
 vmsg = req->rq_repdata;
+ LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
 voff = vmsg->lm_bufcount - 1;
- LASSERT(vmsg && vmsg->lm_bufcount >= 3);
 rmsg = req->rq_reqbuf;
+ LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
 roff = rmsg->lm_bufcount - 1; /* last segment */
- LASSERT(rmsg && rmsg->lm_bufcount >= 3);
 break;
 case SPTLRPC_SVC_AUTH:
 case SPTLRPC_SVC_INTG:
 vmsg = req->rq_repdata;
+ LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
 voff = vmsg->lm_bufcount - 2;
- LASSERT(vmsg && vmsg->lm_bufcount >= 4);
 rmsg = req->rq_reqbuf;
+ LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
 roff = rmsg->lm_bufcount - 2; /* second last segment */
- LASSERT(rmsg && rmsg->lm_bufcount >= 4);
 break;
 case SPTLRPC_SVC_PRIV:
 vmsg = req->rq_repdata;
+ LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
 voff = vmsg->lm_bufcount - 1;
- LASSERT(vmsg && vmsg->lm_bufcount >= 2);
 rmsg = req->rq_clrbuf;
+ LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
 roff = rmsg->lm_bufcount - 1; /* last segment */
- LASSERT(rmsg && rmsg->lm_bufcount >= 2);
 break;
 default:
 LBUG();
 }
 
- if (req->rq_bulk_read) {
- bsdr = lustre_msg_buf(rmsg, roff, 0);
- if (bsdr->bsd_ciph_alg == BULK_CIPH_ALG_NULL)
- goto verify_csum;
-
- bsdv = lustre_msg_buf(vmsg, voff, 0);
- if (bsdr->bsd_ciph_alg != bsdv->bsd_ciph_alg) {
- CERROR("bulk read: cipher algorithm mismatch: client "
- "request %s but server reply with %s. try to "
try to " - "use the new one for decryption\n", - sptlrpc_get_ciph_name(bsdr->bsd_ciph_alg), - sptlrpc_get_ciph_name(bsdv->bsd_ciph_alg)); + bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr)); + bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv)); + LASSERT(bsdr && bsdv); + + if (bsdr->bsd_version != bsdv->bsd_version || + bsdr->bsd_type != bsdv->bsd_type || + bsdr->bsd_svc != bsdv->bsd_svc) { + CERROR("bulk security descriptor mismatch: " + "(%u,%u,%u) != (%u,%u,%u)\n", + bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc, + bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc); + RETURN(-EPROTO); + } + + LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL || + bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG || + bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV); + + /* + * in privacy mode if return success, make sure bd_nob_transferred + * is the actual size of the clear text, otherwise upper layer + * may be surprised. + */ + if (req->rq_bulk_write) { + if (bsdv->bsd_flags & BSD_FL_ERR) { + CERROR("server reported bulk i/o failure\n"); + RETURN(-EIO); } + if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) + desc->bd_nob_transferred = desc->bd_nob; + } else { + /* + * bulk read, upon return success, bd_nob_transferred is + * the size of plain text actually received. + */ gctx = container_of(ctx, struct gss_cli_ctx, gc_base); LASSERT(gctx->gc_mechctx); - rc = do_bulk_privacy(gctx->gc_mechctx, desc, 0, - bsdv->bsd_ciph_alg, bsdv); - if (rc) { - CERROR("bulk read: client failed to decrypt data\n"); - RETURN(rc); + if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) { + int i, nob; + + /* fix the actual data size */ + for (i = 0, nob = 0; i < desc->bd_iov_count; i++) { + if (desc->bd_iov[i].kiov_len + nob > + desc->bd_nob_transferred) { + desc->bd_iov[i].kiov_len = + desc->bd_nob_transferred - nob; + } + nob += desc->bd_iov[i].kiov_len; + } + + token.data = bsdv->bsd_data; + token.len = lustre_msg_buflen(vmsg, voff) - + sizeof(*bsdv); + + maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL, + desc->bd_iov_count, desc->bd_iov, + &token); + if (maj != GSS_S_COMPLETE) { + CERROR("failed to verify bulk read: %x\n", maj); + RETURN(-EACCES); + } + } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) { + desc->bd_nob = bsdv->bsd_nob; + if (desc->bd_nob == 0) + RETURN(0); + + token.data = bsdv->bsd_data; + token.len = lustre_msg_buflen(vmsg, voff) - + sizeof(*bsdr); + + maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc, + &token, 1); + if (maj != GSS_S_COMPLETE) { + CERROR("failed to decrypt bulk read: %x\n", + maj); + RETURN(-EACCES); + } + + desc->bd_nob_transferred = desc->bd_nob; } } -verify_csum: - rc = bulk_csum_cli_reply(desc, req->rq_bulk_read, - rmsg, roff, vmsg, voff); + RETURN(0); +} + +static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc, + struct gss_ctx *mechctx) +{ + int rc; + + if (desc->bd_iov_count == 0) + return 0; + + rc = sptlrpc_enc_pool_get_pages(desc); + if (rc) + return rc; + + if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE) + return -EACCES; + + return 0; +} + +int gss_cli_prep_bulk(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc) +{ + int rc; + ENTRY; + + LASSERT(req->rq_cli_ctx); + LASSERT(req->rq_pack_bulk); + LASSERT(req->rq_bulk_read); + + if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV) + RETURN(0); + + rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx); + if (rc) + CERROR("bulk read: failed to prepare encryption " + "pages: %d\n", rc); + + RETURN(rc); +} + +int gss_svc_prep_bulk(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc) +{ + struct gss_svc_reqctx 
+ struct ptlrpc_bulk_sec_desc *bsd;
+ int rc;
+ ENTRY;
+
+ LASSERT(req->rq_svc_ctx);
+ LASSERT(req->rq_pack_bulk);
+ LASSERT(req->rq_bulk_write);
+
+ grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
+ LASSERT(grctx->src_reqbsd);
+ LASSERT(grctx->src_repbsd);
+ LASSERT(grctx->src_ctx);
+ LASSERT(grctx->src_ctx->gsc_mechctx);
+
+ bsd = grctx->src_reqbsd;
+ if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
+ RETURN(0);
+
+ rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
+ if (rc)
+ CERROR("bulk write: failed to prepare encryption "
+ "pages: %d\n", rc);
+
 RETURN(rc);
 }
 
@@ -591,7 +373,9 @@ int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
 struct ptlrpc_bulk_desc *desc)
 {
 struct gss_svc_reqctx *grctx;
- int rc;
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ rawobj_t token;
+ __u32 maj;
 ENTRY;
 
 LASSERT(req->rq_svc_ctx);
@@ -605,29 +389,64 @@ int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
 LASSERT(grctx->src_ctx);
 LASSERT(grctx->src_ctx->gsc_mechctx);
 
- /* decrypt bulk data if it's encrypted */
- if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
- rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 0,
- grctx->src_reqbsd->bsd_ciph_alg,
- grctx->src_reqbsd);
- if (rc) {
- CERROR("bulk write: server failed to decrypt data\n");
- RETURN(rc);
+ bsdr = grctx->src_reqbsd;
+ bsdv = grctx->src_repbsd;
+
+ /* bsdr has been sanity checked during unpacking */
+ bsdv->bsd_version = 0;
+ bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsdv->bsd_svc = bsdr->bsd_svc;
+ bsdv->bsd_flags = 0;
+
+ switch (bsdv->bsd_svc) {
+ case SPTLRPC_BULK_SVC_INTG:
+ token.data = bsdr->bsd_data;
+ token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
+
+ maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
+ desc->bd_iov_count, desc->bd_iov, &token);
+ if (maj != GSS_S_COMPLETE) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("failed to verify bulk signature: %x\n", maj);
+ RETURN(-EACCES);
+ }
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ if (bsdr->bsd_nob != desc->bd_nob) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("prepared nob %d doesn't match the actual "
+ "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
+ RETURN(-EPROTO);
 }
- }
 
- /* verify bulk data checksum */
- rc = bulk_csum_svc(desc, req->rq_bulk_read,
- grctx->src_reqbsd, grctx->src_reqbsd_size,
- grctx->src_repbsd, grctx->src_repbsd_size);
+ if (desc->bd_iov_count == 0) {
+ LASSERT(desc->bd_nob == 0);
+ break;
+ }
 
- RETURN(rc);
+ token.data = bsdr->bsd_data;
+ token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
+
+ maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
+ desc, &token, 0);
+ if (maj != GSS_S_COMPLETE) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("failed to decrypt bulk data: %x\n", maj);
+ RETURN(-EACCES);
+ }
+ break;
+ }
+
+ RETURN(0);
 }
 
 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
 struct ptlrpc_bulk_desc *desc)
 {
 struct gss_svc_reqctx *grctx;
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ rawobj_t token;
+ __u32 maj;
 int rc;
 ENTRY;
 
@@ -642,22 +461,56 @@ int gss_svc_wrap_bulk(struct ptlrpc_request *req,
 LASSERT(grctx->src_ctx);
 LASSERT(grctx->src_ctx->gsc_mechctx);
 
- /* generate bulk data checksum */
- rc = bulk_csum_svc(desc, req->rq_bulk_read,
- grctx->src_reqbsd, grctx->src_reqbsd_size,
- grctx->src_repbsd, grctx->src_repbsd_size);
- if (rc)
- RETURN(rc);
-
- /* encrypt bulk data if required */
- if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
- rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 1,
- grctx->src_reqbsd->bsd_ciph_alg,
- grctx->src_repbsd);
- if (rc)
- CERROR("bulk read: server failed to encrypt data: "
- "rc %d\n", rc);
%d\n", rc); + bsdr = grctx->src_reqbsd; + bsdv = grctx->src_repbsd; + + /* bsdr has been sanity checked during unpacking */ + bsdv->bsd_version = 0; + bsdv->bsd_type = SPTLRPC_BULK_DEFAULT; + bsdv->bsd_svc = bsdr->bsd_svc; + bsdv->bsd_flags = 0; + + switch (bsdv->bsd_svc) { + case SPTLRPC_BULK_SVC_INTG: + token.data = bsdv->bsd_data; + token.len = grctx->src_repbsd_size - sizeof(*bsdv); + + maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL, + desc->bd_iov_count, desc->bd_iov, &token); + if (maj != GSS_S_COMPLETE) { + bsdv->bsd_flags |= BSD_FL_ERR; + CERROR("failed to sign bulk data: %x\n", maj); + RETURN(-EACCES); + } + break; + case SPTLRPC_BULK_SVC_PRIV: + bsdv->bsd_nob = desc->bd_nob; + + if (desc->bd_iov_count == 0) { + LASSERT(desc->bd_nob == 0); + break; + } + + rc = sptlrpc_enc_pool_get_pages(desc); + if (rc) { + bsdv->bsd_flags |= BSD_FL_ERR; + CERROR("bulk read: failed to allocate encryption " + "pages: %d\n", rc); + RETURN(rc); + } + + token.data = bsdv->bsd_data; + token.len = grctx->src_repbsd_size - sizeof(*bsdv); + + maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx, + desc, &token, 1); + if (maj != GSS_S_COMPLETE) { + bsdv->bsd_flags |= BSD_FL_ERR; + CERROR("failed to encrypt bulk data: %x\n", maj); + RETURN(-EACCES); + } + break; } - RETURN(rc); + RETURN(0); }