LU-3333 ptlrpc: Protect request buffer changing
diff --git a/lustre/ptlrpc/gss/gss_bulk.c b/lustre/ptlrpc/gss/gss_bulk.c
index 9f829ad..edc5928 100644
@@ -1,28 +1,41 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
  *
- * Copyright (C) 2006 Cluster File Systems, Inc.
- *   Author: Eric Mei <ericm@clusterfs.com>
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- *   This file is part of Lustre, http://www.lustre.org.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- *   Lustre is free software; you can redistribute it and/or
- *   modify it under the terms of version 2 of the GNU General Public
- *   License as published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- *   Lustre is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Lustre; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/gss/gss_bulk.c
+ *
+ * Author: Eric Mei <eric.mei@sun.com>
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_SEC
 #ifdef __KERNEL__
 #include <linux/init.h>
@@ -30,7 +43,6 @@
 #include <linux/slab.h>
 #include <linux/dcache.h>
 #include <linux/fs.h>
-#include <linux/random.h>
 #include <linux/mutex.h>
 #include <linux/crypto.h>
 #else
 #include "gss_internal.h"
 #include "gss_api.h"
 
-static
-int do_bulk_privacy(struct gss_ctx *gctx,
-                    struct ptlrpc_bulk_desc *desc,
-                    int encrypt, __u32 alg,
-                    struct ptlrpc_bulk_sec_desc *bsd)
-{
-        struct crypto_tfm  *tfm;
-        struct scatterlist  sg, sg2, *sgd;
-        unsigned int        blksize;
-        int                 i, rc;
-        __u8                local_iv[sizeof(bsd->bsd_iv)];
-
-        LASSERT(alg < BULK_PRIV_ALG_MAX);
-
-        if (encrypt)
-                bsd->bsd_priv_alg = BULK_PRIV_ALG_NULL;
-
-        if (alg == BULK_PRIV_ALG_NULL)
-                return 0;
-
-        tfm = crypto_alloc_tfm(sptlrpc_bulk_priv_alg2name(alg),
-                               sptlrpc_bulk_priv_alg2flags(alg));
-        if (tfm == NULL) {
-                CERROR("Failed to allocate TFM %s\n",
-                       sptlrpc_bulk_priv_alg2name(alg));
-                return -ENOMEM;
-        }
-
-        blksize = crypto_tfm_alg_blocksize(tfm);
-        LASSERT(blksize <= sizeof(local_iv));
-
-        if (encrypt)
-                get_random_bytes(bsd->bsd_iv, sizeof(bsd->bsd_iv));
-
-        /* compute the secret iv */
-        rc = lgss_plain_encrypt(gctx, 0,
-                                sizeof(local_iv), bsd->bsd_iv, local_iv);
-        if (rc) {
-                CERROR("failed to compute secret iv: %d\n", rc);
-                goto out;
-        }
-
-        rc = crypto_cipher_setkey(tfm, local_iv, sizeof(local_iv));
-        if (rc) {
-                CERROR("Failed to set key for TFM %s: %d\n",
-                       sptlrpc_bulk_priv_alg2name(alg), rc);
-                goto out;
-        }
-
-        for (i = 0; i < desc->bd_iov_count; i++) {
-                sg.page = desc->bd_iov[i].kiov_page;
-                sg.offset = desc->bd_iov[i].kiov_offset;
-                sg.length = desc->bd_iov[i].kiov_len;
-
-                if (desc->bd_enc_pages) {
-                        sg2.page = desc->bd_enc_pages[i];
-                        sg2.offset = desc->bd_iov[i].kiov_offset;
-                        sg2.length = desc->bd_iov[i].kiov_len;
-
-                        sgd = &sg2;
-                } else
-                        sgd = &sg;
-
-                if (encrypt)
-                        rc = crypto_cipher_encrypt(tfm, sgd, &sg, sg.length);
-                else
-                        rc = crypto_cipher_decrypt(tfm, sgd, &sg, sg.length);
-
-                LASSERT(rc == 0);
-
-                if (desc->bd_enc_pages)
-                        desc->bd_iov[i].kiov_page = desc->bd_enc_pages[i];
-
-                /* although the procedure might be lengthy, the crypto functions
-                 * internally called cond_resched() from time to time.
-                 */
-        }
-
-        if (encrypt)
-                bsd->bsd_priv_alg = alg;
-
-out:
-        crypto_free_tfm(tfm);
-        return rc;
-}
-
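The helper removed above drove the legacy crypto_tfm block cipher one kiov at a time: when separate ciphertext pages (desc->bd_enc_pages) were available it encrypted each clear page into its ciphertext page and then repointed the kiov entry at the ciphertext, otherwise it transformed the page in place. A minimal userspace sketch of that source/destination selection follows; struct frag, xform() and bulk_xform_pages() are hypothetical stand-ins for the kernel structures and crypto calls, and the XOR transform is a placeholder rather than real encryption.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for a kiov entry; not a real Lustre type. */
struct frag {
        unsigned char *data;            /* maps kiov_page + kiov_offset */
        size_t         len;             /* maps kiov_len */
};

/* Placeholder transform standing in for the cipher call; XOR only. */
static void xform(unsigned char *dst, const unsigned char *src, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                dst[i] = src[i] ^ 0x5a;
}

/* Mirrors the removed loop: with separate ciphertext pages, encrypt from
 * the clear page into the ciphertext page and repoint the descriptor at
 * the ciphertext; otherwise transform in place. */
static void bulk_xform_pages(struct frag *clear, struct frag *enc, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                unsigned char *dst = enc ? enc[i].data : clear[i].data;

                xform(dst, clear[i].data, clear[i].len);
                if (enc)
                        clear[i].data = enc[i].data;
        }
}

int main(void)
{
        unsigned char clear_buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned char enc_buf[8];
        struct frag clear[1] = { { clear_buf, sizeof(clear_buf) } };
        struct frag enc[1]   = { { enc_buf, sizeof(enc_buf) } };

        bulk_xform_pages(clear, enc, 1);
        printf("descriptor now points at the %s buffer\n",
               clear[0].data == enc_buf ? "ciphertext" : "clear");
        return 0;
}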
 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                           struct ptlrpc_request *req,
                           struct ptlrpc_bulk_desc *desc)
 {
         struct gss_cli_ctx              *gctx;
         struct lustre_msg               *msg;
-        struct ptlrpc_bulk_sec_desc     *bsdr;
-        int                              offset, rc;
+        struct ptlrpc_bulk_sec_desc     *bsd;
+        rawobj_t                         token;
+        __u32                            maj;
+        int                              offset;
+        int                              rc;
         ENTRY;
 
         LASSERT(req->rq_pack_bulk);
         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
 
-        switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
+        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+        LASSERT(gctx->gc_mechctx);
+
+        switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
         case SPTLRPC_SVC_NULL:
                 LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
                 msg = req->rq_reqbuf;
@@ -169,42 +101,68 @@ int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                 LBUG();
         }
 
-        /* make checksum */
-        rc = bulk_csum_cli_request(desc, req->rq_bulk_read,
-                                   req->rq_flvr.sf_bulk_csum, msg, offset);
-        if (rc) {
-                CERROR("client bulk %s: failed to generate checksum: %d\n",
-                       req->rq_bulk_read ? "read" : "write", rc);
-                RETURN(rc);
-        }
+        bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
+        bsd->bsd_version = 0;
+        bsd->bsd_flags = 0;
+        bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
+        bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
 
-        if (req->rq_flvr.sf_bulk_priv == BULK_PRIV_ALG_NULL)
+        if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                 RETURN(0);
 
-        /* previous bulk_csum_cli_request() has verified bsdr is good */
-        bsdr = lustre_msg_buf(msg, offset, 0);
+        LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
+                bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
 
         if (req->rq_bulk_read) {
-                bsdr->bsd_priv_alg = req->rq_flvr.sf_bulk_priv;
-                RETURN(0);
-        }
-
-        /* it turn out to be bulk write */
-        rc = sptlrpc_enc_pool_get_pages(desc);
-        if (rc) {
-                CERROR("bulk write: failed to allocate encryption pages\n");
-                RETURN(rc);
+                /*
+                 * bulk read: prepare receiving pages only for privacy mode.
+                 */
+                if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
+                        return gss_cli_prep_bulk(req, desc);
+        } else {
+                /*
+                 * bulk write: sign or encrypt bulk pages.
+                 */
+                bsd->bsd_nob = desc->bd_nob;
+
+                if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
+                        /* integrity mode */
+                        token.data = bsd->bsd_data;
+                        token.len = lustre_msg_buflen(msg, offset) -
+                                    sizeof(*bsd);
+
+                        maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
+                                           desc->bd_iov_count, desc->bd_iov,
+                                           &token);
+                        if (maj != GSS_S_COMPLETE) {
+                                CWARN("failed to sign bulk data: %x\n", maj);
+                                RETURN(-EACCES);
+                        }
+                } else {
+                        /* privacy mode */
+                        if (desc->bd_iov_count == 0)
+                                RETURN(0);
+
+                        rc = sptlrpc_enc_pool_get_pages(desc);
+                        if (rc) {
+                                CERROR("bulk write: failed to allocate "
+                                       "encryption pages: %d\n", rc);
+                                RETURN(rc);
+                        }
+
+                        token.data = bsd->bsd_data;
+                        token.len = lustre_msg_buflen(msg, offset) -
+                                    sizeof(*bsd);
+
+                        maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
+                        if (maj != GSS_S_COMPLETE) {
+                                CWARN("failed to encrypt bulk data: %x\n", maj);
+                                RETURN(-EACCES);
+                        }
+                }
         }
 
-        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
-        LASSERT(gctx->gc_mechctx);
-
-        rc = do_bulk_privacy(gctx->gc_mechctx, desc, 1,
-                             req->rq_flvr.sf_bulk_priv, bsdr);
-        if (rc)
-                CERROR("bulk write: client failed to encrypt pages\n");
-
-        RETURN(rc);
+        RETURN(0);
 }
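gss_cli_ctx_wrap_bulk() lays the mechanism token directly behind the fixed ptlrpc_bulk_sec_desc header inside the reserved lustre_msg segment: token.data points at bsd_data and token.len is whatever space is left in that segment after the header. A minimal sketch of that layout arithmetic, using a simplified stand-in descriptor (the real on-wire struct differs) and hypothetical names (rawobj, struct bulk_sec_desc, token_from_segment()):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins; the real rawobj_t and ptlrpc_bulk_sec_desc differ. */
typedef struct {
        uint32_t       len;
        unsigned char *data;
} rawobj;

struct bulk_sec_desc {
        uint8_t  bsd_version;
        uint8_t  bsd_type;
        uint8_t  bsd_svc;
        uint8_t  bsd_flags;
        uint32_t bsd_nob;
        unsigned char bsd_data[];       /* token bytes follow the header */
};

/* The token simply occupies whatever space is left in the reserved
 * lustre_msg segment once the fixed header is accounted for. */
static void token_from_segment(unsigned char *seg, uint32_t seglen,
                               rawobj *token)
{
        struct bulk_sec_desc *bsd = (struct bulk_sec_desc *)seg;

        assert(seglen >= sizeof(*bsd));
        token->data = bsd->bsd_data;
        token->len  = seglen - (uint32_t)sizeof(*bsd);
}

int main(void)
{
        uint32_t seglen = 128;                  /* size of the segment */
        unsigned char *segment = malloc(seglen);/* suitably aligned */
        rawobj token;

        if (segment == NULL)
                return 1;
        token_from_segment(segment, seglen, &token);
        printf("token capacity: %u bytes\n", token.len);
        free(segment);
        return 0;
}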
 
 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
@@ -214,73 +172,200 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
         struct gss_cli_ctx              *gctx;
         struct lustre_msg               *rmsg, *vmsg;
         struct ptlrpc_bulk_sec_desc     *bsdr, *bsdv;
-        int                              roff, voff, rc;
+        rawobj_t                         token;
+        __u32                            maj;
+        int                              roff, voff;
         ENTRY;
 
         LASSERT(req->rq_pack_bulk);
         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
 
-        switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
+        switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
         case SPTLRPC_SVC_NULL:
-                vmsg = req->rq_repbuf;
+                vmsg = req->rq_repdata;
+               LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
                 voff = vmsg->lm_bufcount - 1;
-                LASSERT(vmsg && vmsg->lm_bufcount >= 3);
 
                 rmsg = req->rq_reqbuf;
+               LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
                 roff = rmsg->lm_bufcount - 1; /* last segment */
-                LASSERT(rmsg && rmsg->lm_bufcount >= 3);
                 break;
         case SPTLRPC_SVC_AUTH:
         case SPTLRPC_SVC_INTG:
-                vmsg = req->rq_repbuf;
+                vmsg = req->rq_repdata;
+               LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
                 voff = vmsg->lm_bufcount - 2;
-                LASSERT(vmsg && vmsg->lm_bufcount >= 4);
 
                 rmsg = req->rq_reqbuf;
+               LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
                 roff = rmsg->lm_bufcount - 2; /* second last segment */
-                LASSERT(rmsg && rmsg->lm_bufcount >= 4);
                 break;
         case SPTLRPC_SVC_PRIV:
-                vmsg = req->rq_repbuf;
+                vmsg = req->rq_repdata;
+               LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
                 voff = vmsg->lm_bufcount - 1;
-                LASSERT(vmsg && vmsg->lm_bufcount >= 2);
 
                 rmsg = req->rq_clrbuf;
+               LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
                 roff = rmsg->lm_bufcount - 1; /* last segment */
-                LASSERT(rmsg && rmsg->lm_bufcount >= 2);
                 break;
         default:
                 LBUG();
         }
 
-        if (req->rq_bulk_read) {
-                bsdr = lustre_msg_buf(rmsg, roff, 0);
-                if (bsdr->bsd_priv_alg == BULK_PRIV_ALG_NULL)
-                        goto verify_csum;
-
-                bsdv = lustre_msg_buf(vmsg, voff, 0);
-                if (bsdr->bsd_priv_alg != bsdv->bsd_priv_alg) {
-                        CERROR("bulk read: cipher algorithm mismatch: client "
-                               "request %s but server reply with %s. try to "
-                               "use the new one for decryption\n",
-                               sptlrpc_bulk_priv_alg2name(bsdr->bsd_priv_alg),
-                               sptlrpc_bulk_priv_alg2name(bsdv->bsd_priv_alg));
+        bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
+        bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
+        LASSERT(bsdr && bsdv);
+
+        if (bsdr->bsd_version != bsdv->bsd_version ||
+            bsdr->bsd_type != bsdv->bsd_type ||
+            bsdr->bsd_svc != bsdv->bsd_svc) {
+                CERROR("bulk security descriptor mismatch: "
+                       "(%u,%u,%u) != (%u,%u,%u)\n",
+                       bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
+                       bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
+                RETURN(-EPROTO);
+        }
+
+        LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
+                bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
+                bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
+
+        /*
+         * in privacy mode, if we return success, make sure bd_nob_transferred
+         * is the actual size of the clear text; otherwise the upper layer
+         * may be surprised.
+         */
+        if (req->rq_bulk_write) {
+                if (bsdv->bsd_flags & BSD_FL_ERR) {
+                        CERROR("server reported bulk i/o failure\n");
+                        RETURN(-EIO);
                 }
 
+                if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
+                        desc->bd_nob_transferred = desc->bd_nob;
+        } else {
+                /*
+                 * bulk read, upon return success, bd_nob_transferred is
+                 * the size of plain text actually received.
+                 */
                 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
                 LASSERT(gctx->gc_mechctx);
 
-                rc = do_bulk_privacy(gctx->gc_mechctx, desc, 0,
-                                     bsdv->bsd_priv_alg, bsdv);
-                if (rc) {
-                        CERROR("bulk read: client failed to decrypt data\n");
-                        RETURN(rc);
+                if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
+                        int i, nob;
+
+                        /* fix the actual data size */
+                        for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
+                                if (desc->bd_iov[i].kiov_len + nob >
+                                    desc->bd_nob_transferred) {
+                                        desc->bd_iov[i].kiov_len =
+                                                desc->bd_nob_transferred - nob;
+                                }
+                                nob += desc->bd_iov[i].kiov_len;
+                        }
+
+                        token.data = bsdv->bsd_data;
+                        token.len = lustre_msg_buflen(vmsg, voff) -
+                                    sizeof(*bsdv);
+
+                        maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
+                                              desc->bd_iov_count, desc->bd_iov,
+                                              &token);
+                        if (maj != GSS_S_COMPLETE) {
+                                CERROR("failed to verify bulk read: %x\n", maj);
+                                RETURN(-EACCES);
+                        }
+                } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
+                        desc->bd_nob = bsdv->bsd_nob;
+                        if (desc->bd_nob == 0)
+                                RETURN(0);
+
+                        token.data = bsdv->bsd_data;
+                        token.len = lustre_msg_buflen(vmsg, voff) -
+                                    sizeof(*bsdr);
+
+                        maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
+                                               &token, 1);
+                        if (maj != GSS_S_COMPLETE) {
+                                CERROR("failed to decrypt bulk read: %x\n",
+                                       maj);
+                                RETURN(-EACCES);
+                        }
+
+                        desc->bd_nob_transferred = desc->bd_nob;
                 }
         }
 
-verify_csum:
-        rc = bulk_csum_cli_reply(desc, req->rq_bulk_read,
-                                 rmsg, roff, vmsg, voff);
+        RETURN(0);
+}
+
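A bulk read may transfer fewer bytes than the pages were prepared for, so before verifying the MIC the client trims each kiov_len until the fragment lengths sum to bd_nob_transferred (the "fix the actual data size" loop above). A standalone sketch of that clamping, with a hypothetical struct frag standing in for the kiov entries:

#include <stdio.h>

/* Simplified fragment: only the length matters for this sketch. */
struct frag {
        int len;
};

/* Trim prepared fragment lengths so their total never exceeds the number
 * of bytes the peer actually sent. */
static void trim_frags(struct frag *iov, int count, int transferred)
{
        int i, nob = 0;

        for (i = 0; i < count; i++) {
                if (iov[i].len + nob > transferred)
                        iov[i].len = transferred - nob;
                nob += iov[i].len;
        }
}

int main(void)
{
        struct frag iov[3] = { { 4096 }, { 4096 }, { 4096 } };

        trim_frags(iov, 3, 5000);       /* short read: only 5000 bytes came */
        printf("%d %d %d\n", iov[0].len, iov[1].len, iov[2].len);
        /* prints: 4096 904 0 */
        return 0;
}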
+static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
+                         struct gss_ctx *mechctx)
+{
+        int     rc;
+
+        if (desc->bd_iov_count == 0)
+                return 0;
+
+        rc = sptlrpc_enc_pool_get_pages(desc);
+        if (rc)
+                return rc;
+
+        if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
+                return -EACCES;
+
+        return 0;
+}
+
+int gss_cli_prep_bulk(struct ptlrpc_request *req,
+                      struct ptlrpc_bulk_desc *desc)
+{
+        int             rc;
+        ENTRY;
+
+        LASSERT(req->rq_cli_ctx);
+        LASSERT(req->rq_pack_bulk);
+        LASSERT(req->rq_bulk_read);
+
+        if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
+                RETURN(0);
+
+        rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
+        if (rc)
+                CERROR("bulk read: failed to prepare encryption "
+                       "pages: %d\n", rc);
+
+        RETURN(rc);
+}
+
+int gss_svc_prep_bulk(struct ptlrpc_request *req,
+                      struct ptlrpc_bulk_desc *desc)
+{
+        struct gss_svc_reqctx        *grctx;
+        struct ptlrpc_bulk_sec_desc  *bsd;
+        int                           rc;
+        ENTRY;
+
+        LASSERT(req->rq_svc_ctx);
+        LASSERT(req->rq_pack_bulk);
+        LASSERT(req->rq_bulk_write);
+
+        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
+        LASSERT(grctx->src_reqbsd);
+        LASSERT(grctx->src_repbsd);
+        LASSERT(grctx->src_ctx);
+        LASSERT(grctx->src_ctx->gsc_mechctx);
+
+        bsd = grctx->src_reqbsd;
+        if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
+                RETURN(0);
+
+        rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
+        if (rc)
+                CERROR("bulk write: failed to prepare encryption "
+                       "pages: %d\n", rc);
+
         RETURN(rc);
 }
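gss_cli_prep_bulk() (bulk reads) and gss_svc_prep_bulk() (bulk writes) both funnel into the shared gss_prep_bulk() helper, so whichever side receives the data stages encryption-pool pages before the transfer starts, and only when the flavor's bulk service is privacy; null and integrity modes receive straight into the caller's pages. A minimal sketch of that guard, with hypothetical stand-ins (prep_bulk_if_needed(), get_cipher_pages(), mech_prep_bulk()) for the real calls:

#include <stdio.h>

/* Hypothetical bulk service levels mirroring SPTLRPC_BULK_SVC_*. */
enum bulk_svc { BULK_SVC_NULL, BULK_SVC_INTG, BULK_SVC_PRIV };

/* Hypothetical hooks standing in for the enc-pool page allocation and the
 * mechanism's prep step; both return 0 on success here. */
static int get_cipher_pages(void) { return 0; }
static int mech_prep_bulk(void)   { return 0; }

/* Only privacy mode needs ciphertext pages staged before the transfer. */
static int prep_bulk_if_needed(enum bulk_svc svc, int iov_count)
{
        int rc;

        if (svc != BULK_SVC_PRIV || iov_count == 0)
                return 0;

        rc = get_cipher_pages();
        if (rc)
                return rc;

        return mech_prep_bulk() ? -13 /* -EACCES */ : 0;
}

int main(void)
{
        printf("priv: %d\n", prep_bulk_if_needed(BULK_SVC_PRIV, 4));
        printf("intg: %d\n", prep_bulk_if_needed(BULK_SVC_INTG, 4));
        return 0;
}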
 
@@ -288,7 +373,9 @@ int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
                         struct ptlrpc_bulk_desc *desc)
 {
         struct gss_svc_reqctx        *grctx;
-        int                           rc;
+        struct ptlrpc_bulk_sec_desc  *bsdr, *bsdv;
+        rawobj_t                      token;
+        __u32                         maj;
         ENTRY;
 
         LASSERT(req->rq_svc_ctx);
@@ -302,29 +389,64 @@ int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
         LASSERT(grctx->src_ctx);
         LASSERT(grctx->src_ctx->gsc_mechctx);
 
-        /* decrypt bulk data if it's encrypted */
-        if (grctx->src_reqbsd->bsd_priv_alg != BULK_PRIV_ALG_NULL) {
-                rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 0,
-                                     grctx->src_reqbsd->bsd_priv_alg,
-                                     grctx->src_reqbsd);
-                if (rc) {
-                        CERROR("bulk write: server failed to decrypt data\n");
-                        RETURN(rc);
+        bsdr = grctx->src_reqbsd;
+        bsdv = grctx->src_repbsd;
+
+        /* bsdr has been sanity checked during unpacking */
+        bsdv->bsd_version = 0;
+        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
+        bsdv->bsd_svc = bsdr->bsd_svc;
+        bsdv->bsd_flags = 0;
+
+        switch (bsdv->bsd_svc) {
+        case SPTLRPC_BULK_SVC_INTG:
+                token.data = bsdr->bsd_data;
+                token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
+
+                maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
+                                      desc->bd_iov_count, desc->bd_iov, &token);
+                if (maj != GSS_S_COMPLETE) {
+                        bsdv->bsd_flags |= BSD_FL_ERR;
+                        CERROR("failed to verify bulk signature: %x\n", maj);
+                        RETURN(-EACCES);
+                }
+                break;
+        case SPTLRPC_BULK_SVC_PRIV:
+                if (bsdr->bsd_nob != desc->bd_nob) {
+                        bsdv->bsd_flags |= BSD_FL_ERR;
+                        CERROR("prepared nob %d doesn't match the actual "
+                               "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
+                        RETURN(-EPROTO);
                 }
-        }
 
-        /* verify bulk data checksum */
-        rc = bulk_csum_svc(desc, req->rq_bulk_read,
-                           grctx->src_reqbsd, grctx->src_reqbsd_size,
-                           grctx->src_repbsd, grctx->src_repbsd_size);
+                if (desc->bd_iov_count == 0) {
+                        LASSERT(desc->bd_nob == 0);
+                        break;
+                }
 
-        RETURN(rc);
+                token.data = bsdr->bsd_data;
+                token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
+
+                maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
+                                       desc, &token, 0);
+                if (maj != GSS_S_COMPLETE) {
+                        bsdv->bsd_flags |= BSD_FL_ERR;
+                        CERROR("failed to decrypt bulk data: %x\n", maj);
+                        RETURN(-EACCES);
+                }
+                break;
+        }
+
+        RETURN(0);
 }
 
 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
                       struct ptlrpc_bulk_desc *desc)
 {
         struct gss_svc_reqctx        *grctx;
+        struct ptlrpc_bulk_sec_desc  *bsdr, *bsdv;
+        rawobj_t                      token;
+        __u32                         maj;
         int                           rc;
         ENTRY;
 
@@ -339,23 +461,56 @@ int gss_svc_wrap_bulk(struct ptlrpc_request *req,
         LASSERT(grctx->src_ctx);
         LASSERT(grctx->src_ctx->gsc_mechctx);
 
-        /* generate bulk data checksum */
-        rc = bulk_csum_svc(desc, req->rq_bulk_read,
-                           grctx->src_reqbsd, grctx->src_reqbsd_size,
-                           grctx->src_repbsd, grctx->src_repbsd_size);
-        if (rc)
-                RETURN(rc);
-
-        /* encrypt bulk data if required */
-        if (grctx->src_reqbsd->bsd_priv_alg != BULK_PRIV_ALG_NULL) {
-                rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 1,
-                                     grctx->src_reqbsd->bsd_priv_alg,
-                                     grctx->src_repbsd);
-                if (rc)
-                        CERROR("bulk read: server failed to encrypt data: "
-                               "rc %d\n", rc);
+        bsdr = grctx->src_reqbsd;
+        bsdv = grctx->src_repbsd;
+
+        /* bsdr has been sanity checked during unpacking */
+        bsdv->bsd_version = 0;
+        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
+        bsdv->bsd_svc = bsdr->bsd_svc;
+        bsdv->bsd_flags = 0;
+
+        switch (bsdv->bsd_svc) {
+        case SPTLRPC_BULK_SVC_INTG:
+                token.data = bsdv->bsd_data;
+                token.len = grctx->src_repbsd_size - sizeof(*bsdv);
+
+                maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
+                                   desc->bd_iov_count, desc->bd_iov, &token);
+                if (maj != GSS_S_COMPLETE) {
+                        bsdv->bsd_flags |= BSD_FL_ERR;
+                        CERROR("failed to sign bulk data: %x\n", maj);
+                        RETURN(-EACCES);
+                }
+                break;
+        case SPTLRPC_BULK_SVC_PRIV:
+                bsdv->bsd_nob = desc->bd_nob;
+
+                if (desc->bd_iov_count == 0) {
+                        LASSERT(desc->bd_nob == 0);
+                        break;
+                }
+
+                rc = sptlrpc_enc_pool_get_pages(desc);
+                if (rc) {
+                        bsdv->bsd_flags |= BSD_FL_ERR;
+                        CERROR("bulk read: failed to allocate encryption "
+                               "pages: %d\n", rc);
+                        RETURN(rc);
+                }
+
+                token.data = bsdv->bsd_data;
+                token.len = grctx->src_repbsd_size - sizeof(*bsdv);
+
+                maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
+                                     desc, &token, 1);
+                if (maj != GSS_S_COMPLETE) {
+                        bsdv->bsd_flags |= BSD_FL_ERR;
+                        CERROR("failed to encrypt bulk data: %x\n", maj);
+                        RETURN(-EACCES);
+                }
+                break;
         }
 
-        RETURN(rc);
+        RETURN(0);
 }
-
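Both server-side paths, gss_svc_unwrap_bulk() and gss_svc_wrap_bulk(), record any bulk failure by setting BSD_FL_ERR in the reply descriptor before returning; on a bulk write the client's gss_cli_ctx_unwrap_bulk() checks that flag and turns it into -EIO, so a server-side bulk failure is reported even though the RPC reply itself arrived intact. A minimal sketch of the two halves of that flag protocol, with simplified types and hypothetical function names (svc_handle_bulk(), cli_check_bulk_write()):

#include <stdio.h>

#define BULK_FL_ERR     0x01    /* stand-in for the BSD_FL_ERR flag */

/* Simplified reply descriptor: only the flags field matters here. */
struct bulk_reply_desc {
        unsigned int flags;
};

/* Server side: any bulk verification/decryption failure is recorded in
 * the reply descriptor before the error is returned locally. */
static int svc_handle_bulk(struct bulk_reply_desc *bsdv, int mech_ok)
{
        if (!mech_ok) {
                bsdv->flags |= BULK_FL_ERR;
                return -13;             /* -EACCES */
        }
        return 0;
}

/* Client side: a bulk write that the server flagged as failed is reported
 * as an I/O error even though the RPC itself completed. */
static int cli_check_bulk_write(const struct bulk_reply_desc *bsdv)
{
        if (bsdv->flags & BULK_FL_ERR)
                return -5;              /* -EIO */
        return 0;
}

int main(void)
{
        struct bulk_reply_desc bsdv = { 0 };

        svc_handle_bulk(&bsdv, 0);      /* pretend the MIC check failed */
        printf("client sees rc = %d\n", cli_check_bulk_write(&bsdv));
        return 0;
}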