-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: Eric Mei <eric.mei@sun.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
-#include <linux/random.h>
#include <linux/mutex.h>
#include <linux/crypto.h>
-#else
-#include <liblustre.h>
-#endif
#include <obd.h>
#include <obd_class.h>
struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
- struct gss_cli_ctx *gctx;
- struct lustre_msg *msg;
- struct ptlrpc_bulk_sec_desc *bsd;
- rawobj_t token;
- __u32 maj;
- int offset;
- int rc;
- ENTRY;
-
- LASSERT(req->rq_pack_bulk);
- LASSERT(req->rq_bulk_read || req->rq_bulk_write);
-
- gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
- LASSERT(gctx->gc_mechctx);
-
- switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
- case SPTLRPC_SVC_NULL:
- LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
- msg = req->rq_reqbuf;
- offset = msg->lm_bufcount - 1;
- break;
- case SPTLRPC_SVC_AUTH:
- case SPTLRPC_SVC_INTG:
- LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
- msg = req->rq_reqbuf;
- offset = msg->lm_bufcount - 2;
- break;
- case SPTLRPC_SVC_PRIV:
- LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
- msg = req->rq_clrbuf;
- offset = msg->lm_bufcount - 1;
- break;
- default:
- LBUG();
- }
-
- bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
- bsd->bsd_version = 0;
- bsd->bsd_flags = 0;
- bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
- bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
-
- if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
- RETURN(0);
-
- LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
- bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
-
- if (req->rq_bulk_read) {
- /*
- * bulk read: prepare receiving pages only for privacy mode.
- */
- if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
- return gss_cli_prep_bulk(req, desc);
- } else {
- /*
- * bulk write: sign or encrypt bulk pages.
- */
- bsd->bsd_nob = desc->bd_nob;
-
- if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
- /* integrity mode */
- token.data = bsd->bsd_data;
- token.len = lustre_msg_buflen(msg, offset) -
- sizeof(*bsd);
-
- maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
- desc->bd_iov_count, desc->bd_iov,
- &token);
- if (maj != GSS_S_COMPLETE) {
- CWARN("failed to sign bulk data: %x\n", maj);
- RETURN(-EACCES);
- }
- } else {
- /* privacy mode */
- if (desc->bd_iov_count == 0)
- RETURN(0);
-
- rc = sptlrpc_enc_pool_get_pages(desc);
- if (rc) {
- CERROR("bulk write: failed to allocate "
- "encryption pages: %d\n", rc);
- RETURN(rc);
- }
-
- token.data = bsd->bsd_data;
- token.len = lustre_msg_buflen(msg, offset) -
- sizeof(*bsd);
-
- maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
- if (maj != GSS_S_COMPLETE) {
- CWARN("fail to encrypt bulk data: %x\n", maj);
- RETURN(-EACCES);
- }
- }
- }
-
- RETURN(0);
+ struct gss_cli_ctx *gctx;
+ struct lustre_msg *msg;
+ struct ptlrpc_bulk_sec_desc *bsd;
+ rawobj_t token;
+ __u32 maj;
+ int offset;
+ int rc;
+ ENTRY;
+
+ LASSERT(req->rq_pack_bulk);
+ LASSERT(req->rq_bulk_read || req->rq_bulk_write);
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
+ gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+ LASSERT(gctx->gc_mechctx);
+
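+	/*
+	 * The bulk security descriptor lives in a trailing segment of the
+	 * request message; which segment depends on the service flavor
+	 * (for SVC_PRIV it sits in the clear-text buffer instead).
+	 */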
+ switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
+ case SPTLRPC_SVC_NULL:
+ LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
+ msg = req->rq_reqbuf;
+ offset = msg->lm_bufcount - 1;
+ break;
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
+ msg = req->rq_reqbuf;
+ offset = msg->lm_bufcount - 2;
+ break;
+ case SPTLRPC_SVC_PRIV:
+ LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
+ msg = req->rq_clrbuf;
+ offset = msg->lm_bufcount - 1;
+ break;
+ default:
+ LBUG();
+ }
+
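+	/* map the descriptor inside the chosen segment and fill its header */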
+ bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
+ bsd->bsd_version = 0;
+ bsd->bsd_flags = 0;
+ bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
+
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
+ RETURN(0);
+
+ LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
+ bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
+
+ if (req->rq_bulk_read) {
+ /*
+ * bulk read: prepare receiving pages only for privacy mode.
+ */
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
+ return gss_cli_prep_bulk(req, desc);
+ } else {
+ /*
+ * bulk write: sign or encrypt bulk pages.
+ */
+ bsd->bsd_nob = desc->bd_nob;
+
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
+ /* integrity mode */
+ token.data = bsd->bsd_data;
+ token.len = lustre_msg_buflen(msg, offset) -
+ sizeof(*bsd);
+
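+			/* compute a MIC over the bulk pages into the token
+			 * area that follows the descriptor header */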
+ maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
+ desc->bd_iov_count,
+ GET_KIOV(desc),
+ &token);
+ if (maj != GSS_S_COMPLETE) {
+ CWARN("failed to sign bulk data: %x\n", maj);
+ RETURN(-EACCES);
+ }
+ } else {
+ /* privacy mode */
+ if (desc->bd_iov_count == 0)
+ RETURN(0);
+
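+			/* draw pages from the shared encryption pool;
+			 * lgss_wrap_bulk() encrypts the bulk data into them */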
+ rc = sptlrpc_enc_pool_get_pages(desc);
+ if (rc) {
+				CERROR("bulk write: failed to allocate encryption pages: %d\n",
+				       rc);
+ RETURN(rc);
+ }
+
+ token.data = bsd->bsd_data;
+ token.len = lustre_msg_buflen(msg, offset) -
+ sizeof(*bsd);
+
+ maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
+ if (maj != GSS_S_COMPLETE) {
+				CWARN("failed to encrypt bulk data: %x\n", maj);
+ RETURN(-EACCES);
+ }
+ }
+ }
+
+ RETURN(0);
}
int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
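+
+	/* rmsg/roff locate the descriptor we packed into the request,
+	 * vmsg/voff the one returned in the server's reply */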
switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
case SPTLRPC_SVC_NULL:
vmsg = req->rq_repdata;
+ LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
voff = vmsg->lm_bufcount - 1;
- LASSERT(vmsg && vmsg->lm_bufcount >= 3);
rmsg = req->rq_reqbuf;
+ LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
roff = rmsg->lm_bufcount - 1; /* last segment */
- LASSERT(rmsg && rmsg->lm_bufcount >= 3);
break;
case SPTLRPC_SVC_AUTH:
case SPTLRPC_SVC_INTG:
vmsg = req->rq_repdata;
+ LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
voff = vmsg->lm_bufcount - 2;
- LASSERT(vmsg && vmsg->lm_bufcount >= 4);
rmsg = req->rq_reqbuf;
+ LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
roff = rmsg->lm_bufcount - 2; /* second last segment */
- LASSERT(rmsg && rmsg->lm_bufcount >= 4);
break;
case SPTLRPC_SVC_PRIV:
vmsg = req->rq_repdata;
+ LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
voff = vmsg->lm_bufcount - 1;
- LASSERT(vmsg && vmsg->lm_bufcount >= 2);
rmsg = req->rq_clrbuf;
+ LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
roff = rmsg->lm_bufcount - 1; /* last segment */
- LASSERT(rmsg && rmsg->lm_bufcount >= 2);
break;
default:
LBUG();
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
LASSERT(gctx->gc_mechctx);
- if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
- int i, nob;
-
- /* fix the actual data size */
- for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].kiov_len + nob >
- desc->bd_nob_transferred) {
- desc->bd_iov[i].kiov_len =
- desc->bd_nob_transferred - nob;
- }
- nob += desc->bd_iov[i].kiov_len;
- }
-
- token.data = bsdv->bsd_data;
- token.len = lustre_msg_buflen(vmsg, voff) -
- sizeof(*bsdv);
-
- maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
- desc->bd_iov_count, desc->bd_iov,
- &token);
+ if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
+ int i, nob;
+
+		/* trim kiov lengths so their total matches the number of
+		 * bytes actually transferred (handle a short read) */
+ for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
+ if (BD_GET_KIOV(desc, i).kiov_len + nob >
+ desc->bd_nob_transferred) {
+ BD_GET_KIOV(desc, i).kiov_len =
+ desc->bd_nob_transferred - nob;
+ }
+ nob += BD_GET_KIOV(desc, i).kiov_len;
+ }
+
+ token.data = bsdv->bsd_data;
+ token.len = lustre_msg_buflen(vmsg, voff) -
+ sizeof(*bsdv);
+
+ maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
+ desc->bd_iov_count,
+ GET_KIOV(desc),
+ &token);
if (maj != GSS_S_COMPLETE) {
CERROR("failed to verify bulk read: %x\n", maj);
RETURN(-EACCES);
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_write);
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
token.data = bsdr->bsd_data;
token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
- maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
- desc->bd_iov_count, desc->bd_iov, &token);
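+	/* integrity mode: check the client's MIC over the received pages */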
+ maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
+ desc->bd_iov_count,
+ GET_KIOV(desc), &token);
if (maj != GSS_S_COMPLETE) {
bsdv->bsd_flags |= BSD_FL_ERR;
CERROR("failed to verify bulk signature: %x\n", maj);
CERROR("failed decrypt bulk data: %x\n", maj);
RETURN(-EACCES);
}
+
+	/* mimic gss_cli_ctx_unwrap_bulk: report the full bulk size as
+	 * transferred once decryption succeeded */
+ desc->bd_nob_transferred = desc->bd_nob;
+
break;
}
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_read);
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
token.data = bsdv->bsd_data;
token.len = grctx->src_repbsd_size - sizeof(*bsdv);
- maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
- desc->bd_iov_count, desc->bd_iov, &token);
- if (maj != GSS_S_COMPLETE) {
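+	/* integrity mode: sign the pages being sent so the client
+	 * can verify them */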
+ maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
+ desc->bd_iov_count,
+ GET_KIOV(desc), &token);
+ if (maj != GSS_S_COMPLETE) {
bsdv->bsd_flags |= BSD_FL_ERR;
CERROR("failed to sign bulk data: %x\n", maj);
RETURN(-EACCES);