/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/gss/gss_bulk.c
 *
 * Author: Eric Mei <eric.mei@sun.com>
 */
#define DEBUG_SUBSYSTEM S_SEC

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/mutex.h>
#include <linux/crypto.h>

#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_internal.h"
57 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
58 struct ptlrpc_request *req,
59 struct ptlrpc_bulk_desc *desc)
61 struct gss_cli_ctx *gctx;
62 struct lustre_msg *msg;
63 struct ptlrpc_bulk_sec_desc *bsd;
70 LASSERT(req->rq_pack_bulk);
71 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
73 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
74 LASSERT(gctx->gc_mechctx);
76 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
77 case SPTLRPC_SVC_NULL:
78 LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
80 offset = msg->lm_bufcount - 1;
82 case SPTLRPC_SVC_AUTH:
83 case SPTLRPC_SVC_INTG:
84 LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
86 offset = msg->lm_bufcount - 2;
88 case SPTLRPC_SVC_PRIV:
89 LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
91 offset = msg->lm_bufcount - 1;
97 bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
100 bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
101 bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
103 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
106 LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
107 bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
109 if (req->rq_bulk_read) {
111 * bulk read: prepare receiving pages only for privacy mode.
113 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
114 return gss_cli_prep_bulk(req, desc);
117 * bulk write: sign or encrypt bulk pages.
119 bsd->bsd_nob = desc->bd_nob;
121 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
123 token.data = bsd->bsd_data;
124 token.len = lustre_msg_buflen(msg, offset) -
127 maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
131 if (maj != GSS_S_COMPLETE) {
132 CWARN("failed to sign bulk data: %x\n", maj);
137 if (desc->bd_iov_count == 0)
140 rc = sptlrpc_enc_pool_get_pages(desc);
142 CERROR("bulk write: failed to allocate "
143 "encryption pages: %d\n", rc);
147 token.data = bsd->bsd_data;
148 token.len = lustre_msg_buflen(msg, offset) -
151 maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
152 if (maj != GSS_S_COMPLETE) {
153 CWARN("fail to encrypt bulk data: %x\n", maj);
162 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
163 struct ptlrpc_request *req,
164 struct ptlrpc_bulk_desc *desc)
166 struct gss_cli_ctx *gctx;
167 struct lustre_msg *rmsg, *vmsg;
168 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
174 LASSERT(req->rq_pack_bulk);
175 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
177 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
178 case SPTLRPC_SVC_NULL:
179 vmsg = req->rq_repdata;
180 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
181 voff = vmsg->lm_bufcount - 1;
183 rmsg = req->rq_reqbuf;
184 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
185 roff = rmsg->lm_bufcount - 1; /* last segment */
187 case SPTLRPC_SVC_AUTH:
188 case SPTLRPC_SVC_INTG:
189 vmsg = req->rq_repdata;
190 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
191 voff = vmsg->lm_bufcount - 2;
193 rmsg = req->rq_reqbuf;
194 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
195 roff = rmsg->lm_bufcount - 2; /* second last segment */
197 case SPTLRPC_SVC_PRIV:
198 vmsg = req->rq_repdata;
199 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
200 voff = vmsg->lm_bufcount - 1;
202 rmsg = req->rq_clrbuf;
203 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
204 roff = rmsg->lm_bufcount - 1; /* last segment */
210 bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
211 bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
212 LASSERT(bsdr && bsdv);
214 if (bsdr->bsd_version != bsdv->bsd_version ||
215 bsdr->bsd_type != bsdv->bsd_type ||
216 bsdr->bsd_svc != bsdv->bsd_svc) {
217 CERROR("bulk security descriptor mismatch: "
218 "(%u,%u,%u) != (%u,%u,%u)\n",
219 bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
220 bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
224 LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
225 bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
226 bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
229 * in privacy mode if return success, make sure bd_nob_transferred
230 * is the actual size of the clear text, otherwise upper layer
233 if (req->rq_bulk_write) {
234 if (bsdv->bsd_flags & BSD_FL_ERR) {
235 CERROR("server reported bulk i/o failure\n");
239 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
240 desc->bd_nob_transferred = desc->bd_nob;
243 * bulk read, upon return success, bd_nob_transferred is
244 * the size of plain text actually received.
246 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
247 LASSERT(gctx->gc_mechctx);
249 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
252 /* fix the actual data size */
253 for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
254 if (desc->bd_vec[i].bv_len + nob >
255 desc->bd_nob_transferred) {
256 desc->bd_vec[i].bv_len =
257 desc->bd_nob_transferred - nob;
259 nob += desc->bd_vec[i].bv_len;
262 token.data = bsdv->bsd_data;
263 token.len = lustre_msg_buflen(vmsg, voff) -
266 maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
270 if (maj != GSS_S_COMPLETE) {
271 CERROR("failed to verify bulk read: %x\n", maj);
274 } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
275 desc->bd_nob = bsdv->bsd_nob;
276 if (desc->bd_nob == 0)
279 token.data = bsdv->bsd_data;
280 token.len = lustre_msg_buflen(vmsg, voff) -
283 maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
285 if (maj != GSS_S_COMPLETE) {
286 CERROR("failed to decrypt bulk read: %x\n",
291 desc->bd_nob_transferred = desc->bd_nob;
298 static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
299 struct gss_ctx *mechctx)
303 if (desc->bd_iov_count == 0)
306 rc = sptlrpc_enc_pool_get_pages(desc);
310 if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
316 int gss_cli_prep_bulk(struct ptlrpc_request *req,
317 struct ptlrpc_bulk_desc *desc)
322 LASSERT(req->rq_cli_ctx);
323 LASSERT(req->rq_pack_bulk);
324 LASSERT(req->rq_bulk_read);
326 if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
329 rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
331 CERROR("bulk read: failed to prepare encryption "
337 int gss_svc_prep_bulk(struct ptlrpc_request *req,
338 struct ptlrpc_bulk_desc *desc)
340 struct gss_svc_reqctx *grctx;
341 struct ptlrpc_bulk_sec_desc *bsd;
345 LASSERT(req->rq_svc_ctx);
346 LASSERT(req->rq_pack_bulk);
347 LASSERT(req->rq_bulk_write);
349 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
350 LASSERT(grctx->src_reqbsd);
351 LASSERT(grctx->src_repbsd);
352 LASSERT(grctx->src_ctx);
353 LASSERT(grctx->src_ctx->gsc_mechctx);
355 bsd = grctx->src_reqbsd;
356 if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
359 rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
361 CERROR("bulk write: failed to prepare encryption "
367 int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
368 struct ptlrpc_bulk_desc *desc)
370 struct gss_svc_reqctx *grctx;
371 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
376 LASSERT(req->rq_svc_ctx);
377 LASSERT(req->rq_pack_bulk);
378 LASSERT(req->rq_bulk_write);
380 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
382 LASSERT(grctx->src_reqbsd);
383 LASSERT(grctx->src_repbsd);
384 LASSERT(grctx->src_ctx);
385 LASSERT(grctx->src_ctx->gsc_mechctx);
387 bsdr = grctx->src_reqbsd;
388 bsdv = grctx->src_repbsd;
390 /* bsdr has been sanity checked during unpacking */
391 bsdv->bsd_version = 0;
392 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
393 bsdv->bsd_svc = bsdr->bsd_svc;
396 switch (bsdv->bsd_svc) {
397 case SPTLRPC_BULK_SVC_INTG:
398 token.data = bsdr->bsd_data;
399 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
401 maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
403 desc->bd_vec, &token);
404 if (maj != GSS_S_COMPLETE) {
405 bsdv->bsd_flags |= BSD_FL_ERR;
406 CERROR("failed to verify bulk signature: %x\n", maj);
410 case SPTLRPC_BULK_SVC_PRIV:
411 if (bsdr->bsd_nob != desc->bd_nob) {
412 bsdv->bsd_flags |= BSD_FL_ERR;
413 CERROR("prepared nob %d doesn't match the actual "
414 "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
418 if (desc->bd_iov_count == 0) {
419 LASSERT(desc->bd_nob == 0);
423 token.data = bsdr->bsd_data;
424 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
426 maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
428 if (maj != GSS_S_COMPLETE) {
429 bsdv->bsd_flags |= BSD_FL_ERR;
430 CERROR("failed decrypt bulk data: %x\n", maj);
434 /* mimic gss_cli_ctx_unwrap_bulk */
435 desc->bd_nob_transferred = desc->bd_nob;
443 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
444 struct ptlrpc_bulk_desc *desc)
446 struct gss_svc_reqctx *grctx;
447 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
453 LASSERT(req->rq_svc_ctx);
454 LASSERT(req->rq_pack_bulk);
455 LASSERT(req->rq_bulk_read);
457 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
459 LASSERT(grctx->src_reqbsd);
460 LASSERT(grctx->src_repbsd);
461 LASSERT(grctx->src_ctx);
462 LASSERT(grctx->src_ctx->gsc_mechctx);
464 bsdr = grctx->src_reqbsd;
465 bsdv = grctx->src_repbsd;
467 /* bsdr has been sanity checked during unpacking */
468 bsdv->bsd_version = 0;
469 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
470 bsdv->bsd_svc = bsdr->bsd_svc;
473 switch (bsdv->bsd_svc) {
474 case SPTLRPC_BULK_SVC_INTG:
475 token.data = bsdv->bsd_data;
476 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
478 maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
480 desc->bd_vec, &token);
481 if (maj != GSS_S_COMPLETE) {
482 bsdv->bsd_flags |= BSD_FL_ERR;
483 CERROR("failed to sign bulk data: %x\n", maj);
487 case SPTLRPC_BULK_SVC_PRIV:
488 bsdv->bsd_nob = desc->bd_nob;
490 if (desc->bd_iov_count == 0) {
491 LASSERT(desc->bd_nob == 0);
495 rc = sptlrpc_enc_pool_get_pages(desc);
497 bsdv->bsd_flags |= BSD_FL_ERR;
498 CERROR("bulk read: failed to allocate encryption "
503 token.data = bsdv->bsd_data;
504 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
506 maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
508 if (maj != GSS_S_COMPLETE) {
509 bsdv->bsd_flags |= BSD_FL_ERR;
510 CERROR("failed to encrypt bulk data: %x\n", maj);