/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/ptlrpc/gss/gss_bulk.c
 *
 * Author: Eric Mei <eric.mei@sun.com>
 */
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/crypto.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"
int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
			  struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct gss_cli_ctx *gctx;
	struct lustre_msg *msg;
	struct ptlrpc_bulk_sec_desc *bsd;
	rawobj_t token;
	__u32 maj;
	int offset, rc;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_read || req->rq_bulk_write);

	gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
	LASSERT(gctx->gc_mechctx);

	switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
	case SPTLRPC_SVC_NULL:
		LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
		msg = req->rq_reqbuf;
		offset = msg->lm_bufcount - 1;
		break;
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
		msg = req->rq_reqbuf;
		offset = msg->lm_bufcount - 2;
		break;
	case SPTLRPC_SVC_PRIV:
		LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
		msg = req->rq_clrbuf;
		offset = msg->lm_bufcount - 1;
		break;
	default:
		LBUG();
	}

	bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
	bsd->bsd_version = 0;
	bsd->bsd_flags = 0;
	bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

	if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
		bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);

	if (req->rq_bulk_read) {
		/* bulk read: prepare receiving pages only for privacy mode */
		if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
			return gss_cli_prep_bulk(req, desc);
	} else {
		/* bulk write: sign or encrypt bulk pages */
		bsd->bsd_nob = desc->bd_nob;

		if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
			/* integrity mode */
			token.data = bsd->bsd_data;
			token.len = lustre_msg_buflen(msg, offset) -
				    sizeof(*bsd);

			maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
					   desc->bd_iov_count,
					   desc->bd_vec, &token);
			if (maj != GSS_S_COMPLETE) {
				CWARN("failed to sign bulk data: %x\n", maj);
				return -EACCES;
			}
		} else {
			/* privacy mode */
			if (desc->bd_iov_count == 0)
				return 0;

			rc = sptlrpc_pool_get_desc_pages(desc);
			if (rc) {
				CERROR("bulk write: failed to allocate encryption pages: %d\n",
				       rc);
				return rc;
			}

			token.data = bsd->bsd_data;
			token.len = lustre_msg_buflen(msg, offset) -
				    sizeof(*bsd);

			maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
			if (maj != GSS_S_COMPLETE) {
				CWARN("failed to encrypt bulk data: %x\n",
				      maj);
				return -EACCES;
			}
		}
	}

	return 0;
}
int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
			    struct ptlrpc_request *req,
			    struct ptlrpc_bulk_desc *desc)
{
	struct gss_cli_ctx *gctx;
	struct lustre_msg *rmsg, *vmsg;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	rawobj_t token;
	__u32 maj;
	int roff, voff;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_read || req->rq_bulk_write);

	switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
	case SPTLRPC_SVC_NULL:
		vmsg = req->rq_repdata;
		LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
		voff = vmsg->lm_bufcount - 1;

		rmsg = req->rq_reqbuf;
		LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
		roff = rmsg->lm_bufcount - 1; /* last segment */
		break;
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		vmsg = req->rq_repdata;
		LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
		voff = vmsg->lm_bufcount - 2;

		rmsg = req->rq_reqbuf;
		LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
		roff = rmsg->lm_bufcount - 2; /* second last segment */
		break;
	case SPTLRPC_SVC_PRIV:
		vmsg = req->rq_repdata;
		LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
		voff = vmsg->lm_bufcount - 1;

		rmsg = req->rq_clrbuf;
		LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
		roff = rmsg->lm_bufcount - 1; /* last segment */
		break;
	default:
		LBUG();
	}

	bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
	bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
	LASSERT(bsdr && bsdv);

	if (bsdr->bsd_version != bsdv->bsd_version ||
	    bsdr->bsd_type != bsdv->bsd_type ||
	    bsdr->bsd_svc != bsdv->bsd_svc) {
		CERROR("bulk security descriptor mismatch: (%u,%u,%u) != (%u,%u,%u)\n",
		       bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
		       bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
		return -EPROTO;
	}

	LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
		bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
		bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);

	/*
	 * in privacy mode if return success, make sure bd_nob_transferred
	 * is the actual size of the clear text, otherwise upper layer
	 * may be confused.
	 */
	if (req->rq_bulk_write) {
		if (bsdv->bsd_flags & BSD_FL_ERR) {
			CERROR("server reported bulk i/o failure\n");
			return -EIO;
		}

		if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
			desc->bd_nob_transferred = desc->bd_nob;
	} else {
		/*
		 * bulk read: upon return success, bd_nob_transferred is
		 * the size of plain text actually received.
		 */
		gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
		LASSERT(gctx->gc_mechctx);

		if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
			int i, nob;

			/* fix the actual data size */
			for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
				if (desc->bd_vec[i].bv_len + nob >
				    desc->bd_nob_transferred) {
					desc->bd_vec[i].bv_len =
						desc->bd_nob_transferred - nob;
				}
				nob += desc->bd_vec[i].bv_len;
			}

			token.data = bsdv->bsd_data;
			token.len = lustre_msg_buflen(vmsg, voff) -
				    sizeof(*bsdv);

			maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
					      desc->bd_iov_count,
					      desc->bd_vec, &token);
			if (maj != GSS_S_COMPLETE) {
				CERROR("failed to verify bulk read: %x\n",
				       maj);
				return -EACCES;
			}
		} else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
			desc->bd_nob = bsdv->bsd_nob;
			if (desc->bd_nob == 0)
				return 0;

			token.data = bsdv->bsd_data;
			token.len = lustre_msg_buflen(vmsg, voff) -
				    sizeof(*bsdv);

			maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
					       &token, 1);
			if (maj != GSS_S_COMPLETE) {
				CERROR("failed to decrypt bulk read: %x\n",
				       maj);
				return -EACCES;
			}

			desc->bd_nob_transferred = desc->bd_nob;
		}
	}

	return 0;
}
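
/*
 * Common helper for both client and server: pull pages from the
 * encryption pool and let the GSS mechanism prepare the descriptor
 * for a privacy-mode transfer.
 */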
static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
			 struct gss_ctx *mechctx)
{
	int rc;

	if (desc->bd_iov_count == 0)
		return 0;

	rc = sptlrpc_pool_get_desc_pages(desc);
	if (rc)
		return rc;

	if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
		return -EACCES;

	return 0;
}
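
/*
 * Client side: for a privacy-mode bulk read, make sure receive pages
 * are allocated before the encrypted data arrives.
 */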
int gss_cli_prep_bulk(struct ptlrpc_request *req,
		      struct ptlrpc_bulk_desc *desc)
{
	int rc;

	LASSERT(req->rq_cli_ctx);
	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_read);

	if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
		return 0;

	rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
	if (rc)
		CERROR("bulk read: failed to prepare encryption pages: %d\n",
		       rc);

	return rc;
}
int gss_svc_prep_bulk(struct ptlrpc_request *req,
		      struct ptlrpc_bulk_desc *desc)
{
	struct gss_svc_reqctx *grctx;
	struct ptlrpc_bulk_sec_desc *bsd;
	int rc;

	LASSERT(req->rq_svc_ctx);
	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_write);

	grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
	LASSERT(grctx->src_reqbsd);
	LASSERT(grctx->src_repbsd);
	LASSERT(grctx->src_ctx);
	LASSERT(grctx->src_ctx->gsc_mechctx);

	bsd = grctx->src_reqbsd;
	if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
		return 0;

	rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
	if (rc)
		CERROR("bulk write: failed to prepare encryption pages: %d\n",
		       rc);

	return rc;
}
int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct gss_svc_reqctx *grctx;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	rawobj_t token;
	__u32 maj;

	LASSERT(req->rq_svc_ctx);
	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_write);

	grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

	LASSERT(grctx->src_reqbsd);
	LASSERT(grctx->src_repbsd);
	LASSERT(grctx->src_ctx);
	LASSERT(grctx->src_ctx->gsc_mechctx);

	bsdr = grctx->src_reqbsd;
	bsdv = grctx->src_repbsd;

	/* bsdr has been sanity checked during unpacking */
	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	switch (bsdv->bsd_svc) {
	case SPTLRPC_BULK_SVC_INTG:
		token.data = bsdr->bsd_data;
		token.len = grctx->src_reqbsd_size - sizeof(*bsdr);

		maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
				      desc->bd_iov_count,
				      desc->bd_vec, &token);
		if (maj != GSS_S_COMPLETE) {
			bsdv->bsd_flags |= BSD_FL_ERR;
			CERROR("failed to verify bulk signature: %x\n", maj);
			return -EACCES;
		}
		break;
	case SPTLRPC_BULK_SVC_PRIV:
		if (bsdr->bsd_nob != desc->bd_nob) {
			bsdv->bsd_flags |= BSD_FL_ERR;
			CERROR("prepared nob %d doesn't match the actual nob %d\n",
			       desc->bd_nob, bsdr->bsd_nob);
			return -EPROTO;
		}

		if (desc->bd_iov_count == 0) {
			LASSERT(desc->bd_nob == 0);
			break;
		}

		token.data = bsdr->bsd_data;
		token.len = grctx->src_reqbsd_size - sizeof(*bsdr);

		maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
				       desc, &token, 0);
		if (maj != GSS_S_COMPLETE) {
			bsdv->bsd_flags |= BSD_FL_ERR;
			CERROR("failed to decrypt bulk data: %x\n", maj);
			return -EACCES;
		}

		/* mimic gss_cli_ctx_unwrap_bulk */
		desc->bd_nob_transferred = desc->bd_nob;
		break;
	}

	return 0;
}
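
/*
 * Server side: sign or encrypt bulk read data before it is sent back,
 * mirroring gss_cli_ctx_wrap_bulk on the client.
 */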
int gss_svc_wrap_bulk(struct ptlrpc_request *req,
		      struct ptlrpc_bulk_desc *desc)
{
	struct gss_svc_reqctx *grctx;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	rawobj_t token;
	__u32 maj;
	int rc;

	LASSERT(req->rq_svc_ctx);
	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_read);

	grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

	LASSERT(grctx->src_reqbsd);
	LASSERT(grctx->src_repbsd);
	LASSERT(grctx->src_ctx);
	LASSERT(grctx->src_ctx->gsc_mechctx);

	bsdr = grctx->src_reqbsd;
	bsdv = grctx->src_repbsd;

	/* bsdr has been sanity checked during unpacking */
	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	switch (bsdv->bsd_svc) {
	case SPTLRPC_BULK_SVC_INTG:
		token.data = bsdv->bsd_data;
		token.len = grctx->src_repbsd_size - sizeof(*bsdv);

		maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
				   desc->bd_iov_count,
				   desc->bd_vec, &token);
		if (maj != GSS_S_COMPLETE) {
			bsdv->bsd_flags |= BSD_FL_ERR;
			CERROR("failed to sign bulk data: %x\n", maj);
			return -EACCES;
		}
		break;
	case SPTLRPC_BULK_SVC_PRIV:
		bsdv->bsd_nob = desc->bd_nob;

		if (desc->bd_iov_count == 0) {
			LASSERT(desc->bd_nob == 0);
			break;
		}

		rc = sptlrpc_pool_get_desc_pages(desc);
		if (rc) {
			bsdv->bsd_flags |= BSD_FL_ERR;
			CERROR("bulk read: failed to allocate encryption pages: %d\n",
			       rc);
			return rc;
		}

		token.data = bsdv->bsd_data;
		token.len = grctx->src_repbsd_size - sizeof(*bsdv);

		maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
				     desc, &token, 1);
		if (maj != GSS_S_COMPLETE) {
			bsdv->bsd_flags |= BSD_FL_ERR;
			CERROR("failed to encrypt bulk data: %x\n", maj);
			return -EACCES;
		}
		break;
	}

	return 0;
}