4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/ptlrpc/gss/gss_bulk.c
34 * Author: Eric Mei <eric.mei@sun.com>
37 #define DEBUG_SUBSYSTEM S_SEC
38 #include <linux/init.h>
39 #include <linux/module.h>
40 #include <linux/slab.h>
41 #include <linux/dcache.h>
43 #include <linux/mutex.h>
44 #include <linux/crypto.h>
47 #include <obd_class.h>
48 #include <obd_support.h>
49 #include <lustre_net.h>
50 #include <lustre_import.h>
51 #include <lustre_sec.h>
54 #include "gss_internal.h"
/*
 * Client side, request path: locate the bulk security descriptor (bsd)
 * segment in the outgoing request and fill it in; for bulk write,
 * additionally sign (SPTLRPC_BULK_SVC_INTG) or encrypt
 * (SPTLRPC_BULK_SVC_PRIV) the bulk pages described by @desc.
 *
 * NOTE(review): this extract is elided -- braces, the declarations of
 * rc/offset/maj/token, the "msg = ..." assignments, switch breaks and
 * the return statements are not visible here.  Comments below describe
 * only what the visible lines establish.
 */
57 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
58 struct ptlrpc_request *req,
59 struct ptlrpc_bulk_desc *desc)
61 struct gss_cli_ctx *gctx;
62 struct lustre_msg *msg;
63 struct ptlrpc_bulk_sec_desc *bsd;
/* caller must have requested bulk packing on a kiov-type descriptor */
70 LASSERT(req->rq_pack_bulk);
71 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
72 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
/* recover the gss-specific context wrapper from the generic cli ctx */
74 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
75 LASSERT(gctx->gc_mechctx);
/*
 * Pick the bsd segment index by the rpc service part of the flavor:
 * NULL -> last segment; AUTH/INTG -> second-to-last segment; PRIV ->
 * last segment of the clear-text buffer (PRIV asserts on rq_clrbuf).
 * The elided "msg = ..." assignments select the buffer used below.
 */
77 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
78 case SPTLRPC_SVC_NULL:
79 LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
81 offset = msg->lm_bufcount - 1;
83 case SPTLRPC_SVC_AUTH:
84 case SPTLRPC_SVC_INTG:
85 LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
87 offset = msg->lm_bufcount - 2;
89 case SPTLRPC_SVC_PRIV:
90 LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
92 offset = msg->lm_bufcount - 1;
/* map the chosen segment onto the bulk security descriptor */
98 bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
101 bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
102 bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
/* no signing/encryption work when the bulk service is NULL */
104 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
107 LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
108 bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
110 if (req->rq_bulk_read) {
112 * bulk read: prepare receiving pages only for privacy mode.
114 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
115 return gss_cli_prep_bulk(req, desc);
118 * bulk write: sign or encrypt bulk pages.
120 bsd->bsd_nob = desc->bd_nob;
122 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
/*
 * Integrity: compute a MIC over the bulk pages into the bsd payload
 * area; token.len is the descriptor segment space after the fixed
 * header (the subtrahend is on an elided continuation line).
 */
124 token.data = bsd->bsd_data;
125 token.len = lustre_msg_buflen(msg, offset) -
128 maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
132 if (maj != GSS_S_COMPLETE) {
133 CWARN("failed to sign bulk data: %x\n", maj);
/* privacy: an empty descriptor has nothing to encrypt */
138 if (desc->bd_iov_count == 0)
/* take cipher-text pages from the shared sptlrpc encryption pool */
141 rc = sptlrpc_enc_pool_get_pages(desc);
143 CERROR("bulk write: failed to allocate "
144 "encryption pages: %d\n", rc);
148 token.data = bsd->bsd_data;
149 token.len = lustre_msg_buflen(msg, offset) -
/* encrypt the pages in place; failure is reported but non-fatal here */
152 maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
153 if (maj != GSS_S_COMPLETE) {
154 CWARN("fail to encrypt bulk data: %x\n", maj);
/*
 * Client side, reply path: verify the bulk transfer after the server
 * replied.  Locates the request-side (bsdr) and reply/verify-side
 * (bsdv) bulk security descriptors, cross-checks them, then for bulk
 * read verifies the MIC (INTG) or decrypts the received pages (PRIV).
 *
 * NOTE(review): this extract is elided -- braces, declarations of
 * rc/i/nob/voff/roff/maj/token, switch breaks, error returns and the
 * final return are not visible here.  Comments describe only what the
 * visible lines establish.
 */
163 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
164 struct ptlrpc_request *req,
165 struct ptlrpc_bulk_desc *desc)
167 struct gss_cli_ctx *gctx;
168 struct lustre_msg *rmsg, *vmsg;
169 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
175 LASSERT(req->rq_pack_bulk);
176 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
177 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
/*
 * Locate both descriptors: vmsg/voff index the reply data
 * (rq_repdata), rmsg/roff index the request buffer we sent.  Segment
 * positions mirror gss_cli_ctx_wrap_bulk() for each service flavor.
 */
179 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
180 case SPTLRPC_SVC_NULL:
181 vmsg = req->rq_repdata;
182 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
183 voff = vmsg->lm_bufcount - 1;
185 rmsg = req->rq_reqbuf;
186 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
187 roff = rmsg->lm_bufcount - 1; /* last segment */
189 case SPTLRPC_SVC_AUTH:
190 case SPTLRPC_SVC_INTG:
191 vmsg = req->rq_repdata;
192 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
193 voff = vmsg->lm_bufcount - 2;
195 rmsg = req->rq_reqbuf;
196 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
197 roff = rmsg->lm_bufcount - 2; /* second last segment */
199 case SPTLRPC_SVC_PRIV:
200 vmsg = req->rq_repdata;
201 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
202 voff = vmsg->lm_bufcount - 1;
204 rmsg = req->rq_clrbuf;
205 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
206 roff = rmsg->lm_bufcount - 1; /* last segment */
212 bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
213 bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
214 LASSERT(bsdr && bsdv);
/* reply descriptor must agree with what we sent in the request */
216 if (bsdr->bsd_version != bsdv->bsd_version ||
217 bsdr->bsd_type != bsdv->bsd_type ||
218 bsdr->bsd_svc != bsdv->bsd_svc) {
219 CERROR("bulk security descriptor mismatch: "
220 "(%u,%u,%u) != (%u,%u,%u)\n",
221 bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
222 bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
226 LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
227 bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
228 bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
231 * in privacy mode if return success, make sure bd_nob_transferred
232 * is the actual size of the clear text, otherwise upper layer
/* bulk write: server-side failure flag and nob fixup, then done */
235 if (req->rq_bulk_write) {
236 if (bsdv->bsd_flags & BSD_FL_ERR) {
237 CERROR("server reported bulk i/o failure\n");
241 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
242 desc->bd_nob_transferred = desc->bd_nob;
245 * bulk read, upon return success, bd_nob_transferred is
246 * the size of plain text actually received.
248 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
249 LASSERT(gctx->gc_mechctx);
251 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
/*
 * Integrity read: trim kiov lengths so their sum matches
 * bd_nob_transferred before verifying the MIC sent in bsdv.
 */
254 /* fix the actual data size */
255 for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
256 if (BD_GET_KIOV(desc, i).kiov_len + nob >
257 desc->bd_nob_transferred) {
258 BD_GET_KIOV(desc, i).kiov_len =
259 desc->bd_nob_transferred - nob;
261 nob += BD_GET_KIOV(desc, i).kiov_len;
264 token.data = bsdv->bsd_data;
265 token.len = lustre_msg_buflen(vmsg, voff) -
268 maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
272 if (maj != GSS_S_COMPLETE) {
273 CERROR("failed to verify bulk read: %x\n", maj);
276 } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
/*
 * Privacy read: take the clear-text size the server reported,
 * then decrypt the received pages in place.
 */
277 desc->bd_nob = bsdv->bsd_nob;
278 if (desc->bd_nob == 0)
281 token.data = bsdv->bsd_data;
282 token.len = lustre_msg_buflen(vmsg, voff) -
285 maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
287 if (maj != GSS_S_COMPLETE) {
288 CERROR("failed to decrypt bulk read: %x\n",
/* report the clear-text size actually transferred */
293 desc->bd_nob_transferred = desc->bd_nob;
/*
 * Common helper: allocate encryption-pool pages for @desc (no-op for an
 * empty descriptor) and let the mech-specific lgss_prep_bulk() prepare
 * the bulk transfer on @mechctx.
 *
 * NOTE(review): elided extract -- braces, the declaration of rc, the
 * early-return bodies and the final return are not visible here.
 */
300 static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
301 struct gss_ctx *mechctx)
/* nothing to prepare for an empty descriptor */
305 if (desc->bd_iov_count == 0)
308 rc = sptlrpc_enc_pool_get_pages(desc);
/* mechanism-level preparation; any non-COMPLETE major status fails */
312 if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
/*
 * Client side: prepare receive pages for a bulk READ.  Only the privacy
 * bulk service needs preparation; other flavors return early (elided).
 * Delegates to gss_prep_bulk() with the client context's mech context.
 *
 * NOTE(review): elided extract -- braces, the declaration of rc, the
 * early return and the tail of the CERROR message are not visible here.
 */
318 int gss_cli_prep_bulk(struct ptlrpc_request *req,
319 struct ptlrpc_bulk_desc *desc)
324 LASSERT(req->rq_cli_ctx);
325 LASSERT(req->rq_pack_bulk);
326 LASSERT(req->rq_bulk_read);
/* only privacy mode needs pre-allocated encryption pages */
328 if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
331 rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
333 CERROR("bulk read: failed to prepare encryption "
/*
 * Server side: prepare pages for an incoming bulk WRITE.  Reads the
 * client's request-side bulk security descriptor (src_reqbsd) and, when
 * it asks for the privacy bulk service, delegates to gss_prep_bulk()
 * with the server context's mech context.
 *
 * NOTE(review): elided extract -- braces, the declarations of rc/bsd's
 * early return, and the tail of the CERROR message are not visible.
 */
339 int gss_svc_prep_bulk(struct ptlrpc_request *req,
340 struct ptlrpc_bulk_desc *desc)
342 struct gss_svc_reqctx *grctx;
343 struct ptlrpc_bulk_sec_desc *bsd;
347 LASSERT(req->rq_svc_ctx);
348 LASSERT(req->rq_pack_bulk);
349 LASSERT(req->rq_bulk_write);
/* recover the per-request gss server context and validate its parts */
351 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
352 LASSERT(grctx->src_reqbsd);
353 LASSERT(grctx->src_repbsd);
354 LASSERT(grctx->src_ctx);
355 LASSERT(grctx->src_ctx->gsc_mechctx);
/* only the privacy bulk service requires page preparation */
357 bsd = grctx->src_reqbsd;
358 if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
361 rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
363 CERROR("bulk write: failed to prepare encryption "
/*
 * Server side: check/unwrap pages received from a client bulk WRITE.
 * bsdr is the client's request-side descriptor, bsdv is the reply-side
 * descriptor we fill in.  INTG verifies the client's MIC; PRIV checks
 * the advertised size and decrypts the pages.  On any failure the
 * BSD_FL_ERR flag is set in bsdv so the client sees the error.
 *
 * NOTE(review): elided extract -- braces, declarations of rc/maj/token,
 * switch breaks/default, error returns and the final return are not
 * visible here.  Comments describe only what the visible lines show.
 */
369 int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
370 struct ptlrpc_bulk_desc *desc)
372 struct gss_svc_reqctx *grctx;
373 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
378 LASSERT(req->rq_svc_ctx);
379 LASSERT(req->rq_pack_bulk);
380 LASSERT(req->rq_bulk_write);
381 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
383 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
385 LASSERT(grctx->src_reqbsd);
386 LASSERT(grctx->src_repbsd);
387 LASSERT(grctx->src_ctx);
388 LASSERT(grctx->src_ctx->gsc_mechctx);
390 bsdr = grctx->src_reqbsd;
391 bsdv = grctx->src_repbsd;
/* initialize the reply descriptor, echoing the client's bulk service */
393 /* bsdr has been sanity checked during unpacking */
394 bsdv->bsd_version = 0;
395 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
396 bsdv->bsd_svc = bsdr->bsd_svc;
399 switch (bsdv->bsd_svc) {
400 case SPTLRPC_BULK_SVC_INTG:
/* integrity: verify the MIC the client sent in its request bsd */
401 token.data = bsdr->bsd_data;
402 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
404 maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
406 GET_KIOV(desc), &token);
407 if (maj != GSS_S_COMPLETE) {
408 bsdv->bsd_flags |= BSD_FL_ERR;
409 CERROR("failed to verify bulk signature: %x\n", maj);
413 case SPTLRPC_BULK_SVC_PRIV:
/* privacy: the advertised clear-text size must match bd_nob */
414 if (bsdr->bsd_nob != desc->bd_nob) {
415 bsdv->bsd_flags |= BSD_FL_ERR;
416 CERROR("prepared nob %d doesn't match the actual "
417 "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
421 if (desc->bd_iov_count == 0) {
422 LASSERT(desc->bd_nob == 0);
/* decrypt the received pages using the client's wrap token */
426 token.data = bsdr->bsd_data;
427 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
429 maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
431 if (maj != GSS_S_COMPLETE) {
432 bsdv->bsd_flags |= BSD_FL_ERR;
433 CERROR("failed decrypt bulk data: %x\n", maj);
437 /* mimic gss_cli_ctx_unwrap_bulk */
438 desc->bd_nob_transferred = desc->bd_nob;
/*
 * Server side: wrap pages about to be sent for a client bulk READ.
 * Fills in the reply-side descriptor bsdv (echoing the client's bulk
 * service from bsdr), then signs the pages (INTG) or allocates
 * encryption-pool pages and encrypts them (PRIV).  Failures set
 * BSD_FL_ERR in bsdv so the client can detect them.
 *
 * NOTE(review): elided extract -- braces, declarations of rc/maj/token,
 * switch breaks/default, early returns, the tail of the "allocate
 * encryption" CERROR message and the final return are not visible.
 */
446 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
447 struct ptlrpc_bulk_desc *desc)
449 struct gss_svc_reqctx *grctx;
450 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
456 LASSERT(req->rq_svc_ctx);
457 LASSERT(req->rq_pack_bulk);
458 LASSERT(req->rq_bulk_read);
459 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
461 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
463 LASSERT(grctx->src_reqbsd);
464 LASSERT(grctx->src_repbsd);
465 LASSERT(grctx->src_ctx);
466 LASSERT(grctx->src_ctx->gsc_mechctx);
468 bsdr = grctx->src_reqbsd;
469 bsdv = grctx->src_repbsd;
/* initialize the reply descriptor, echoing the client's bulk service */
471 /* bsdr has been sanity checked during unpacking */
472 bsdv->bsd_version = 0;
473 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
474 bsdv->bsd_svc = bsdr->bsd_svc;
477 switch (bsdv->bsd_svc) {
478 case SPTLRPC_BULK_SVC_INTG:
/* integrity: compute a MIC over the pages into the reply bsd */
479 token.data = bsdv->bsd_data;
480 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
482 maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
484 GET_KIOV(desc), &token);
485 if (maj != GSS_S_COMPLETE) {
486 bsdv->bsd_flags |= BSD_FL_ERR;
487 CERROR("failed to sign bulk data: %x\n", maj);
491 case SPTLRPC_BULK_SVC_PRIV:
/* privacy: record the clear-text size for the client to pick up */
492 bsdv->bsd_nob = desc->bd_nob;
494 if (desc->bd_iov_count == 0) {
495 LASSERT(desc->bd_nob == 0);
/* take cipher-text pages from the shared sptlrpc encryption pool */
499 rc = sptlrpc_enc_pool_get_pages(desc);
501 bsdv->bsd_flags |= BSD_FL_ERR;
502 CERROR("bulk read: failed to allocate encryption "
/* encrypt the pages in place, token written into the reply bsd */
507 token.data = bsdv->bsd_data;
508 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
510 maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
512 if (maj != GSS_S_COMPLETE) {
513 bsdv->bsd_flags |= BSD_FL_ERR;
514 CERROR("failed to encrypt bulk data: %x\n", maj);