4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2014, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/ptlrpc/gss/gss_bulk.c
38 * Author: Eric Mei <eric.mei@sun.com>
41 #define DEBUG_SUBSYSTEM S_SEC
42 #include <linux/init.h>
43 #include <linux/module.h>
44 #include <linux/slab.h>
45 #include <linux/dcache.h>
47 #include <linux/mutex.h>
48 #include <linux/crypto.h>
51 #include <obd_class.h>
52 #include <obd_support.h>
53 #include <lustre/lustre_idl.h>
54 #include <lustre_net.h>
55 #include <lustre_import.h>
56 #include <lustre_sec.h>
59 #include "gss_internal.h"
/*
 * Client side: protect the bulk payload of an outgoing request.
 *
 * Locates the bulk security descriptor (bsd) segment inside the request
 * buffer — its position depends on the RPC service flavor — and fills in
 * its version/type/svc fields.  For bulk writes the pages are then either
 * MIC-signed (SPTLRPC_BULK_SVC_INTG) or encrypted in place using pages
 * from the encryption pool (SPTLRPC_BULK_SVC_PRIV).  For bulk reads only
 * privacy mode needs work here, delegated to gss_cli_prep_bulk().
 *
 * NOTE(review): this excerpt appears to have lines elided (opening brace,
 * some declarations, 'break' statements, msg assignments and return
 * paths are not visible); comments below describe only the visible code.
 */
62 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
63 struct ptlrpc_request *req,
64 struct ptlrpc_bulk_desc *desc)
66 struct gss_cli_ctx *gctx;
67 struct lustre_msg *msg;
68 struct ptlrpc_bulk_sec_desc *bsd;
75 LASSERT(req->rq_pack_bulk);
76 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
77 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
79 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
80 LASSERT(gctx->gc_mechctx);
/* select the message buffer and the segment index that holds the bsd;
 * the buffer layout differs per service flavor of the RPC itself */
82 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
83 case SPTLRPC_SVC_NULL:
84 LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
86 offset = msg->lm_bufcount - 1; /* bsd is the last segment */
88 case SPTLRPC_SVC_AUTH:
89 case SPTLRPC_SVC_INTG:
90 LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
92 offset = msg->lm_bufcount - 2; /* bsd is second-to-last segment */
94 case SPTLRPC_SVC_PRIV:
95 LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
97 offset = msg->lm_bufcount - 1; /* bsd is last segment of clear buffer */
/* initialize the outgoing bulk security descriptor */
103 bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
104 bsd->bsd_version = 0;
106 bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
107 bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
/* bulk data itself is unprotected in SVC_NULL mode: nothing more to do */
109 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
112 LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
113 bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
115 if (req->rq_bulk_read) {
117 * bulk read: prepare receiving pages only for privacy mode.
119 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
120 return gss_cli_prep_bulk(req, desc);
123 * bulk write: sign or encrypt bulk pages.
125 bsd->bsd_nob = desc->bd_nob;
127 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
/* integrity: MIC token is written into the bsd's trailing data area */
129 token.data = bsd->bsd_data;
130 token.len = lustre_msg_buflen(msg, offset) -
133 maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
137 if (maj != GSS_S_COMPLETE) {
138 CWARN("failed to sign bulk data: %x\n", maj);
/* privacy: swap in encryption-pool pages, then encrypt;
 * an empty bulk (no iovs) needs no work */
143 if (desc->bd_iov_count == 0)
146 rc = sptlrpc_enc_pool_get_pages(desc);
148 CERROR("bulk write: failed to allocate "
149 "encryption pages: %d\n", rc);
153 token.data = bsd->bsd_data;
154 token.len = lustre_msg_buflen(msg, offset) -
157 maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
158 if (maj != GSS_S_COMPLETE) {
159 CWARN("fail to encrypt bulk data: %x\n", maj);
/*
 * Client side: verify/decrypt bulk data after the reply arrives.
 *
 * Cross-checks the bulk security descriptor we sent (bsdr, from the
 * request buffer) against the one the server returned (bsdv, from the
 * reply buffer).  For bulk writes, only a server-reported error flag and
 * a transferred-size fixup (privacy mode) matter.  For bulk reads, the
 * received pages are either MIC-verified (SVC_INTG) or decrypted
 * (SVC_PRIV), and bd_nob_transferred ends up as the clear-text size.
 *
 * NOTE(review): this excerpt appears to have lines elided (opening brace,
 * declarations, 'break' statements, return paths); comments below
 * describe only the visible code.
 */
168 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
169 struct ptlrpc_request *req,
170 struct ptlrpc_bulk_desc *desc)
172 struct gss_cli_ctx *gctx;
173 struct lustre_msg *rmsg, *vmsg;
174 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
180 LASSERT(req->rq_pack_bulk);
181 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
182 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
/* locate the request-side (rmsg/roff) and reply-side (vmsg/voff) bsd
 * segments; positions depend on the RPC service flavor */
184 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
185 case SPTLRPC_SVC_NULL:
186 vmsg = req->rq_repdata;
187 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
188 voff = vmsg->lm_bufcount - 1;
190 rmsg = req->rq_reqbuf;
191 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
192 roff = rmsg->lm_bufcount - 1; /* last segment */
194 case SPTLRPC_SVC_AUTH:
195 case SPTLRPC_SVC_INTG:
196 vmsg = req->rq_repdata;
197 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
198 voff = vmsg->lm_bufcount - 2;
200 rmsg = req->rq_reqbuf;
201 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
202 roff = rmsg->lm_bufcount - 2; /* second last segment */
204 case SPTLRPC_SVC_PRIV:
205 vmsg = req->rq_repdata;
206 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
207 voff = vmsg->lm_bufcount - 1;
209 rmsg = req->rq_clrbuf;
210 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
211 roff = rmsg->lm_bufcount - 1; /* last segment */
217 bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
218 bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
219 LASSERT(bsdr && bsdv);
/* the reply's descriptor must echo the one we sent */
221 if (bsdr->bsd_version != bsdv->bsd_version ||
222 bsdr->bsd_type != bsdv->bsd_type ||
223 bsdr->bsd_svc != bsdv->bsd_svc) {
224 CERROR("bulk security descriptor mismatch: "
225 "(%u,%u,%u) != (%u,%u,%u)\n",
226 bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
227 bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
231 LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
232 bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
233 bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
236 * in privacy mode if return success, make sure bd_nob_transferred
237 * is the actual size of the clear text, otherwise upper layer
240 if (req->rq_bulk_write) {
241 if (bsdv->bsd_flags & BSD_FL_ERR) {
242 CERROR("server reported bulk i/o failure\n");
246 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
247 desc->bd_nob_transferred = desc->bd_nob;
250 * bulk read, upon return success, bd_nob_transferred is
251 * the size of plain text actually received.
253 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
254 LASSERT(gctx->gc_mechctx);
256 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
259 /* fix the actual data size */
260 for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
/* clamp the last partially-filled kiov to the bytes actually received */
261 if (BD_GET_KIOV(desc, i).kiov_len + nob >
262 desc->bd_nob_transferred) {
263 BD_GET_KIOV(desc, i).kiov_len =
264 desc->bd_nob_transferred - nob;
266 nob += BD_GET_KIOV(desc, i).kiov_len;
/* the MIC to verify lives in the reply bsd's trailing data area */
269 token.data = bsdv->bsd_data;
270 token.len = lustre_msg_buflen(vmsg, voff) -
273 maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
277 if (maj != GSS_S_COMPLETE) {
278 CERROR("failed to verify bulk read: %x\n", maj);
281 } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
/* server tells us the clear-text size; zero means nothing to decrypt */
282 desc->bd_nob = bsdv->bsd_nob;
283 if (desc->bd_nob == 0)
286 token.data = bsdv->bsd_data;
287 token.len = lustre_msg_buflen(vmsg, voff) -
290 maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
292 if (maj != GSS_S_COMPLETE) {
293 CERROR("failed to decrypt bulk read: %x\n",
298 desc->bd_nob_transferred = desc->bd_nob;
/*
 * Helper: attach encryption-pool pages to 'desc' and let the GSS mech
 * pre-process them for privacy-protected bulk transfer.  An empty bulk
 * (no iovs) requires no preparation.
 *
 * NOTE(review): error-return lines appear elided from this excerpt;
 * only the visible calls are documented.
 */
305 static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
306 struct gss_ctx *mechctx)
310 if (desc->bd_iov_count == 0)
313 rc = sptlrpc_enc_pool_get_pages(desc);
317 if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
/*
 * Client side: prepare bulk pages before a bulk-read request is sent.
 * Preparation (via gss_prep_bulk() on the client's mech context) is only
 * needed when the bulk flavor is privacy mode; all other flavors return
 * without doing work.
 *
 * NOTE(review): declarations and return lines appear elided from this
 * excerpt.
 */
323 int gss_cli_prep_bulk(struct ptlrpc_request *req,
324 struct ptlrpc_bulk_desc *desc)
329 LASSERT(req->rq_cli_ctx);
330 LASSERT(req->rq_pack_bulk);
331 LASSERT(req->rq_bulk_read);
/* only privacy mode needs receive pages prepared in advance */
333 if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
336 rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
338 CERROR("bulk read: failed to prepare encryption "
/*
 * Server side: prepare bulk pages before receiving a bulk write.
 * Uses the bsd unpacked from the client's request (grctx->src_reqbsd) to
 * decide; only privacy mode needs pages prepared, via gss_prep_bulk() on
 * the server's mech context.
 *
 * NOTE(review): declarations and return lines appear elided from this
 * excerpt.
 */
344 int gss_svc_prep_bulk(struct ptlrpc_request *req,
345 struct ptlrpc_bulk_desc *desc)
347 struct gss_svc_reqctx *grctx;
348 struct ptlrpc_bulk_sec_desc *bsd;
352 LASSERT(req->rq_svc_ctx);
353 LASSERT(req->rq_pack_bulk);
354 LASSERT(req->rq_bulk_write);
356 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
357 LASSERT(grctx->src_reqbsd);
358 LASSERT(grctx->src_repbsd);
359 LASSERT(grctx->src_ctx);
360 LASSERT(grctx->src_ctx->gsc_mechctx);
/* the client's request bsd dictates the bulk protection mode */
362 bsd = grctx->src_reqbsd;
363 if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
366 rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
368 CERROR("bulk write: failed to prepare encryption "
/*
 * Server side: verify or decrypt bulk data received from a bulk write.
 *
 * Echoes the request bsd's svc into the reply bsd (bsdv), then either
 * verifies the client's MIC (SVC_INTG) or decrypts the pages (SVC_PRIV).
 * On any failure BSD_FL_ERR is set in the reply bsd so the client learns
 * the bulk i/o failed.  In privacy mode, bd_nob_transferred is set to the
 * clear-text size on success, mirroring gss_cli_ctx_unwrap_bulk().
 *
 * NOTE(review): this excerpt appears to have lines elided (opening brace,
 * declarations, 'break' statements, return paths); comments describe only
 * the visible code.
 */
374 int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
375 struct ptlrpc_bulk_desc *desc)
377 struct gss_svc_reqctx *grctx;
378 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
383 LASSERT(req->rq_svc_ctx);
384 LASSERT(req->rq_pack_bulk);
385 LASSERT(req->rq_bulk_write);
386 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
388 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
390 LASSERT(grctx->src_reqbsd);
391 LASSERT(grctx->src_repbsd);
392 LASSERT(grctx->src_ctx);
393 LASSERT(grctx->src_ctx->gsc_mechctx);
395 bsdr = grctx->src_reqbsd;
396 bsdv = grctx->src_repbsd;
398 /* bsdr has been sanity checked during unpacking */
399 bsdv->bsd_version = 0;
400 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
401 bsdv->bsd_svc = bsdr->bsd_svc;
404 switch (bsdv->bsd_svc) {
405 case SPTLRPC_BULK_SVC_INTG:
/* integrity: the client's MIC is in the request bsd's data area */
406 token.data = bsdr->bsd_data;
407 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
409 maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
411 GET_KIOV(desc), &token);
412 if (maj != GSS_S_COMPLETE) {
/* flag the failure so the client sees it in the reply bsd */
413 bsdv->bsd_flags |= BSD_FL_ERR;
414 CERROR("failed to verify bulk signature: %x\n", maj);
418 case SPTLRPC_BULK_SVC_PRIV:
/* privacy: sanity-check size the client declared, then decrypt */
419 if (bsdr->bsd_nob != desc->bd_nob) {
420 bsdv->bsd_flags |= BSD_FL_ERR;
421 CERROR("prepared nob %d doesn't match the actual "
422 "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
426 if (desc->bd_iov_count == 0) {
427 LASSERT(desc->bd_nob == 0);
431 token.data = bsdr->bsd_data;
432 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
434 maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
436 if (maj != GSS_S_COMPLETE) {
437 bsdv->bsd_flags |= BSD_FL_ERR;
438 CERROR("failed decrypt bulk data: %x\n", maj);
442 /* mimic gss_cli_ctx_unwrap_bulk */
443 desc->bd_nob_transferred = desc->bd_nob;
/*
 * Server side: protect bulk data about to be sent for a bulk read.
 *
 * Echoes the request bsd's svc into the reply bsd (bsdv), then either
 * computes a MIC over the bulk pages (SVC_INTG) or encrypts them with
 * encryption-pool pages (SVC_PRIV), storing the token in the reply bsd's
 * data area.  On failure BSD_FL_ERR is set so the client learns the bulk
 * i/o failed.
 *
 * NOTE(review): this excerpt is truncated — the function visibly
 * continues past the last line shown — and intermediate lines (opening
 * brace, declarations, 'break's, returns) appear elided; comments
 * describe only the visible code.
 */
451 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
452 struct ptlrpc_bulk_desc *desc)
454 struct gss_svc_reqctx *grctx;
455 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
461 LASSERT(req->rq_svc_ctx);
462 LASSERT(req->rq_pack_bulk);
463 LASSERT(req->rq_bulk_read);
464 LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
466 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
468 LASSERT(grctx->src_reqbsd);
469 LASSERT(grctx->src_repbsd);
470 LASSERT(grctx->src_ctx);
471 LASSERT(grctx->src_ctx->gsc_mechctx);
473 bsdr = grctx->src_reqbsd;
474 bsdv = grctx->src_repbsd;
476 /* bsdr has been sanity checked during unpacking */
477 bsdv->bsd_version = 0;
478 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
479 bsdv->bsd_svc = bsdr->bsd_svc;
482 switch (bsdv->bsd_svc) {
483 case SPTLRPC_BULK_SVC_INTG:
/* integrity: write the MIC into the reply bsd's data area */
484 token.data = bsdv->bsd_data;
485 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
487 maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
489 GET_KIOV(desc), &token);
490 if (maj != GSS_S_COMPLETE) {
491 bsdv->bsd_flags |= BSD_FL_ERR;
492 CERROR("failed to sign bulk data: %x\n", maj);
496 case SPTLRPC_BULK_SVC_PRIV:
/* privacy: record clear-text size, then encrypt in pool pages;
 * an empty bulk needs no work */
497 bsdv->bsd_nob = desc->bd_nob;
499 if (desc->bd_iov_count == 0) {
500 LASSERT(desc->bd_nob == 0);
504 rc = sptlrpc_enc_pool_get_pages(desc);
506 bsdv->bsd_flags |= BSD_FL_ERR;
507 CERROR("bulk read: failed to allocate encryption "
512 token.data = bsdv->bsd_data;
513 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
515 maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
517 if (maj != GSS_S_COMPLETE) {
518 bsdv->bsd_flags |= BSD_FL_ERR;
519 CERROR("failed to encrypt bulk data: %x\n", maj);