4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2014, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/ptlrpc/gss/gss_bulk.c
38 * Author: Eric Mei <eric.mei@sun.com>
41 #define DEBUG_SUBSYSTEM S_SEC
42 #include <linux/init.h>
43 #include <linux/module.h>
44 #include <linux/slab.h>
45 #include <linux/dcache.h>
47 #include <linux/mutex.h>
48 #include <linux/crypto.h>
51 #include <obd_class.h>
52 #include <obd_support.h>
53 #include <lustre/lustre_idl.h>
54 #include <lustre_net.h>
55 #include <lustre_import.h>
56 #include <lustre_sec.h>
59 #include "gss_internal.h"
/*
 * Client side: secure the bulk pages of @desc before the RPC carrying the
 * bulk request goes out.  Locates the bulk security descriptor (bsd)
 * segment inside the request message — its offset depends on the RPC
 * service flavor — fills it in, then for a bulk write either signs
 * (BULK_SVC_INTG) or encrypts (BULK_SVC_PRIV) the pages; for a bulk read
 * it only pre-allocates receive pages in privacy mode.
 *
 * NOTE(review): this listing has lines elided (declarations of
 * offset/rc/maj/token, the "msg = ..." assignments, case breaks, returns
 * and closing braces).  Comments below describe only the visible
 * statements.
 */
62 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
63 struct ptlrpc_request *req,
64 struct ptlrpc_bulk_desc *desc)
66 struct gss_cli_ctx *gctx;
67 struct lustre_msg *msg;
68 struct ptlrpc_bulk_sec_desc *bsd;
/* caller must have flagged this request as carrying a bulk descriptor */
75 LASSERT(req->rq_pack_bulk);
76 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
78 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
79 LASSERT(gctx->gc_mechctx);
/*
 * Pick the message segment that carries the bulk security descriptor;
 * its position differs per service flavor.
 */
81 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
82 case SPTLRPC_SVC_NULL:
83 LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
/* last segment (msg presumably set to rq_reqbuf in an elided line) */
85 offset = msg->lm_bufcount - 1;
87 case SPTLRPC_SVC_AUTH:
88 case SPTLRPC_SVC_INTG:
89 LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
/* second-to-last segment */
91 offset = msg->lm_bufcount - 2;
93 case SPTLRPC_SVC_PRIV:
94 LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
/* privacy flavor: descriptor lives in the clear-text buffer (rq_clrbuf) */
96 offset = msg->lm_bufcount - 1;
/* initialize the outgoing bulk security descriptor */
102 bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
103 bsd->bsd_version = 0;
105 bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
106 bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
/* bulk data itself unprotected: nothing further to do */
108 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
111 LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
112 bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
114 if (req->rq_bulk_read) {
116 * bulk read: prepare receiving pages only for privacy mode.
118 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
119 return gss_cli_prep_bulk(req, desc);
122 * bulk write: sign or encrypt bulk pages.
124 bsd->bsd_nob = desc->bd_nob;
126 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
/* MIC token is written into bsd_data, bounded by the segment length */
128 token.data = bsd->bsd_data;
129 token.len = lustre_msg_buflen(msg, offset) -
132 maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
133 desc->bd_iov_count, desc->bd_iov,
/* maj is a GSS-API major status code */
135 if (maj != GSS_S_COMPLETE) {
136 CWARN("failed to sign bulk data: %x\n", maj);
/* privacy path: an empty descriptor has nothing to encrypt */
141 if (desc->bd_iov_count == 0)
/* obtain pages from the shared encryption pool — presumably to hold
 * ciphertext so the caller's pages stay clear; confirm against
 * sptlrpc_enc_pool_get_pages() */
144 rc = sptlrpc_enc_pool_get_pages(desc);
146 CERROR("bulk write: failed to allocate "
147 "encryption pages: %d\n", rc);
151 token.data = bsd->bsd_data;
152 token.len = lustre_msg_buflen(msg, offset) -
155 maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
156 if (maj != GSS_S_COMPLETE) {
157 CWARN("fail to encrypt bulk data: %x\n", maj);
/*
 * Client side: after the reply for a bulk RPC arrives, cross-check the
 * request's and reply's bulk security descriptors, then for a bulk read
 * verify the signature (BULK_SVC_INTG) or decrypt the pages
 * (BULK_SVC_PRIV); for a bulk write just honour the server-reported
 * status.
 *
 * NOTE(review): this listing has lines elided (declarations of
 * voff/roff/i/nob/maj/token, case breaks, returns and closing braces).
 * Comments below describe only the visible statements.
 */
166 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
167 struct ptlrpc_request *req,
168 struct ptlrpc_bulk_desc *desc)
170 struct gss_cli_ctx *gctx;
171 struct lustre_msg *rmsg, *vmsg;
172 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
178 LASSERT(req->rq_pack_bulk);
179 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
/*
 * Locate the bsd in both the original request (rmsg/roff) and the
 * reply to verify against (vmsg/voff); offsets depend on the flavor.
 */
181 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
182 case SPTLRPC_SVC_NULL:
183 vmsg = req->rq_repdata;
184 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
185 voff = vmsg->lm_bufcount - 1;
187 rmsg = req->rq_reqbuf;
188 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
189 roff = rmsg->lm_bufcount - 1; /* last segment */
191 case SPTLRPC_SVC_AUTH:
192 case SPTLRPC_SVC_INTG:
193 vmsg = req->rq_repdata;
194 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
195 voff = vmsg->lm_bufcount - 2;
197 rmsg = req->rq_reqbuf;
198 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
199 roff = rmsg->lm_bufcount - 2; /* second last segment */
201 case SPTLRPC_SVC_PRIV:
202 vmsg = req->rq_repdata;
203 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
204 voff = vmsg->lm_bufcount - 1;
206 rmsg = req->rq_clrbuf;
207 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
208 roff = rmsg->lm_bufcount - 1; /* last segment */
214 bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
215 bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
216 LASSERT(bsdr && bsdv);
/* the reply's descriptor must echo what we sent: same version/type/svc */
218 if (bsdr->bsd_version != bsdv->bsd_version ||
219 bsdr->bsd_type != bsdv->bsd_type ||
220 bsdr->bsd_svc != bsdv->bsd_svc) {
221 CERROR("bulk security descriptor mismatch: "
222 "(%u,%u,%u) != (%u,%u,%u)\n",
223 bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
224 bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
228 LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
229 bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
230 bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
233 * in privacy mode if return success, make sure bd_nob_transferred
234 * is the actual size of the clear text, otherwise upper layer
237 if (req->rq_bulk_write) {
/* server flags the descriptor when the bulk I/O itself failed */
238 if (bsdv->bsd_flags & BSD_FL_ERR) {
239 CERROR("server reported bulk i/o failure\n");
243 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
244 desc->bd_nob_transferred = desc->bd_nob;
247 * bulk read, upon return success, bd_nob_transferred is
248 * the size of plain text actually received.
250 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
251 LASSERT(gctx->gc_mechctx);
253 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
/* clamp the per-iov lengths so their sum does not exceed what was
 * actually transferred, before verifying the MIC over them */
256 /* fix the actual data size */
257 for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
258 if (desc->bd_iov[i].kiov_len + nob >
259 desc->bd_nob_transferred) {
260 desc->bd_iov[i].kiov_len =
261 desc->bd_nob_transferred - nob;
263 nob += desc->bd_iov[i].kiov_len;
/* signature token arrives in the reply bsd's trailing bytes */
266 token.data = bsdv->bsd_data;
267 token.len = lustre_msg_buflen(vmsg, voff) -
270 maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
271 desc->bd_iov_count, desc->bd_iov,
273 if (maj != GSS_S_COMPLETE) {
274 CERROR("failed to verify bulk read: %x\n", maj);
277 } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
/* clear-text size is whatever the server put in the reply bsd */
278 desc->bd_nob = bsdv->bsd_nob;
279 if (desc->bd_nob == 0)
282 token.data = bsdv->bsd_data;
283 token.len = lustre_msg_buflen(vmsg, voff) -
286 maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
288 if (maj != GSS_S_COMPLETE) {
289 CERROR("failed to decrypt bulk read: %x\n",
/* report the decrypted (clear text) size to the upper layer */
294 desc->bd_nob_transferred = desc->bd_nob;
/*
 * Common helper for the privacy paths: populate @desc with pages from
 * the encryption pool, then let the mechanism-specific hook
 * (lgss_prep_bulk) prepare them.  Called from both gss_cli_prep_bulk()
 * and gss_svc_prep_bulk().
 *
 * NOTE(review): returns and closing braces are elided in this listing.
 */
301 static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
302 struct gss_ctx *mechctx)
/* empty descriptor: nothing to prepare */
306 if (desc->bd_iov_count == 0)
309 rc = sptlrpc_enc_pool_get_pages(desc);
313 if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
/*
 * Client side: pre-allocate receive pages for a bulk read.  Only needed
 * when the flavor's bulk service is SVC_PRIV (encrypted bulk); all other
 * flavors fall through (early return elided in this listing).
 */
319 int gss_cli_prep_bulk(struct ptlrpc_request *req,
320 struct ptlrpc_bulk_desc *desc)
325 LASSERT(req->rq_cli_ctx);
326 LASSERT(req->rq_pack_bulk);
327 LASSERT(req->rq_bulk_read);
/* only the privacy bulk service needs encryption pages up front */
329 if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
/* delegate to the shared helper with this context's mech context */
332 rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
334 CERROR("bulk read: failed to prepare encryption "
/*
 * Server side: pre-allocate encryption pages before receiving a bulk
 * write, but only when the client's request bsd asked for the privacy
 * bulk service.  Mirrors gss_cli_prep_bulk() on the client.
 *
 * NOTE(review): returns and closing braces are elided in this listing.
 */
340 int gss_svc_prep_bulk(struct ptlrpc_request *req,
341 struct ptlrpc_bulk_desc *desc)
343 struct gss_svc_reqctx *grctx;
344 struct ptlrpc_bulk_sec_desc *bsd;
348 LASSERT(req->rq_svc_ctx);
349 LASSERT(req->rq_pack_bulk);
350 LASSERT(req->rq_bulk_write);
/* server-side request context carries both bsds and the gss context */
352 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
353 LASSERT(grctx->src_reqbsd);
354 LASSERT(grctx->src_repbsd);
355 LASSERT(grctx->src_ctx);
356 LASSERT(grctx->src_ctx->gsc_mechctx);
/* the client's request descriptor dictates the bulk service */
358 bsd = grctx->src_reqbsd;
359 if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
362 rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
364 CERROR("bulk write: failed to prepare encryption "
/*
 * Server side: after receiving the pages of a bulk write, verify the
 * client's signature (BULK_SVC_INTG) or decrypt the data
 * (BULK_SVC_PRIV), as requested by the request bsd.  On any failure the
 * reply bsd gets BSD_FL_ERR set so the client learns the bulk i/o
 * failed (see gss_cli_ctx_unwrap_bulk()).
 *
 * NOTE(review): case breaks, returns and closing braces are elided in
 * this listing; declarations of maj/token are missing too.
 */
370 int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
371 struct ptlrpc_bulk_desc *desc)
373 struct gss_svc_reqctx *grctx;
374 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
379 LASSERT(req->rq_svc_ctx);
380 LASSERT(req->rq_pack_bulk);
381 LASSERT(req->rq_bulk_write);
383 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
385 LASSERT(grctx->src_reqbsd);
386 LASSERT(grctx->src_repbsd);
387 LASSERT(grctx->src_ctx);
388 LASSERT(grctx->src_ctx->gsc_mechctx);
/* bsdr = descriptor from the client's request, bsdv = our reply's */
390 bsdr = grctx->src_reqbsd;
391 bsdv = grctx->src_repbsd;
393 /* bsdr has been sanity checked during unpacking */
/* echo version/type/svc back so the client's mismatch check passes */
394 bsdv->bsd_version = 0;
395 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
396 bsdv->bsd_svc = bsdr->bsd_svc;
399 switch (bsdv->bsd_svc) {
400 case SPTLRPC_BULK_SVC_INTG:
/* MIC token sits in the trailing bytes of the request bsd */
401 token.data = bsdr->bsd_data;
402 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
404 maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
405 desc->bd_iov_count, desc->bd_iov, &token);
406 if (maj != GSS_S_COMPLETE) {
407 bsdv->bsd_flags |= BSD_FL_ERR;
408 CERROR("failed to verify bulk signature: %x\n", maj);
412 case SPTLRPC_BULK_SVC_PRIV:
/* client declared the clear-text size up front; it must match what
 * actually arrived */
413 if (bsdr->bsd_nob != desc->bd_nob) {
414 bsdv->bsd_flags |= BSD_FL_ERR;
/* NOTE(review): arguments appear swapped vs. the format string —
 * "prepared nob" prints desc->bd_nob (the actual) and "actual nob"
 * prints bsdr->bsd_nob (the prepared); confirm and fix upstream */
415 CERROR("prepared nob %d doesn't match the actual "
416 "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
/* empty transfer: nothing to decrypt */
420 if (desc->bd_iov_count == 0) {
421 LASSERT(desc->bd_nob == 0);
425 token.data = bsdr->bsd_data;
426 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
428 maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
430 if (maj != GSS_S_COMPLETE) {
431 bsdv->bsd_flags |= BSD_FL_ERR;
432 CERROR("failed decrypt bulk data: %x\n", maj);
436 /* mimic gss_cli_ctx_unwrap_bulk */
437 desc->bd_nob_transferred = desc->bd_nob;
445 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
446 struct ptlrpc_bulk_desc *desc)
448 struct gss_svc_reqctx *grctx;
449 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
455 LASSERT(req->rq_svc_ctx);
456 LASSERT(req->rq_pack_bulk);
457 LASSERT(req->rq_bulk_read);
459 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
461 LASSERT(grctx->src_reqbsd);
462 LASSERT(grctx->src_repbsd);
463 LASSERT(grctx->src_ctx);
464 LASSERT(grctx->src_ctx->gsc_mechctx);
466 bsdr = grctx->src_reqbsd;
467 bsdv = grctx->src_repbsd;
469 /* bsdr has been sanity checked during unpacking */
470 bsdv->bsd_version = 0;
471 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
472 bsdv->bsd_svc = bsdr->bsd_svc;
475 switch (bsdv->bsd_svc) {
476 case SPTLRPC_BULK_SVC_INTG:
477 token.data = bsdv->bsd_data;
478 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
480 maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
481 desc->bd_iov_count, desc->bd_iov, &token);
482 if (maj != GSS_S_COMPLETE) {
483 bsdv->bsd_flags |= BSD_FL_ERR;
484 CERROR("failed to sign bulk data: %x\n", maj);
488 case SPTLRPC_BULK_SVC_PRIV:
489 bsdv->bsd_nob = desc->bd_nob;
491 if (desc->bd_iov_count == 0) {
492 LASSERT(desc->bd_nob == 0);
496 rc = sptlrpc_enc_pool_get_pages(desc);
498 bsdv->bsd_flags |= BSD_FL_ERR;
499 CERROR("bulk read: failed to allocate encryption "
504 token.data = bsdv->bsd_data;
505 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
507 maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
509 if (maj != GSS_S_COMPLETE) {
510 bsdv->bsd_flags |= BSD_FL_ERR;
511 CERROR("failed to encrypt bulk data: %x\n", maj);