4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
31 * This file is part of Lustre, http://www.lustre.org/
32 * Lustre is a trademark of Sun Microsystems, Inc.
34 * lustre/ptlrpc/gss/gss_bulk.c
36 * Author: Eric Mei <eric.mei@sun.com>
40 # define EXPORT_SYMTAB
42 #define DEBUG_SUBSYSTEM S_SEC
44 #include <linux/init.h>
45 #include <linux/module.h>
46 #include <linux/slab.h>
47 #include <linux/dcache.h>
49 #include <linux/mutex.h>
50 #include <linux/crypto.h>
52 #include <liblustre.h>
56 #include <obd_class.h>
57 #include <obd_support.h>
58 #include <lustre/lustre_idl.h>
59 #include <lustre_net.h>
60 #include <lustre_import.h>
61 #include <lustre_sec.h>
64 #include "gss_internal.h"
/*
 * Client side: protect the bulk data of @req (described by @desc) before
 * the RPC is sent.  Locates the ptlrpc_bulk_sec_desc segment in the request
 * buffer (position depends on the RPC-level security service), records the
 * bulk service level, then either signs (INTG) or encrypts (PRIV) the bulk
 * pages.  For a bulk read with privacy, only the receive pages are
 * prepared here via gss_cli_prep_bulk().
 *
 * NOTE(review): this excerpt is elided — declarations of offset/token/maj/rc,
 * the switch 'break's, msg assignments for the NULL/AUTH/INTG cases, and the
 * return paths are not visible here; confirm against the full file.
 */
67 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
68 struct ptlrpc_request *req,
69 struct ptlrpc_bulk_desc *desc)
71 struct gss_cli_ctx *gctx;
72 struct lustre_msg *msg;
73 struct ptlrpc_bulk_sec_desc *bsd;
/* caller must have requested bulk protection on a read or write request */
80 LASSERT(req->rq_pack_bulk);
81 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
/* recover the GSS client context wrapping this generic cli_ctx */
83 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
84 LASSERT(gctx->gc_mechctx);
/*
 * The bulk security descriptor lives in a different segment of the
 * request buffer depending on the RPC security service:
 * NULL -> last segment of rq_reqbuf; AUTH/INTG -> second-to-last of
 * rq_reqbuf (the last holds the RPC signature); PRIV -> last segment
 * of the clear-text buffer rq_clrbuf.
 */
86 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
87 case SPTLRPC_SVC_NULL:
88 LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
90 offset = msg->lm_bufcount - 1;
92 case SPTLRPC_SVC_AUTH:
93 case SPTLRPC_SVC_INTG:
94 LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
96 offset = msg->lm_bufcount - 2;
98 case SPTLRPC_SVC_PRIV:
99 LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
100 msg = req->rq_clrbuf;
101 offset = msg->lm_bufcount - 1;
/* initialize the outgoing bulk security descriptor */
107 bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
108 bsd->bsd_version = 0;
110 bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
111 bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
/* nothing more to do when bulk data itself is unprotected */
113 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
116 LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
117 bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
119 if (req->rq_bulk_read) {
121 * bulk read: prepare receiving pages only for privacy mode.
123 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
124 return gss_cli_prep_bulk(req, desc);
127 * bulk write: sign or encrypt bulk pages.
129 bsd->bsd_nob = desc->bd_nob;
131 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
/*
 * Integrity: compute a MIC over the bulk iov and store it in
 * bsd_data; the token is bounded by the descriptor segment size.
 */
133 token.data = bsd->bsd_data;
134 token.len = lustre_msg_buflen(msg, offset) -
137 maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
138 desc->bd_iov_count, desc->bd_iov,
140 if (maj != GSS_S_COMPLETE) {
141 CWARN("failed to sign bulk data: %x\n", maj);
/*
 * Privacy: allocate pages from the encryption pool and encrypt
 * the bulk data in place (an empty descriptor needs no work).
 */
146 if (desc->bd_iov_count == 0)
149 rc = sptlrpc_enc_pool_get_pages(desc);
151 CERROR("bulk write: failed to allocate "
152 "encryption pages: %d\n", rc);
156 token.data = bsd->bsd_data;
157 token.len = lustre_msg_buflen(msg, offset) -
160 maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
161 if (maj != GSS_S_COMPLETE) {
162 CWARN("fail to encrypt bulk data: %x\n", maj);
/*
 * Client side: verify/decrypt protection of a completed bulk transfer.
 * Locates both the locally-sent bulk descriptor (bsdr, in the request
 * buffer) and the server's reply descriptor (bsdv, in rq_repdata),
 * cross-checks them, then — for bulk reads — verifies the MIC (INTG) or
 * decrypts the received pages (PRIV).  For bulk writes it only checks the
 * server-reported error flag and fixes up bd_nob_transferred for PRIV.
 *
 * NOTE(review): this excerpt is elided — declarations (voff/roff/token/maj,
 * loop variables i/nob), switch 'break's, and return statements are not
 * visible here; confirm against the full file.
 */
171 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
172 struct ptlrpc_request *req,
173 struct ptlrpc_bulk_desc *desc)
175 struct gss_cli_ctx *gctx;
176 struct lustre_msg *rmsg, *vmsg;
177 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
183 LASSERT(req->rq_pack_bulk);
184 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
/*
 * Segment positions mirror gss_cli_ctx_wrap_bulk(): last segment for
 * NULL/PRIV, second-to-last for AUTH/INTG.
 * NOTE(review): vmsg->lm_bufcount and rmsg->lm_bufcount are dereferenced
 * BEFORE the LASSERTs that check the pointers non-NULL, so the NULL half
 * of each assertion can never fire — the checks should precede the
 * dereferences.
 */
186 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
187 case SPTLRPC_SVC_NULL:
188 vmsg = req->rq_repdata;
189 voff = vmsg->lm_bufcount - 1;
190 LASSERT(vmsg && vmsg->lm_bufcount >= 3);
192 rmsg = req->rq_reqbuf;
193 roff = rmsg->lm_bufcount - 1; /* last segment */
194 LASSERT(rmsg && rmsg->lm_bufcount >= 3);
196 case SPTLRPC_SVC_AUTH:
197 case SPTLRPC_SVC_INTG:
198 vmsg = req->rq_repdata;
199 voff = vmsg->lm_bufcount - 2;
200 LASSERT(vmsg && vmsg->lm_bufcount >= 4);
202 rmsg = req->rq_reqbuf;
203 roff = rmsg->lm_bufcount - 2; /* second last segment */
204 LASSERT(rmsg && rmsg->lm_bufcount >= 4);
206 case SPTLRPC_SVC_PRIV:
207 vmsg = req->rq_repdata;
208 voff = vmsg->lm_bufcount - 1;
209 LASSERT(vmsg && vmsg->lm_bufcount >= 2);
211 rmsg = req->rq_clrbuf;
212 roff = rmsg->lm_bufcount - 1; /* last segment */
213 LASSERT(rmsg && rmsg->lm_bufcount >= 2);
/* request-side (ours) and reply-side (server's) descriptors */
219 bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
220 bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
221 LASSERT(bsdr && bsdv);
/* the server must echo back the same version/type/service we sent */
223 if (bsdr->bsd_version != bsdv->bsd_version ||
224 bsdr->bsd_type != bsdv->bsd_type ||
225 bsdr->bsd_svc != bsdv->bsd_svc) {
226 CERROR("bulk security descriptor mismatch: "
227 "(%u,%u,%u) != (%u,%u,%u)\n",
228 bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
229 bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
233 LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
234 bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
235 bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
238 * in privacy mode if return success, make sure bd_nob_transferred
239 * is the actual size of the clear text, otherwise upper layer
/* bulk write: nothing to unwrap; just honor the server's error flag */
242 if (req->rq_bulk_write) {
243 if (bsdv->bsd_flags & BSD_FL_ERR) {
244 CERROR("server reported bulk i/o failure\n");
248 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
249 desc->bd_nob_transferred = desc->bd_nob;
252 * bulk read, upon return success, bd_nob_transferred is
253 * the size of plain text actually received.
255 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
256 LASSERT(gctx->gc_mechctx);
258 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
/*
 * Trim kiov lengths so the iov covers exactly the bytes actually
 * transferred, then verify the server's MIC over that data.
 */
261 /* fix the actual data size */
262 for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
263 if (desc->bd_iov[i].kiov_len + nob >
264 desc->bd_nob_transferred) {
265 desc->bd_iov[i].kiov_len =
266 desc->bd_nob_transferred - nob;
268 nob += desc->bd_iov[i].kiov_len;
271 token.data = bsdv->bsd_data;
272 token.len = lustre_msg_buflen(vmsg, voff) -
275 maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
276 desc->bd_iov_count, desc->bd_iov,
278 if (maj != GSS_S_COMPLETE) {
279 CERROR("failed to verify bulk read: %x\n", maj);
/*
 * Privacy: the clear-text size comes from the server's descriptor;
 * decrypt in place unless the transfer was empty.
 */
282 } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
283 desc->bd_nob = bsdv->bsd_nob;
284 if (desc->bd_nob == 0)
287 token.data = bsdv->bsd_data;
288 token.len = lustre_msg_buflen(vmsg, voff) -
291 maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
293 if (maj != GSS_S_COMPLETE) {
294 CERROR("failed to decrypt bulk read: %x\n",
/* report clear-text size to the upper layer on success */
299 desc->bd_nob_transferred = desc->bd_nob;
/*
 * Common helper for client and server: prepare @desc for an encrypted
 * (privacy-mode) bulk transfer — allocate pages from the sptlrpc
 * encryption pool and let the GSS mechanism do any per-transfer setup.
 * An empty descriptor (no iovs) needs no preparation.
 *
 * NOTE(review): the rc declaration, the error check after
 * sptlrpc_enc_pool_get_pages(), and the return statements are elided from
 * this excerpt; confirm against the full file.
 */
306 static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
307 struct gss_ctx *mechctx)
311 if (desc->bd_iov_count == 0)
314 rc = sptlrpc_enc_pool_get_pages(desc);
318 if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
/*
 * Client side: prepare receive pages for a privacy-protected bulk read.
 * A no-op (presumably returns success — elided here) when the bulk flavor
 * is not SPTLRPC_BULK_SVC_PRIV; otherwise delegates to gss_prep_bulk()
 * with the client context's GSS mechanism context.
 *
 * NOTE(review): rc declaration, early-return body, and the tail of the
 * CERROR/return path are elided from this excerpt.
 */
324 int gss_cli_prep_bulk(struct ptlrpc_request *req,
325 struct ptlrpc_bulk_desc *desc)
/* only valid for bulk-read requests that carry bulk security */
330 LASSERT(req->rq_cli_ctx);
331 LASSERT(req->rq_pack_bulk);
332 LASSERT(req->rq_bulk_read);
334 if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
337 rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
339 CERROR("bulk read: failed to prepare encryption "
/*
 * Server side: prepare receive pages for a privacy-protected bulk write
 * from the client.  The required bulk service level is taken from the
 * request's bulk security descriptor (already unpacked into
 * grctx->src_reqbsd); non-PRIV flavors need no preparation.
 *
 * NOTE(review): rc declaration, early-return body, and the tail of the
 * CERROR/return path are elided from this excerpt.
 */
345 int gss_svc_prep_bulk(struct ptlrpc_request *req,
346 struct ptlrpc_bulk_desc *desc)
348 struct gss_svc_reqctx *grctx;
349 struct ptlrpc_bulk_sec_desc *bsd;
/* only valid for bulk-write requests that carry bulk security */
353 LASSERT(req->rq_svc_ctx);
354 LASSERT(req->rq_pack_bulk);
355 LASSERT(req->rq_bulk_write);
/* the per-request GSS server context must be fully set up */
357 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
358 LASSERT(grctx->src_reqbsd);
359 LASSERT(grctx->src_repbsd);
360 LASSERT(grctx->src_ctx);
361 LASSERT(grctx->src_ctx->gsc_mechctx);
363 bsd = grctx->src_reqbsd;
364 if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
367 rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
369 CERROR("bulk write: failed to prepare encryption "
/*
 * Server side: verify/decrypt bulk data received from a client bulk
 * write.  Echoes the request descriptor's service level into the reply
 * descriptor (bsdv), then verifies the client's MIC (INTG) or decrypts
 * the pages (PRIV).  Failures are reported back to the client by setting
 * BSD_FL_ERR in the reply descriptor rather than only failing locally.
 *
 * NOTE(review): token/maj/rc declarations, switch 'break's, the NULL/
 * default cases, and return statements are elided from this excerpt.
 */
375 int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
376 struct ptlrpc_bulk_desc *desc)
378 struct gss_svc_reqctx *grctx;
379 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
/* only valid for bulk-write requests that carry bulk security */
384 LASSERT(req->rq_svc_ctx);
385 LASSERT(req->rq_pack_bulk);
386 LASSERT(req->rq_bulk_write);
388 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
390 LASSERT(grctx->src_reqbsd);
391 LASSERT(grctx->src_repbsd);
392 LASSERT(grctx->src_ctx);
393 LASSERT(grctx->src_ctx->gsc_mechctx);
/* bsdr = client's request descriptor, bsdv = our reply descriptor */
395 bsdr = grctx->src_reqbsd;
396 bsdv = grctx->src_repbsd;
398 /* bsdr has been sanity checked during unpacking */
399 bsdv->bsd_version = 0;
400 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
401 bsdv->bsd_svc = bsdr->bsd_svc;
404 switch (bsdv->bsd_svc) {
405 case SPTLRPC_BULK_SVC_INTG:
/* verify the client's MIC over the received bulk iov */
406 token.data = bsdr->bsd_data;
407 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
409 maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
410 desc->bd_iov_count, desc->bd_iov, &token);
411 if (maj != GSS_S_COMPLETE) {
/* flag the failure in the reply so the client sees it */
412 bsdv->bsd_flags |= BSD_FL_ERR;
413 CERROR("failed to verify bulk signature: %x\n", maj);
417 case SPTLRPC_BULK_SVC_PRIV:
/* the client declared the clear-text size up front; it must match */
418 if (bsdr->bsd_nob != desc->bd_nob) {
419 bsdv->bsd_flags |= BSD_FL_ERR;
420 CERROR("prepared nob %d doesn't match the actual "
421 "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
/* empty transfer: nothing to decrypt */
425 if (desc->bd_iov_count == 0) {
426 LASSERT(desc->bd_nob == 0);
/* decrypt the received pages using the client-sent token */
430 token.data = bsdr->bsd_data;
431 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
433 maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
435 if (maj != GSS_S_COMPLETE) {
436 bsdv->bsd_flags |= BSD_FL_ERR;
437 CERROR("failed decrypt bulk data: %x\n", maj);
/*
 * Server side: protect bulk data being sent to the client for a bulk
 * read.  Mirrors gss_svc_unwrap_bulk(): copies the negotiated service
 * level into the reply descriptor, then signs (INTG) or encrypts (PRIV)
 * the outgoing pages, storing the token in the reply descriptor's
 * bsd_data.  Failures set BSD_FL_ERR in the reply for the client.
 *
 * NOTE(review): token/maj/rc declarations, switch 'break's, the NULL/
 * default cases, return statements, and the function's tail are elided
 * from this excerpt; confirm against the full file.
 */
446 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
447 struct ptlrpc_bulk_desc *desc)
449 struct gss_svc_reqctx *grctx;
450 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
/* only valid for bulk-read requests that carry bulk security */
456 LASSERT(req->rq_svc_ctx);
457 LASSERT(req->rq_pack_bulk);
458 LASSERT(req->rq_bulk_read);
460 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
462 LASSERT(grctx->src_reqbsd);
463 LASSERT(grctx->src_repbsd);
464 LASSERT(grctx->src_ctx);
465 LASSERT(grctx->src_ctx->gsc_mechctx);
/* bsdr = client's request descriptor, bsdv = our reply descriptor */
467 bsdr = grctx->src_reqbsd;
468 bsdv = grctx->src_repbsd;
470 /* bsdr has been sanity checked during unpacking */
471 bsdv->bsd_version = 0;
472 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
473 bsdv->bsd_svc = bsdr->bsd_svc;
476 switch (bsdv->bsd_svc) {
477 case SPTLRPC_BULK_SVC_INTG:
/* sign outgoing bulk data; MIC goes into the reply descriptor */
478 token.data = bsdv->bsd_data;
479 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
481 maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
482 desc->bd_iov_count, desc->bd_iov, &token);
483 if (maj != GSS_S_COMPLETE) {
484 bsdv->bsd_flags |= BSD_FL_ERR;
485 CERROR("failed to sign bulk data: %x\n", maj);
489 case SPTLRPC_BULK_SVC_PRIV:
/* advertise the clear-text size to the client */
490 bsdv->bsd_nob = desc->bd_nob;
/* empty transfer: nothing to encrypt */
492 if (desc->bd_iov_count == 0) {
493 LASSERT(desc->bd_nob == 0);
/* encrypt in place via pages from the encryption pool */
497 rc = sptlrpc_enc_pool_get_pages(desc);
499 bsdv->bsd_flags |= BSD_FL_ERR;
500 CERROR("bulk read: failed to allocate encryption "
505 token.data = bsdv->bsd_data;
506 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
508 maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
510 if (maj != GSS_S_COMPLETE) {
511 bsdv->bsd_flags |= BSD_FL_ERR;
512 CERROR("failed to encrypt bulk data: %x\n", maj);