4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
31 * This file is part of Lustre, http://www.lustre.org/
32 * Lustre is a trademark of Sun Microsystems, Inc.
34 * lustre/ptlrpc/gss/gss_bulk.c
36 * Author: Eric Mei <eric.mei@sun.com>
39 #define DEBUG_SUBSYSTEM S_SEC
41 #include <linux/init.h>
42 #include <linux/module.h>
43 #include <linux/slab.h>
44 #include <linux/dcache.h>
46 #include <linux/mutex.h>
47 #include <linux/crypto.h>
49 #include <liblustre.h>
53 #include <obd_class.h>
54 #include <obd_support.h>
55 #include <lustre/lustre_idl.h>
56 #include <lustre_net.h>
57 #include <lustre_import.h>
58 #include <lustre_sec.h>
61 #include "gss_internal.h"
/*
 * Client side: fill in the bulk security descriptor (bsd) embedded in the
 * request buffer and, for bulk writes, sign (INTG) or encrypt (PRIV) the
 * bulk pages before the request is sent.
 *
 * NOTE(review): this extract has elided lines (local declarations of
 * rc/offset/maj/token, 'msg = ...' assignments, 'break;'s, braces and
 * RETURN statements -- visible as gaps in the embedded line numbers).
 * Verify all flow comments below against the original gss_bulk.c.
 */
64 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
65 struct ptlrpc_request *req,
66 struct ptlrpc_bulk_desc *desc)
68 struct gss_cli_ctx *gctx;
69 struct lustre_msg *msg;
70 struct ptlrpc_bulk_sec_desc *bsd;
/* only called when a bulk descriptor is packed and this is a bulk RPC */
77 LASSERT(req->rq_pack_bulk);
78 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
80 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
81 LASSERT(gctx->gc_mechctx);
/*
 * Locate the lustre_msg segment holding the bsd; its position depends on
 * the RPC service flavor. NULL: last segment of rq_reqbuf; AUTH/INTG:
 * second-to-last of rq_reqbuf (last is the signature); PRIV: last segment
 * of the clear-text buffer rq_clrbuf. (The 'msg = ...' assignments and
 * 'break;'s between cases are elided in this extract.)
 */
83 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
84 case SPTLRPC_SVC_NULL:
85 LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
87 offset = msg->lm_bufcount - 1;
89 case SPTLRPC_SVC_AUTH:
90 case SPTLRPC_SVC_INTG:
91 LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
93 offset = msg->lm_bufcount - 2;
95 case SPTLRPC_SVC_PRIV:
96 LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
98 offset = msg->lm_bufcount - 1;
/* initialize the outgoing bulk security descriptor */
104 bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
105 bsd->bsd_version = 0;
107 bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
108 bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
/* no bulk protection requested: nothing more to do (early return elided) */
110 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
113 LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
114 bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
116 if (req->rq_bulk_read) {
118 * bulk read: prepare receiving pages only for privacy mode.
120 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
121 return gss_cli_prep_bulk(req, desc);
124 * bulk write: sign or encrypt bulk pages.
126 bsd->bsd_nob = desc->bd_nob;
128 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
/*
 * integrity: compute a MIC over the bulk pages into the token that
 * lives in bsd_data (token length = remaining space of the segment;
 * the subtrahend on the 'token.len' line is elided here).
 */
130 token.data = bsd->bsd_data;
131 token.len = lustre_msg_buflen(msg, offset) -
134 maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
135 desc->bd_iov_count, desc->bd_iov,
137 if (maj != GSS_S_COMPLETE) {
138 CWARN("failed to sign bulk data: %x\n", maj);
/* privacy: nothing to encrypt if there are no pages */
143 if (desc->bd_iov_count == 0)
/* grab pages from the shared encryption pool to hold the ciphertext */
146 rc = sptlrpc_enc_pool_get_pages(desc);
148 CERROR("bulk write: failed to allocate "
149 "encryption pages: %d\n", rc);
153 token.data = bsd->bsd_data;
154 token.len = lustre_msg_buflen(msg, offset) -
157 maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
158 if (maj != GSS_S_COMPLETE) {
159 CWARN("fail to encrypt bulk data: %x\n", maj);
/*
 * Client side: after the reply arrives, cross-check the reply's bulk
 * security descriptor (bsdv) against the one we sent (bsdr), then for
 * bulk reads verify the MIC (INTG) or decrypt the pages (PRIV).
 *
 * NOTE(review): lines are elided in this extract ('break;'s, error
 * returns, declarations of i/nob/maj/token/roff/voff) -- confirm flow
 * against the original file.
 */
168 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
169 struct ptlrpc_request *req,
170 struct ptlrpc_bulk_desc *desc)
172 struct gss_cli_ctx *gctx;
173 struct lustre_msg *rmsg, *vmsg;
174 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
180 LASSERT(req->rq_pack_bulk);
181 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
/*
 * Locate both descriptors: bsdr in the request buffer we sent (rmsg/roff)
 * and bsdv in the reply data (vmsg/voff). Segment positions mirror the
 * wrap side: NULL -> last segment, AUTH/INTG -> second-to-last,
 * PRIV -> last segment of the clear buffers.
 */
183 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
184 case SPTLRPC_SVC_NULL:
185 vmsg = req->rq_repdata;
186 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
187 voff = vmsg->lm_bufcount - 1;
189 rmsg = req->rq_reqbuf;
190 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
191 roff = rmsg->lm_bufcount - 1; /* last segment */
193 case SPTLRPC_SVC_AUTH:
194 case SPTLRPC_SVC_INTG:
195 vmsg = req->rq_repdata;
196 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
197 voff = vmsg->lm_bufcount - 2;
199 rmsg = req->rq_reqbuf;
200 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
201 roff = rmsg->lm_bufcount - 2; /* second last segment */
203 case SPTLRPC_SVC_PRIV:
204 vmsg = req->rq_repdata;
205 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
206 voff = vmsg->lm_bufcount - 1;
208 rmsg = req->rq_clrbuf;
209 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
210 roff = rmsg->lm_bufcount - 1; /* last segment */
216 bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
217 bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
218 LASSERT(bsdr && bsdv);
/* the server must echo back the same version/type/svc triple we sent */
220 if (bsdr->bsd_version != bsdv->bsd_version ||
221 bsdr->bsd_type != bsdv->bsd_type ||
222 bsdr->bsd_svc != bsdv->bsd_svc) {
223 CERROR("bulk security descriptor mismatch: "
224 "(%u,%u,%u) != (%u,%u,%u)\n",
225 bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
226 bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
230 LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
231 bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
232 bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
235 * in privacy mode if return success, make sure bd_nob_transferred
236 * is the actual size of the clear text, otherwise upper layer
/*
 * bulk write: only check the server-reported error flag; in privacy
 * mode also normalize bd_nob_transferred to the clear-text size.
 */
239 if (req->rq_bulk_write) {
240 if (bsdv->bsd_flags & BSD_FL_ERR) {
241 CERROR("server reported bulk i/o failure\n");
245 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
246 desc->bd_nob_transferred = desc->bd_nob;
249 * bulk read, upon return success, bd_nob_transferred is
250 * the size of plain text actually received.
252 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
253 LASSERT(gctx->gc_mechctx);
255 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
258 /* fix the actual data size */
/*
 * clamp the per-page kiov_len values so their sum equals
 * bd_nob_transferred before computing/verifying the MIC
 * (the zeroing of pages past the cut-off is elided here)
 */
259 for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
260 if (desc->bd_iov[i].kiov_len + nob >
261 desc->bd_nob_transferred) {
262 desc->bd_iov[i].kiov_len =
263 desc->bd_nob_transferred - nob;
265 nob += desc->bd_iov[i].kiov_len;
/* verify the MIC the server placed in its reply bsd_data */
268 token.data = bsdv->bsd_data;
269 token.len = lustre_msg_buflen(vmsg, voff) -
272 maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
273 desc->bd_iov_count, desc->bd_iov,
275 if (maj != GSS_S_COMPLETE) {
276 CERROR("failed to verify bulk read: %x\n", maj);
279 } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
/* privacy: server tells us the clear-text size; 0 means empty bulk */
280 desc->bd_nob = bsdv->bsd_nob;
281 if (desc->bd_nob == 0)
284 token.data = bsdv->bsd_data;
285 token.len = lustre_msg_buflen(vmsg, voff) -
288 maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
290 if (maj != GSS_S_COMPLETE) {
291 CERROR("failed to decrypt bulk read: %x\n",
/* on successful decrypt, the transferred count is the clear-text size */
296 desc->bd_nob_transferred = desc->bd_nob;
/*
 * Common helper: allocate encryption-pool pages for @desc and let the
 * GSS mechanism do any per-transfer preparation. Shared by the client
 * (bulk read) and server (bulk write) privacy paths.
 * NOTE(review): early-return for empty bulk, the rc error check, and the
 * function's return statements are elided in this extract.
 */
303 static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
304 struct gss_ctx *mechctx)
/* nothing to prepare for an empty transfer */
308 if (desc->bd_iov_count == 0)
311 rc = sptlrpc_enc_pool_get_pages(desc);
/* mechanism-specific prep (e.g. per-page state); failure path elided */
315 if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
/*
 * Client side: prepare receive pages for a bulk READ. Only privacy-mode
 * flavors need preparation; anything else returns early (elided line
 * after the flavor check).
 */
321 int gss_cli_prep_bulk(struct ptlrpc_request *req,
322 struct ptlrpc_bulk_desc *desc)
327 LASSERT(req->rq_cli_ctx);
328 LASSERT(req->rq_pack_bulk);
329 LASSERT(req->rq_bulk_read);
331 if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
/* delegate to the common helper using this context's mech context */
334 rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
336 CERROR("bulk read: failed to prepare encryption "
/*
 * Server side: prepare receive pages for a bulk WRITE (client sends data
 * to the server). Mirrors gss_cli_prep_bulk: only privacy mode needs
 * preparation, using the bsd unpacked from the client's request.
 */
342 int gss_svc_prep_bulk(struct ptlrpc_request *req,
343 struct ptlrpc_bulk_desc *desc)
345 struct gss_svc_reqctx *grctx;
346 struct ptlrpc_bulk_sec_desc *bsd;
350 LASSERT(req->rq_svc_ctx);
351 LASSERT(req->rq_pack_bulk);
352 LASSERT(req->rq_bulk_write);
354 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
/* request/reply bsd and the mech context must have been set up already */
355 LASSERT(grctx->src_reqbsd);
356 LASSERT(grctx->src_repbsd);
357 LASSERT(grctx->src_ctx);
358 LASSERT(grctx->src_ctx->gsc_mechctx);
360 bsd = grctx->src_reqbsd;
/* non-privacy flavors need no page preparation (early return elided) */
361 if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
364 rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
366 CERROR("bulk write: failed to prepare encryption "
/*
 * Server side: after receiving a bulk WRITE, verify (INTG) or decrypt
 * (PRIV) the data using the token from the client's request bsd (bsdr),
 * and initialize the reply bsd (bsdv). On failure BSD_FL_ERR is set in
 * the reply so the client learns of the bulk i/o failure.
 *
 * NOTE(review): 'break;'s, error returns and declarations of rc/maj/token
 * are elided in this extract -- verify against the original file.
 */
372 int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
373 struct ptlrpc_bulk_desc *desc)
375 struct gss_svc_reqctx *grctx;
376 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
381 LASSERT(req->rq_svc_ctx);
382 LASSERT(req->rq_pack_bulk);
383 LASSERT(req->rq_bulk_write);
385 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
387 LASSERT(grctx->src_reqbsd);
388 LASSERT(grctx->src_repbsd);
389 LASSERT(grctx->src_ctx);
390 LASSERT(grctx->src_ctx->gsc_mechctx);
392 bsdr = grctx->src_reqbsd;
393 bsdv = grctx->src_repbsd;
395 /* bsdr has been sanity checked during unpacking */
/* echo the negotiated triple back in the reply descriptor */
396 bsdv->bsd_version = 0;
397 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
398 bsdv->bsd_svc = bsdr->bsd_svc;
401 switch (bsdv->bsd_svc) {
402 case SPTLRPC_BULK_SVC_INTG:
/* token = MIC the client computed, stored after the fixed bsd header */
403 token.data = bsdr->bsd_data;
404 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
406 maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
407 desc->bd_iov_count, desc->bd_iov, &token);
408 if (maj != GSS_S_COMPLETE) {
409 bsdv->bsd_flags |= BSD_FL_ERR;
410 CERROR("failed to verify bulk signature: %x\n", maj);
414 case SPTLRPC_BULK_SVC_PRIV:
/* the clear-text size the client declared must match what arrived */
415 if (bsdr->bsd_nob != desc->bd_nob) {
416 bsdv->bsd_flags |= BSD_FL_ERR;
417 CERROR("prepared nob %d doesn't match the actual "
418 "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
422 if (desc->bd_iov_count == 0) {
423 LASSERT(desc->bd_nob == 0);
427 token.data = bsdr->bsd_data;
428 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
430 maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
432 if (maj != GSS_S_COMPLETE) {
433 bsdv->bsd_flags |= BSD_FL_ERR;
434 CERROR("failed decrypt bulk data: %x\n", maj);
/*
 * Server side: before sending bulk READ data back to the client, sign
 * (INTG) or encrypt (PRIV) the pages, placing the token in the reply
 * bsd (bsdv). On failure BSD_FL_ERR is set in the reply flags.
 *
 * NOTE(review): this extract ends mid-function (the tail of the switch
 * and the function epilogue are not visible); 'break;'s, returns and
 * declarations of rc/maj/token are also elided. Verify against the
 * original file.
 */
443 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
444 struct ptlrpc_bulk_desc *desc)
446 struct gss_svc_reqctx *grctx;
447 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
453 LASSERT(req->rq_svc_ctx);
454 LASSERT(req->rq_pack_bulk);
455 LASSERT(req->rq_bulk_read);
457 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
459 LASSERT(grctx->src_reqbsd);
460 LASSERT(grctx->src_repbsd);
461 LASSERT(grctx->src_ctx);
462 LASSERT(grctx->src_ctx->gsc_mechctx);
464 bsdr = grctx->src_reqbsd;
465 bsdv = grctx->src_repbsd;
467 /* bsdr has been sanity checked during unpacking */
/* initialize the reply descriptor, echoing the client's requested svc */
468 bsdv->bsd_version = 0;
469 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
470 bsdv->bsd_svc = bsdr->bsd_svc;
473 switch (bsdv->bsd_svc) {
474 case SPTLRPC_BULK_SVC_INTG:
/* integrity: MIC over the outgoing pages goes into the reply bsd_data */
475 token.data = bsdv->bsd_data;
476 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
478 maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
479 desc->bd_iov_count, desc->bd_iov, &token);
480 if (maj != GSS_S_COMPLETE) {
481 bsdv->bsd_flags |= BSD_FL_ERR;
482 CERROR("failed to sign bulk data: %x\n", maj);
486 case SPTLRPC_BULK_SVC_PRIV:
/* privacy: tell the client the clear-text size, then encrypt in place */
487 bsdv->bsd_nob = desc->bd_nob;
489 if (desc->bd_iov_count == 0) {
490 LASSERT(desc->bd_nob == 0);
/* get ciphertext pages from the shared encryption pool */
494 rc = sptlrpc_enc_pool_get_pages(desc);
496 bsdv->bsd_flags |= BSD_FL_ERR;
497 CERROR("bulk read: failed to allocate encryption "
502 token.data = bsdv->bsd_data;
503 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
505 maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
507 if (maj != GSS_S_COMPLETE) {
508 bsdv->bsd_flags |= BSD_FL_ERR;
509 CERROR("failed to encrypt bulk data: %x\n", maj);