1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/ptlrpc/gss/gss_bulk.c
38 * Author: Eric Mei <eric.mei@sun.com>
42 # define EXPORT_SYMTAB
44 #define DEBUG_SUBSYSTEM S_SEC
46 #include <linux/init.h>
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/dcache.h>
51 #include <linux/random.h>
52 #include <linux/mutex.h>
53 #include <linux/crypto.h>
55 #include <liblustre.h>
59 #include <obd_class.h>
60 #include <obd_support.h>
61 #include <lustre/lustre_idl.h>
62 #include <lustre_net.h>
63 #include <lustre_import.h>
64 #include <lustre_sec.h>
67 #include "gss_internal.h"
/*
 * gss_cli_ctx_wrap_bulk()
 *
 * Client side: locate and fill the bulk security descriptor (bsd) segment
 * of the outgoing request, then protect the bulk pages of a write request
 * according to the bulk service level: sign with lgss_get_mic() for
 * SPTLRPC_BULK_SVC_INTG, or encrypt with lgss_wrap_bulk() for
 * SPTLRPC_BULK_SVC_PRIV.  For a bulk read only privacy mode needs work
 * here (prepare receiving pages via gss_cli_prep_bulk()).
 *
 * NOTE(review): this chunk is missing lines (opening brace, 'break'
 * statements, declarations of offset/rc/maj/token, the msg assignment in
 * the SVC_NULL and SVC_AUTH/INTG cases, and the function epilogue) --
 * comments below describe only what is visible.
 */
70 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
71 struct ptlrpc_request *req,
72 struct ptlrpc_bulk_desc *desc)
74 struct gss_cli_ctx *gctx;
75 struct lustre_msg *msg;
76 struct ptlrpc_bulk_sec_desc *bsd;
/* caller must have requested bulk packing on a read or write request */
83 LASSERT(req->rq_pack_bulk);
84 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
86 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
87 LASSERT(gctx->gc_mechctx);
/*
 * Pick the lustre_msg segment holding the bsd; its position depends on
 * the rpc service level: last segment for SVC_NULL, second-last for
 * SVC_AUTH/INTG, last clrbuf segment for SVC_PRIV.
 */
89 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
90 case SPTLRPC_SVC_NULL:
91 LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
93 offset = msg->lm_bufcount - 1;
95 case SPTLRPC_SVC_AUTH:
96 case SPTLRPC_SVC_INTG:
97 LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
99 offset = msg->lm_bufcount - 2;
101 case SPTLRPC_SVC_PRIV:
102 LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
103 msg = req->rq_clrbuf;
104 offset = msg->lm_bufcount - 1;
/* initialize the on-wire bulk security descriptor */
110 bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
111 bsd->bsd_version = 0;
113 bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
114 bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
/* nothing more to do for null bulk service */
116 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
119 LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
120 bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
122 if (req->rq_bulk_read) {
124 * bulk read: prepare receiving pages only for privacy mode.
126 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
127 return gss_cli_prep_bulk(req, desc);
130 * bulk write: sign or encrypt bulk pages.
132 bsd->bsd_nob = desc->bd_nob;
134 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
/* integrity: MIC over the bulk iov goes into bsd_data; the token
 * length is the remaining room in the bsd segment */
136 token.data = bsd->bsd_data;
137 token.len = lustre_msg_buflen(msg, offset) -
140 maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
141 desc->bd_iov_count, desc->bd_iov,
143 if (maj != GSS_S_COMPLETE) {
144 CWARN("failed to sign bulk data: %x\n", maj);
/* privacy: nothing to encrypt if there are no pages; otherwise grab
 * pages from the encryption pool, then encrypt in place */
149 if (desc->bd_iov_count == 0)
152 rc = sptlrpc_enc_pool_get_pages(desc);
154 CERROR("bulk write: failed to allocate "
155 "encryption pages: %d\n", rc);
159 token.data = bsd->bsd_data;
160 token.len = lustre_msg_buflen(msg, offset) -
163 maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
164 if (maj != GSS_S_COMPLETE) {
165 CWARN("fail to encrypt bulk data: %x\n", maj);
/*
 * gss_cli_ctx_unwrap_bulk()
 *
 * Client side: after a bulk rpc completes, cross-check the bulk security
 * descriptor we sent (bsdr, in the request buffer) against the one the
 * server returned (bsdv, in the reply), then for bulk reads verify the
 * MIC (SVC_INTG) or decrypt the received pages (SVC_PRIV).  On success
 * in privacy mode bd_nob_transferred is set to the clear-text size.
 *
 * NOTE(review): this chunk is missing lines ('break' statements, local
 * declarations of voff/roff/maj/token/i/nob, and parts of the loop and
 * error paths) -- comments below describe only what is visible.
 */
174 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
175 struct ptlrpc_request *req,
176 struct ptlrpc_bulk_desc *desc)
178 struct gss_cli_ctx *gctx;
179 struct lustre_msg *rmsg, *vmsg;
180 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
186 LASSERT(req->rq_pack_bulk);
187 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
/*
 * Locate both descriptors; segment position mirrors the packing rules
 * in gss_cli_ctx_wrap_bulk().
 *
 * NOTE(review): in each case below vmsg/rmsg are dereferenced
 * (lm_bufcount) on the line BEFORE the LASSERT that null-checks them --
 * the assertions should precede the dereference; verify against the
 * upstream source before relying on this ordering.
 */
189 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
190 case SPTLRPC_SVC_NULL:
191 vmsg = req->rq_repdata;
192 voff = vmsg->lm_bufcount - 1;
193 LASSERT(vmsg && vmsg->lm_bufcount >= 3);
195 rmsg = req->rq_reqbuf;
196 roff = rmsg->lm_bufcount - 1; /* last segment */
197 LASSERT(rmsg && rmsg->lm_bufcount >= 3);
199 case SPTLRPC_SVC_AUTH:
200 case SPTLRPC_SVC_INTG:
201 vmsg = req->rq_repdata;
202 voff = vmsg->lm_bufcount - 2;
203 LASSERT(vmsg && vmsg->lm_bufcount >= 4);
205 rmsg = req->rq_reqbuf;
206 roff = rmsg->lm_bufcount - 2; /* second last segment */
207 LASSERT(rmsg && rmsg->lm_bufcount >= 4);
209 case SPTLRPC_SVC_PRIV:
210 vmsg = req->rq_repdata;
211 voff = vmsg->lm_bufcount - 1;
212 LASSERT(vmsg && vmsg->lm_bufcount >= 2);
214 rmsg = req->rq_clrbuf;
215 roff = rmsg->lm_bufcount - 1; /* last segment */
216 LASSERT(rmsg && rmsg->lm_bufcount >= 2);
222 bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
223 bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
224 LASSERT(bsdr && bsdv);
/* the reply descriptor must echo what we sent */
226 if (bsdr->bsd_version != bsdv->bsd_version ||
227 bsdr->bsd_type != bsdv->bsd_type ||
228 bsdr->bsd_svc != bsdv->bsd_svc) {
229 CERROR("bulk security descriptor mismatch: "
230 "(%u,%u,%u) != (%u,%u,%u)\n",
231 bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
232 bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
236 LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
237 bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
238 bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
241 * in privacy mode if return success, make sure bd_nob_transferred
242 * is the actual size of the clear text, otherwise upper layer
/* bulk write: server did the verification; just honor its error flag
 * and, for privacy, report the full nob as transferred */
245 if (req->rq_bulk_write) {
246 if (bsdv->bsd_flags & BSD_FL_ERR) {
247 CERROR("server reported bulk i/o failure\n");
251 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
252 desc->bd_nob_transferred = desc->bd_nob;
255 * bulk read, upon return success, bd_nob_transferred is
256 * the size of plain text actually received.
258 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
259 LASSERT(gctx->gc_mechctx);
261 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
264 /* fix the actual data size */
/* trim each kiov so the iov total matches bd_nob_transferred
 * before verifying the MIC over it */
265 for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
266 if (desc->bd_iov[i].kiov_len + nob >
267 desc->bd_nob_transferred) {
268 desc->bd_iov[i].kiov_len =
269 desc->bd_nob_transferred - nob;
271 nob += desc->bd_iov[i].kiov_len;
/* MIC sent by the server lives in the reply's bsd_data */
274 token.data = bsdv->bsd_data;
275 token.len = lustre_msg_buflen(vmsg, voff) -
278 maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
279 desc->bd_iov_count, desc->bd_iov,
281 if (maj != GSS_S_COMPLETE) {
282 CERROR("failed to verify bulk read: %x\n", maj);
285 } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
/* privacy: server tells us the clear-text size in bsd_nob;
 * decrypt in place, then report it as transferred */
286 desc->bd_nob = bsdv->bsd_nob;
287 if (desc->bd_nob == 0)
290 token.data = bsdv->bsd_data;
291 token.len = lustre_msg_buflen(vmsg, voff) -
294 maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc, &token);
295 if (maj != GSS_S_COMPLETE) {
296 CERROR("failed to decrypt bulk read: %x\n",
301 desc->bd_nob_transferred = desc->bd_nob;
/*
 * gss_prep_bulk()
 *
 * Common helper for privacy-mode bulk i/o: allocate pages from the
 * encryption pool for the descriptor, then let the gss mechanism do any
 * per-transfer preparation via lgss_prep_bulk().  A descriptor with no
 * iov entries needs no preparation.
 *
 * NOTE(review): lines appear elided (opening brace, declaration of rc,
 * early-return and error-path statements); comments describe only what
 * is visible.
 */
308 static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
309 struct gss_ctx *mechctx)
313 if (desc->bd_iov_count == 0)
316 rc = sptlrpc_enc_pool_get_pages(desc);
320 if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
/*
 * gss_cli_prep_bulk()
 *
 * Client side: prepare the receiving pages of a bulk READ when (and only
 * when) the bulk flavor is SPTLRPC_BULK_SVC_PRIV; any other bulk service
 * needs no preparation.  Delegates the real work to gss_prep_bulk() with
 * this context's mech context.
 *
 * NOTE(review): lines appear elided (opening brace, declaration of rc,
 * return statements, tail of the CERROR call).
 */
326 int gss_cli_prep_bulk(struct ptlrpc_request *req,
327 struct ptlrpc_bulk_desc *desc)
332 LASSERT(req->rq_cli_ctx);
333 LASSERT(req->rq_pack_bulk);
334 LASSERT(req->rq_bulk_read);
/* only privacy mode needs pre-allocated encryption pages */
336 if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
339 rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
341 CERROR("bulk read: failed to prepare encryption "
/*
 * gss_svc_prep_bulk()
 *
 * Server side: prepare pages for an incoming bulk WRITE when the request's
 * bulk security descriptor asks for privacy mode; other bulk services need
 * no preparation.  The descriptor was stashed in the per-request gss
 * context (grctx->src_reqbsd) during request unpacking.
 *
 * NOTE(review): lines appear elided (opening brace, declaration of rc,
 * return statements, tail of the CERROR call).
 */
347 int gss_svc_prep_bulk(struct ptlrpc_request *req,
348 struct ptlrpc_bulk_desc *desc)
350 struct gss_svc_reqctx *grctx;
351 struct ptlrpc_bulk_sec_desc *bsd;
355 LASSERT(req->rq_svc_ctx);
356 LASSERT(req->rq_pack_bulk);
357 LASSERT(req->rq_bulk_write);
359 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
360 LASSERT(grctx->src_reqbsd);
361 LASSERT(grctx->src_repbsd);
362 LASSERT(grctx->src_ctx);
363 LASSERT(grctx->src_ctx->gsc_mechctx);
/* only privacy mode needs pre-allocated encryption pages */
365 bsd = grctx->src_reqbsd;
366 if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
369 rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
371 CERROR("bulk write: failed to prepare encryption "
/*
 * gss_svc_unwrap_bulk()
 *
 * Server side: after receiving the bulk WRITE data, verify its MIC
 * (SVC_INTG) or decrypt it (SVC_PRIV) using the token the client packed
 * in the request's bulk security descriptor (bsdr).  The reply
 * descriptor (bsdv) is initialized here; on any failure BSD_FL_ERR is
 * set in it so the client learns of the bulk i/o failure.
 *
 * NOTE(review): lines appear elided (opening brace, declarations of
 * rc/maj/token, 'break'/return statements, tail of the lgss_unwrap_bulk
 * call); comments describe only what is visible.
 */
377 int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
378 struct ptlrpc_bulk_desc *desc)
380 struct gss_svc_reqctx *grctx;
381 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
386 LASSERT(req->rq_svc_ctx);
387 LASSERT(req->rq_pack_bulk);
388 LASSERT(req->rq_bulk_write);
390 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
392 LASSERT(grctx->src_reqbsd);
393 LASSERT(grctx->src_repbsd);
394 LASSERT(grctx->src_ctx);
395 LASSERT(grctx->src_ctx->gsc_mechctx);
397 bsdr = grctx->src_reqbsd;
398 bsdv = grctx->src_repbsd;
400 /* bsdr has been sanity checked during unpacking */
/* echo the request's service level back in the reply descriptor */
401 bsdv->bsd_version = 0;
402 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
403 bsdv->bsd_svc = bsdr->bsd_svc;
406 switch (bsdv->bsd_svc) {
407 case SPTLRPC_BULK_SVC_INTG:
/* integrity: verify the client's MIC over the received iov */
408 token.data = bsdr->bsd_data;
409 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
411 maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
412 desc->bd_iov_count, desc->bd_iov, &token);
413 if (maj != GSS_S_COMPLETE) {
414 bsdv->bsd_flags |= BSD_FL_ERR;
415 CERROR("failed to verify bulk signature: %x\n", maj);
419 case SPTLRPC_BULK_SVC_PRIV:
/* privacy: the client-announced clear-text size must match what
 * actually arrived before we decrypt */
420 if (bsdr->bsd_nob != desc->bd_nob) {
421 bsdv->bsd_flags |= BSD_FL_ERR;
422 CERROR("prepared nob %d doesn't match the actual "
423 "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
427 if (desc->bd_iov_count == 0) {
428 LASSERT(desc->bd_nob == 0);
432 token.data = bsdr->bsd_data;
433 token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
435 maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
437 if (maj != GSS_S_COMPLETE) {
438 bsdv->bsd_flags |= BSD_FL_ERR;
439 CERROR("failed decrypt bulk data: %x\n", maj);
/*
 * gss_svc_wrap_bulk()
 *
 * Server side: before sending bulk READ data back to the client, protect
 * it according to the requested bulk service: sign the pages into the
 * reply descriptor (SVC_INTG) or grab encryption-pool pages and encrypt
 * them (SVC_PRIV).  On failure BSD_FL_ERR is set in the reply descriptor
 * (bsdv) so the client detects the bulk i/o failure.
 *
 * NOTE(review): this function continues beyond the end of the visible
 * chunk, and interior lines are elided (opening brace, declarations of
 * rc/maj/token, 'break'/return statements, tails of several calls);
 * comments describe only what is visible.
 */
448 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
449 struct ptlrpc_bulk_desc *desc)
451 struct gss_svc_reqctx *grctx;
452 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
458 LASSERT(req->rq_svc_ctx);
459 LASSERT(req->rq_pack_bulk);
460 LASSERT(req->rq_bulk_read);
462 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
464 LASSERT(grctx->src_reqbsd);
465 LASSERT(grctx->src_repbsd);
466 LASSERT(grctx->src_ctx);
467 LASSERT(grctx->src_ctx->gsc_mechctx);
469 bsdr = grctx->src_reqbsd;
470 bsdv = grctx->src_repbsd;
472 /* bsdr has been sanity checked during unpacking */
/* echo the request's service level back in the reply descriptor */
473 bsdv->bsd_version = 0;
474 bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
475 bsdv->bsd_svc = bsdr->bsd_svc;
478 switch (bsdv->bsd_svc) {
479 case SPTLRPC_BULK_SVC_INTG:
/* integrity: MIC over the outgoing iov goes into the reply bsd */
480 token.data = bsdv->bsd_data;
481 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
483 maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
484 desc->bd_iov_count, desc->bd_iov, &token);
485 if (maj != GSS_S_COMPLETE) {
486 bsdv->bsd_flags |= BSD_FL_ERR;
487 CERROR("failed to sign bulk data: %x\n", maj);
491 case SPTLRPC_BULK_SVC_PRIV:
/* privacy: announce the clear-text size, then encrypt in place
 * using pages from the encryption pool */
492 bsdv->bsd_nob = desc->bd_nob;
494 if (desc->bd_iov_count == 0) {
495 LASSERT(desc->bd_nob == 0);
499 rc = sptlrpc_enc_pool_get_pages(desc);
501 bsdv->bsd_flags |= BSD_FL_ERR;
502 CERROR("bulk read: failed to allocate encryption "
507 token.data = bsdv->bsd_data;
508 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
510 maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
512 if (maj != GSS_S_COMPLETE) {
513 bsdv->bsd_flags |= BSD_FL_ERR;
514 CERROR("failed to encrypt bulk data: %x\n", maj);