/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/gss/gss_bulk.c
 *
 * Author: Eric Mei <eric.mei@sun.com>
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/random.h>
#include <linux/mutex.h>
#include <linux/crypto.h>
#else
#include <liblustre.h>
#endif

#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_internal.h"

static __u8 zero_iv[CIPHER_MAX_BLKSIZE] = { 0, };
/* map a kernel virtual-address buffer onto a single-entry scatterlist */
static void buf_to_sl(struct scatterlist *sl,
                      void *buf, unsigned int len)
{
        sl->page = virt_to_page(buf);
        sl->offset = offset_in_page(buf);
        sl->length = len;
}
/*
 * CTS CBC encryption:
 * 1. X(n-1) = P(n-1)
 * 2. E(n-1) = Encrypt(K, X(n-1))
 * 3. C(n)   = HEAD(E(n-1))
 * 4. P      = P(n) | 0
 * 5. D(n)   = E(n-1) XOR P
 * 6. C(n-1) = Encrypt(K, D(n))
 *
 * CTS encryption using the standard CBC interface:
 * 1. pad the last partial block with 0.
 * 2. do CBC encryption.
 * 3. swap the last two ciphertext blocks.
 * 4. truncate to the original plaintext size.
 */
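/*
 * Illustration only (not part of the original algorithm description):
 * assuming a 16-byte block cipher and a 24-byte plaintext, cbc_cts_encrypt()
 * below computes blks = 1 and tail = 8.  The first 16 bytes are CBC-encrypted
 * from the source into the destination scatterlist, the trailing 8 bytes are
 * copied into a zero-padded 16-byte buffer and encrypted as one more chained
 * block, and finally the last full ciphertext block and the tail position are
 * swapped, so the output truncated back to 24 bytes has the CTS layout.
 */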
static int cbc_cts_encrypt(struct ll_crypto_cipher *tfm,
                           struct scatterlist      *sld,
                           struct scatterlist      *sls)
{
        struct scatterlist      slst, sldt;
        struct blkcipher_desc   desc;
        void                   *data;
        __u8                    sbuf[CIPHER_MAX_BLKSIZE];
        __u8                    dbuf[CIPHER_MAX_BLKSIZE];
        unsigned int            blksize, blks, tail;
        int                     rc;

        blksize = ll_crypto_blkcipher_blocksize(tfm);
        blks = sls->length / blksize;
        tail = sls->length % blksize;
        LASSERT(blks > 0 && tail > 0);

        /* pad tail block with 0, copy to sbuf */
        data = cfs_kmap(sls->page);
        memcpy(sbuf, data + sls->offset + blks * blksize, tail);
        memset(sbuf + tail, 0, blksize - tail);
        cfs_kunmap(sls->page);

        buf_to_sl(&slst, sbuf, blksize);
        buf_to_sl(&sldt, dbuf, blksize);

        desc.tfm   = tfm;
        desc.info  = NULL;
        desc.flags = 0;

        /* CBC-encrypt all the full blocks */
        rc = ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length - tail);
        if (unlikely(rc)) {
                CERROR("encrypt head (%u) data: %d\n", sls->length - tail, rc);
                return rc;
        }

        /* encrypt the zero-padded tail block */
        rc = ll_crypto_blkcipher_encrypt(&desc, &sldt, &slst, blksize);
        if (unlikely(rc)) {
                CERROR("encrypt tail (%u) data: %d\n", slst.length, rc);
                return rc;
        }

        /* swap C(n) and C(n-1); if n == 1, then C(n-1) is the IV */
        data = cfs_kmap(sld->page);

        memcpy(data + sld->offset + blks * blksize,
               data + sld->offset + (blks - 1) * blksize, tail);
        memcpy(data + sld->offset + (blks - 1) * blksize, dbuf, blksize);
        cfs_kunmap(sld->page);

        return 0;
}
/*
 * CTS CBC decryption:
 * 1. D(n)   = Decrypt(K, C(n-1))
 * 2. C      = C(n) | 0
 * 3. X(n)   = D(n) XOR C
 * 4. P(n)   = HEAD(X(n))
 * 5. E(n-1) = C(n) | TAIL(X(n))
 * 6. X(n-1) = Decrypt(K, E(n-1))
 * 7. P(n-1) = X(n-1) XOR C(n-2)
 *
 * CTS decryption using the standard CBC interface:
 * 1. D(n) = Decrypt(K, C(n-1))
 * 2. C(n) = C(n) | TAIL(D(n))
 * 3. swap the last two ciphertext blocks.
 * 4. do CBC decryption.
 * 5. truncate to the original ciphertext size.
 */
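/*
 * Illustration only: for the 24-byte example sketched above, the decryption
 * path below first decrypts the last full ciphertext block with a zero IV to
 * recover D(n), borrows TAIL(D(n)) to rebuild a full-sized final block from
 * the 8-byte tail, swaps the last two blocks back into normal CBC order,
 * then runs plain CBC decryption and truncates the result to 24 bytes again.
 */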
static int cbc_cts_decrypt(struct ll_crypto_cipher *tfm,
                           struct scatterlist      *sld,
                           struct scatterlist      *sls)
{
        struct blkcipher_desc   desc;
        struct scatterlist      slst, sldt;
        void                   *data;
        __u8                    sbuf[CIPHER_MAX_BLKSIZE];
        __u8                    dbuf[CIPHER_MAX_BLKSIZE];
        unsigned int            blksize, blks, tail;
        int                     rc;

        blksize = ll_crypto_blkcipher_blocksize(tfm);
        blks = sls->length / blksize;
        tail = sls->length % blksize;
        LASSERT(blks > 0 && tail > 0);

        /* save current IV, and set IV to zero */
        ll_crypto_blkcipher_get_iv(tfm, sbuf, blksize);
        ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);

        /* D(n) = Decrypt(K, C(n-1)) */
        slst = *sls;
        slst.offset += (blks - 1) * blksize;
        slst.length = blksize;

        buf_to_sl(&sldt, dbuf, blksize);

        desc.tfm   = tfm;
        desc.info  = NULL;
        desc.flags = 0;

        rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
        if (unlikely(rc)) {
                CERROR("decrypt C(n-1) (%u): %d\n", slst.length, rc);
                return rc;
        }

        /* restore the saved IV */
        ll_crypto_blkcipher_set_iv(tfm, sbuf, blksize);

        data = cfs_kmap(sls->page);
        /* C(n) = C(n) | TAIL(D(n)) */
        memcpy(dbuf, data + sls->offset + blks * blksize, tail);
        /* swap C(n) and C(n-1) */
        memcpy(sbuf, data + sls->offset + (blks - 1) * blksize, blksize);
        memcpy(data + sls->offset + (blks - 1) * blksize, dbuf, blksize);
        cfs_kunmap(sls->page);

        buf_to_sl(&slst, sbuf, blksize);
        buf_to_sl(&sldt, dbuf, blksize);

        /* CBC-decrypt all the full blocks */
        rc = ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length - tail);
        if (unlikely(rc)) {
                CERROR("decrypt head (%u) data: %d\n", sls->length - tail, rc);
                return rc;
        }

        /* decrypt the swapped-out block to recover the tail plaintext */
        rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
        if (unlikely(rc)) {
                CERROR("decrypt tail (%u) data: %d\n", slst.length, rc);
                return rc;
        }

        /* truncate to original ciphertext size */
        data = cfs_kmap(sld->page);
        memcpy(data + sld->offset + blks * blksize, dbuf, tail);
        cfs_kunmap(sld->page);

        return 0;
}
static inline int do_cts_tfm(struct ll_crypto_cipher *tfm,
                             int encrypt,
                             struct scatterlist *sld,
                             struct scatterlist *sls)
{
#ifndef HAVE_ASYNC_BLOCK_CIPHER
        LASSERT(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC);
#endif

        if (encrypt)
                return cbc_cts_encrypt(tfm, sld, sls);
        else
                return cbc_cts_decrypt(tfm, sld, sls);
}
/* normal encrypt/decrypt of data whose length is a multiple of the blocksize */
static inline int do_cipher_tfm(struct ll_crypto_cipher *tfm,
                                int encrypt,
                                struct scatterlist *sld,
                                struct scatterlist *sls)
{
        struct blkcipher_desc desc;

        desc.tfm   = tfm;
        desc.info  = NULL;
        desc.flags = 0;

        if (encrypt)
                return ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length);
        else
                return ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length);
}
static struct ll_crypto_cipher *get_stream_cipher(__u8 *key, unsigned int keylen)
{
        const struct sptlrpc_ciph_type *ct;
        struct ll_crypto_cipher        *tfm;
        int                             rc;

        /* using ARC4, the only stream cipher in linux for now */
        ct = sptlrpc_get_ciph_type(BULK_CIPH_ALG_ARC4);

        tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0);
        if (tfm == NULL) {
                CERROR("Failed to allocate stream TFM %s\n", ct->sct_name);
                return NULL;
        }
        LASSERT(ll_crypto_blkcipher_blocksize(tfm));

        if (keylen > ct->sct_keysize)
                keylen = ct->sct_keysize;

        LASSERT(keylen >= crypto_tfm_alg_min_keysize(tfm));
        LASSERT(keylen <= crypto_tfm_alg_max_keysize(tfm));

        rc = ll_crypto_blkcipher_setkey(tfm, key, keylen);
        if (rc) {
                CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
                ll_crypto_free_blkcipher(tfm);
                return NULL;
        }

        return tfm;
}
static int do_bulk_privacy(struct gss_ctx *gctx,
                           struct ptlrpc_bulk_desc *desc,
                           int encrypt, __u32 alg,
                           struct ptlrpc_bulk_sec_desc *bsd)
{
        const struct sptlrpc_ciph_type *ct = sptlrpc_get_ciph_type(alg);
        struct ll_crypto_cipher  *tfm;
        struct ll_crypto_cipher  *stfm = NULL; /* backup stream cipher */
        struct scatterlist        sls, sld, *sldp;
        unsigned int              blksize, keygen_size;
        int                       i, rc;
        __u8                      key[CIPHER_MAX_KEYSIZE];

        if (encrypt)
                bsd->bsd_ciph_alg = BULK_CIPH_ALG_NULL;

        if (alg == BULK_CIPH_ALG_NULL)
                return 0;

        if (desc->bd_iov_count <= 0) {
                if (encrypt)
                        bsd->bsd_ciph_alg = alg;
                return 0;
        }

        tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0);
        if (tfm == NULL) {
                CERROR("Failed to allocate TFM %s\n", ct->sct_name);
                return -ENOMEM;
        }
        blksize = ll_crypto_blkcipher_blocksize(tfm);

        LASSERT(crypto_tfm_alg_max_keysize(tfm) >= ct->sct_keysize);
        LASSERT(crypto_tfm_alg_min_keysize(tfm) <= ct->sct_keysize);
        LASSERT(ct->sct_ivsize == 0 ||
                ll_crypto_blkcipher_ivsize(tfm) == ct->sct_ivsize);
        LASSERT(ct->sct_keysize <= CIPHER_MAX_KEYSIZE);
        LASSERT(blksize <= CIPHER_MAX_BLKSIZE);

        /* generate a random key seed and compute the secret key from it.
         * note the algorithm used by lgss_plain_encrypt() might require the
         * key size to be a multiple of its blocksize, so for simplicity we
         * force the seed size to be a multiple of CIPHER_MAX_BLKSIZE by
         * padding it with 0 */
        keygen_size = (ct->sct_keysize + CIPHER_MAX_BLKSIZE - 1) &
                      ~(CIPHER_MAX_BLKSIZE - 1);
        if (encrypt) {
                get_random_bytes(bsd->bsd_key, ct->sct_keysize);
                if (ct->sct_keysize < keygen_size)
                        memset(bsd->bsd_key + ct->sct_keysize, 0,
                               keygen_size - ct->sct_keysize);
        }

        rc = lgss_plain_encrypt(gctx, 0, keygen_size, bsd->bsd_key, key);
        if (rc) {
                CERROR("failed to compute secret key: %d\n", rc);
                goto out;
        }

        rc = ll_crypto_blkcipher_setkey(tfm, key, ct->sct_keysize);
        if (rc) {
                CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
                goto out;
        }

        /* a stream cipher doesn't need an IV */
        if (blksize > 1)
                ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);

        for (i = 0; i < desc->bd_iov_count; i++) {
                sls.page = desc->bd_iov[i].kiov_page;
                sls.offset = desc->bd_iov[i].kiov_offset;
                sls.length = desc->bd_iov[i].kiov_len;

                if (unlikely(sls.length == 0)) {
                        CWARN("page %d with 0 length data?\n", i);
                        continue;
                }

                if (unlikely(sls.offset % blksize)) {
                        CERROR("page %d with odd offset %u, TFM %s\n",
                               i, sls.offset, ct->sct_name);
                        rc = -EINVAL;
                        goto out;
                }

                if (desc->bd_enc_pages) {
                        sld.page = desc->bd_enc_pages[i];
                        sld.offset = desc->bd_iov[i].kiov_offset;
                        sld.length = desc->bd_iov[i].kiov_len;

                        sldp = &sld;
                } else {
                        sldp = &sls;
                }

                if (likely(sls.length % blksize == 0)) {
                        /* data length is n * blocksize, do the normal tfm */
                        rc = do_cipher_tfm(tfm, encrypt, sldp, &sls);
                } else if (sls.length < blksize) {
                        /* odd data length and smaller than 1 block. CTS
                         * doesn't work in this case because it would require
                         * transferring a modified IV to the peer, so use a
                         * "backup" stream cipher to do the tfm */
                        if (stfm == NULL) {
                                stfm = get_stream_cipher(key, ct->sct_keysize);
                                if (stfm == NULL) {
                                        rc = -ENOMEM;
                                        goto out;
                                }
                        }
                        rc = do_cipher_tfm(stfm, encrypt, sldp, &sls);
                } else {
                        /* odd data length but > 1 block, do CTS tfm */
                        rc = do_cts_tfm(tfm, encrypt, sldp, &sls);
                }

                if (rc) {
                        CERROR("error %s page %d/%d: %d\n",
                               encrypt ? "encrypt" : "decrypt",
                               i + 1, desc->bd_iov_count, rc);
                        goto out;
                }

                if (desc->bd_enc_pages)
                        desc->bd_iov[i].kiov_page = desc->bd_enc_pages[i];
        }

        if (encrypt)
                bsd->bsd_ciph_alg = alg;

out:
        if (stfm)
                ll_crypto_free_blkcipher(stfm);

        ll_crypto_free_blkcipher(tfm);
        return rc;
}
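/*
 * Illustration only: with a 16-byte block cipher, a full 4096-byte page goes
 * through do_cipher_tfm() as plain CBC, a 20-byte fragment (1 full block plus
 * a 4-byte tail) goes through do_cts_tfm(), and a 10-byte fragment (less than
 * one block) falls back to the ARC4 stream cipher from get_stream_cipher().
 * Likewise, the keygen_size rounding above turns e.g. a 20-byte sct_keysize
 * into a 32-byte, zero-padded key seed when CIPHER_MAX_BLKSIZE is 16.
 */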
int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct gss_cli_ctx              *gctx;
        struct lustre_msg               *msg;
        struct ptlrpc_bulk_sec_desc     *bsdr;
        int                              offset, rc;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
                LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
                msg = req->rq_reqbuf;
                offset = msg->lm_bufcount - 1;
                break;
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
                msg = req->rq_reqbuf;
                offset = msg->lm_bufcount - 2;
                break;
        case SPTLRPC_SVC_PRIV:
                LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
                msg = req->rq_clrbuf;
                offset = msg->lm_bufcount - 1;
                break;
        default:
                LBUG();
        }

        rc = bulk_csum_cli_request(desc, req->rq_bulk_read,
                                   req->rq_flvr.sf_bulk_hash, msg, offset);
        if (rc) {
                CERROR("client bulk %s: failed to generate checksum: %d\n",
                       req->rq_bulk_read ? "read" : "write", rc);
                return rc;
        }

        if (req->rq_flvr.sf_bulk_ciph == BULK_CIPH_ALG_NULL)
                return 0;

        /* previous bulk_csum_cli_request() has verified bsdr is good */
        bsdr = lustre_msg_buf(msg, offset, 0);

        if (req->rq_bulk_read) {
                bsdr->bsd_ciph_alg = req->rq_flvr.sf_bulk_ciph;
                return 0;
        }

        /* it turned out to be a bulk write */
        rc = sptlrpc_enc_pool_get_pages(desc);
        if (rc) {
                CERROR("bulk write: failed to allocate encryption pages\n");
                return rc;
        }

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
        LASSERT(gctx->gc_mechctx);

        rc = do_bulk_privacy(gctx->gc_mechctx, desc, 1,
                             req->rq_flvr.sf_bulk_ciph, bsdr);
        if (rc)
                CERROR("bulk write: client failed to encrypt pages\n");

        return rc;
}
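/*
 * Note (summarizing the switch above): the bulk security descriptor sits in
 * the last lustre_msg segment for the NULL and PRIV service levels, and in
 * the second-to-last segment for AUTH and INTG, where one more segment
 * follows it.  gss_cli_ctx_unwrap_bulk() below assumes the same layout for
 * both the request buffer and the reply data.
 */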
int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                            struct ptlrpc_request *req,
                            struct ptlrpc_bulk_desc *desc)
{
        struct gss_cli_ctx              *gctx;
        struct lustre_msg               *rmsg, *vmsg;
        struct ptlrpc_bulk_sec_desc     *bsdr, *bsdv;
        int                              roff, voff, rc;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
                vmsg = req->rq_repdata;
                voff = vmsg->lm_bufcount - 1;
                LASSERT(vmsg && vmsg->lm_bufcount >= 3);

                rmsg = req->rq_reqbuf;
                roff = rmsg->lm_bufcount - 1; /* last segment */
                LASSERT(rmsg && rmsg->lm_bufcount >= 3);
                break;
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                vmsg = req->rq_repdata;
                voff = vmsg->lm_bufcount - 2;
                LASSERT(vmsg && vmsg->lm_bufcount >= 4);

                rmsg = req->rq_reqbuf;
                roff = rmsg->lm_bufcount - 2; /* second last segment */
                LASSERT(rmsg && rmsg->lm_bufcount >= 4);
                break;
        case SPTLRPC_SVC_PRIV:
                vmsg = req->rq_repdata;
                voff = vmsg->lm_bufcount - 1;
                LASSERT(vmsg && vmsg->lm_bufcount >= 2);

                rmsg = req->rq_clrbuf;
                roff = rmsg->lm_bufcount - 1; /* last segment */
                LASSERT(rmsg && rmsg->lm_bufcount >= 2);
                break;
        default:
                LBUG();
        }

        if (req->rq_bulk_read) {
                bsdr = lustre_msg_buf(rmsg, roff, 0);
                if (bsdr->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
                        bsdv = lustre_msg_buf(vmsg, voff, 0);
                        if (bsdr->bsd_ciph_alg != bsdv->bsd_ciph_alg) {
                                CERROR("bulk read: cipher algorithm mismatch: "
                                       "client requested %s but server replied "
                                       "with %s, using the server's choice for "
                                       "decryption\n",
                                       sptlrpc_get_ciph_name(bsdr->bsd_ciph_alg),
                                       sptlrpc_get_ciph_name(bsdv->bsd_ciph_alg));
                        }

                        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
                        LASSERT(gctx->gc_mechctx);

                        rc = do_bulk_privacy(gctx->gc_mechctx, desc, 0,
                                             bsdv->bsd_ciph_alg, bsdv);
                        if (rc) {
                                CERROR("bulk read: client failed to decrypt "
                                       "data\n");
                                return rc;
                        }
                }
        }

        rc = bulk_csum_cli_reply(desc, req->rq_bulk_read,
                                 rmsg, roff, vmsg, voff);
        return rc;
}
int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx *grctx;
        int                    rc;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_write);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        /* decrypt bulk data if it's encrypted */
        if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
                rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 0,
                                     grctx->src_reqbsd->bsd_ciph_alg,
                                     grctx->src_reqbsd);
                if (rc) {
                        CERROR("bulk write: server failed to decrypt data\n");
                        return rc;
                }
        }

        /* verify bulk data checksum */
        rc = bulk_csum_svc(desc, req->rq_bulk_read,
                           grctx->src_reqbsd, grctx->src_reqbsd_size,
                           grctx->src_repbsd, grctx->src_repbsd_size);

        return rc;
}
int gss_svc_wrap_bulk(struct ptlrpc_request *req,
                      struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx *grctx;
        int                    rc;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        /* generate bulk data checksum */
        rc = bulk_csum_svc(desc, req->rq_bulk_read,
                           grctx->src_reqbsd, grctx->src_reqbsd_size,
                           grctx->src_repbsd, grctx->src_repbsd_size);
        if (rc)
                return rc;

        /* encrypt bulk data if required */
        if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
                rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 1,
                                     grctx->src_reqbsd->bsd_ciph_alg,
                                     grctx->src_repbsd);
                if (rc)
                        CERROR("bulk read: server failed to encrypt data: "