1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
 * Copyright (C) 2008 Sun Microsystems, Inc.
5 * Author: Eric Mei <eric.mei@sun.com>
6 * Copyright (C) 2006,2007 Cluster File Systems, Inc.
7 * Author: Eric Mei <ericm@clusterfs.com>
9 * This file is part of Lustre, http://www.lustre.org.
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 # define EXPORT_SYMTAB
28 #define DEBUG_SUBSYSTEM S_SEC
30 #include <linux/init.h>
31 #include <linux/module.h>
32 #include <linux/slab.h>
33 #include <linux/dcache.h>
35 #include <linux/random.h>
36 #include <linux/mutex.h>
37 #include <linux/crypto.h>
39 #include <liblustre.h>
43 #include <obd_class.h>
44 #include <obd_support.h>
45 #include <lustre/lustre_idl.h>
46 #include <lustre_net.h>
47 #include <lustre_import.h>
48 #include <lustre_sec.h>
51 #include "gss_internal.h"
/* all-zero IV buffer, used to reset CBC chaining state before each transform */
static __u8 zero_iv[CIPHER_MAX_BLKSIZE] = { 0, };
56 static void buf_to_sl(struct scatterlist *sl,
57 void *buf, unsigned int len)
59 sl->page = virt_to_page(buf);
60 sl->offset = offset_in_page(buf);
/*
 * CTS CBC encryption:
 * 1. X(n-1) = P(n-1)
 * 2. E(n-1) = Encrypt(K, X(n-1))
 * 3. C(n) = HEAD(E(n-1))
 * 4. P = P(n) | 0
 * 5. D(n) = E(n-1) XOR P
 * 6. C(n-1) = Encrypt(K, D(n))
 *
 * CTS encryption using standard CBC interface:
 * 1. pad the last partial block with 0.
 * 2. do CBC encryption.
 * 3. swap the last two ciphertext blocks.
 * 4. truncate to original plaintext size.
 */
79 static int cbc_cts_encrypt(struct ll_crypto_cipher *tfm,
80 struct scatterlist *sld,
81 struct scatterlist *sls)
83 struct scatterlist slst, sldt;
84 struct blkcipher_desc desc;
86 __u8 sbuf[CIPHER_MAX_BLKSIZE];
87 __u8 dbuf[CIPHER_MAX_BLKSIZE];
88 unsigned int blksize, blks, tail;
91 blksize = ll_crypto_blkcipher_blocksize(tfm);
92 blks = sls->length / blksize;
93 tail = sls->length % blksize;
94 LASSERT(blks > 0 && tail > 0);
96 /* pad tail block with 0, copy to sbuf */
97 data = cfs_kmap(sls->page);
98 memcpy(sbuf, data + sls->offset + blks * blksize, tail);
99 memset(sbuf + tail, 0, blksize - tail);
100 cfs_kunmap(sls->page);
102 buf_to_sl(&slst, sbuf, blksize);
103 buf_to_sl(&sldt, dbuf, blksize);
108 rc = ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length - tail);
110 CERROR("encrypt head (%u) data: %d\n", sls->length - tail, rc);
114 rc = ll_crypto_blkcipher_encrypt(&desc, &sldt, &slst, blksize);
116 CERROR("encrypt tail (%u) data: %d\n", slst.length, rc);
120 /* swab C(n) and C(n-1), if n == 1, then C(n-1) is the IV */
121 data = cfs_kmap(sld->page);
123 memcpy(data + sld->offset + blks * blksize,
124 data + sld->offset + (blks - 1) * blksize, tail);
125 memcpy(data + sld->offset + (blks - 1) * blksize, dbuf, blksize);
126 cfs_kunmap(sld->page);
/*
 * CTS CBC decryption:
 * 1. D(n) = Decrypt(K, C(n-1))
 * 2. C = C(n) | 0
 * 3. X(n) = D(n) XOR C
 * 4. P(n) = HEAD(X(n))
 * 5. E(n-1) = C(n) | TAIL(X(n))
 * 6. X(n-1) = Decrypt(K, E(n-1))
 * 7. P(n-1) = X(n-1) XOR C(n-2)
 *
 * CTS decryption using standard CBC interface:
 * 1. D(n) = Decrypt(K, C(n-1))
 * 2. C(n) = C(n) | TAIL(D(n))
 * 3. swap the last two ciphertext blocks.
 * 4. do CBC decryption.
 * 5. truncate to original ciphertext size.
 */
148 static int cbc_cts_decrypt(struct ll_crypto_cipher *tfm,
149 struct scatterlist *sld,
150 struct scatterlist *sls)
152 struct blkcipher_desc desc;
153 struct scatterlist slst, sldt;
155 __u8 sbuf[CIPHER_MAX_BLKSIZE];
156 __u8 dbuf[CIPHER_MAX_BLKSIZE];
157 unsigned int blksize, blks, tail;
160 blksize = ll_crypto_blkcipher_blocksize(tfm);
161 blks = sls->length / blksize;
162 tail = sls->length % blksize;
163 LASSERT(blks > 0 && tail > 0);
165 /* save current IV, and set IV to zero */
166 ll_crypto_blkcipher_get_iv(tfm, sbuf, blksize);
167 ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);
169 /* D(n) = Decrypt(K, C(n-1)) */
171 slst.offset += (blks - 1) * blksize;
172 slst.length = blksize;
174 buf_to_sl(&sldt, dbuf, blksize);
178 rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
180 CERROR("decrypt C(n-1) (%u): %d\n", slst.length, rc);
185 ll_crypto_blkcipher_set_iv(tfm, sbuf, blksize);
187 data = cfs_kmap(sls->page);
188 /* C(n) = C(n) | TAIL(D(n)) */
189 memcpy(dbuf, data + sls->offset + blks * blksize, tail);
190 /* swab C(n) and C(n-1) */
191 memcpy(sbuf, data + sls->offset + (blks - 1) * blksize, blksize);
192 memcpy(data + sls->offset + (blks - 1) * blksize, dbuf, blksize);
193 cfs_kunmap(sls->page);
196 buf_to_sl(&slst, sbuf, blksize);
197 buf_to_sl(&sldt, dbuf, blksize);
200 rc = ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length - tail);
202 CERROR("decrypt head (%u) data: %d\n", sls->length - tail, rc);
206 rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
208 CERROR("decrypt tail (%u) data: %d\n", slst.length, rc);
212 /* truncate to original ciphertext size */
213 data = cfs_kmap(sld->page);
214 memcpy(data + sld->offset + blks * blksize, dbuf, tail);
215 cfs_kunmap(sld->page);
220 static inline int do_cts_tfm(struct ll_crypto_cipher *tfm,
222 struct scatterlist *sld,
223 struct scatterlist *sls)
225 #ifndef HAVE_ASYNC_BLOCK_CIPHER
226 LASSERT(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC);
230 return cbc_cts_encrypt(tfm, sld, sls);
232 return cbc_cts_decrypt(tfm, sld, sls);
/*
 * normal encrypt/decrypt of data of even blocksize
 */
238 static inline int do_cipher_tfm(struct ll_crypto_cipher *tfm,
240 struct scatterlist *sld,
241 struct scatterlist *sls)
243 struct blkcipher_desc desc;
247 return ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length);
249 return ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length);
252 static struct ll_crypto_cipher *get_stream_cipher(__u8 *key, unsigned int keylen)
254 const struct sptlrpc_ciph_type *ct;
255 struct ll_crypto_cipher *tfm;
258 /* using ARC4, the only stream cipher in linux for now */
259 ct = sptlrpc_get_ciph_type(BULK_CIPH_ALG_ARC4);
262 tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0);
264 CERROR("Failed to allocate stream TFM %s\n", ct->sct_name);
267 LASSERT(ll_crypto_blkcipher_blocksize(tfm));
269 if (keylen > ct->sct_keysize)
270 keylen = ct->sct_keysize;
272 LASSERT(keylen >= crypto_tfm_alg_min_keysize(tfm));
273 LASSERT(keylen <= crypto_tfm_alg_max_keysize(tfm));
275 rc = ll_crypto_blkcipher_setkey(tfm, key, keylen);
277 CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
278 ll_crypto_free_blkcipher(tfm);
285 static int do_bulk_privacy(struct gss_ctx *gctx,
286 struct ptlrpc_bulk_desc *desc,
287 int encrypt, __u32 alg,
288 struct ptlrpc_bulk_sec_desc *bsd)
290 const struct sptlrpc_ciph_type *ct = sptlrpc_get_ciph_type(alg);
291 struct ll_crypto_cipher *tfm;
292 struct ll_crypto_cipher *stfm = NULL; /* backup stream cipher */
293 struct scatterlist sls, sld, *sldp;
294 unsigned int blksize, keygen_size;
296 __u8 key[CIPHER_MAX_KEYSIZE];
301 bsd->bsd_ciph_alg = BULK_CIPH_ALG_NULL;
303 if (alg == BULK_CIPH_ALG_NULL)
306 if (desc->bd_iov_count <= 0) {
308 bsd->bsd_ciph_alg = alg;
312 tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0 );
314 CERROR("Failed to allocate TFM %s\n", ct->sct_name);
317 blksize = ll_crypto_blkcipher_blocksize(tfm);
319 LASSERT(crypto_tfm_alg_max_keysize(tfm) >= ct->sct_keysize);
320 LASSERT(crypto_tfm_alg_min_keysize(tfm) <= ct->sct_keysize);
321 LASSERT(ct->sct_ivsize == 0 ||
322 ll_crypto_blkcipher_ivsize(tfm) == ct->sct_ivsize);
323 LASSERT(ct->sct_keysize <= CIPHER_MAX_KEYSIZE);
324 LASSERT(blksize <= CIPHER_MAX_BLKSIZE);
326 /* generate ramdom key seed and compute the secret key based on it.
327 * note determined by algorithm which lgss_plain_encrypt use, it
328 * might require the key size be its (blocksize * n). so here for
329 * simplicity, we force it's be n * MAX_BLKSIZE by padding 0 */
330 keygen_size = (ct->sct_keysize + CIPHER_MAX_BLKSIZE - 1) &
331 ~(CIPHER_MAX_BLKSIZE - 1);
333 get_random_bytes(bsd->bsd_key, ct->sct_keysize);
334 if (ct->sct_keysize < keygen_size)
335 memset(bsd->bsd_key + ct->sct_keysize, 0,
336 keygen_size - ct->sct_keysize);
339 rc = lgss_plain_encrypt(gctx, 0, keygen_size, bsd->bsd_key, key);
341 CERROR("failed to compute secret key: %d\n", rc);
345 rc = ll_crypto_blkcipher_setkey(tfm, key, ct->sct_keysize);
347 CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
351 /* stream cipher doesn't need iv */
353 ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);
355 for (i = 0; i < desc->bd_iov_count; i++) {
356 sls.page = desc->bd_iov[i].kiov_page;
357 sls.offset = desc->bd_iov[i].kiov_offset;
358 sls.length = desc->bd_iov[i].kiov_len;
360 if (unlikely(sls.length == 0)) {
361 CWARN("page %d with 0 length data?\n", i);
365 if (unlikely(sls.offset % blksize)) {
366 CERROR("page %d with odd offset %u, TFM %s\n",
367 i, sls.offset, ct->sct_name);
372 if (desc->bd_enc_pages) {
373 sld.page = desc->bd_enc_pages[i];
374 sld.offset = desc->bd_iov[i].kiov_offset;
375 sld.length = desc->bd_iov[i].kiov_len;
382 if (likely(sls.length % blksize == 0)) {
383 /* data length is n * blocksize, do the normal tfm */
384 rc = do_cipher_tfm(tfm, encrypt, sldp, &sls);
385 } else if (sls.length < blksize) {
386 /* odd data length, and smaller than 1 block, CTS
387 * doesn't work in this case because it requires
388 * transfer a modified IV to peer. here we use a
389 * "backup" stream cipher to do the tfm */
391 stfm = get_stream_cipher(key, ct->sct_keysize);
397 rc = do_cipher_tfm(stfm, encrypt, sldp, &sls);
399 /* odd data length but > 1 block, do CTS tfm */
400 rc = do_cts_tfm(tfm, encrypt, sldp, &sls);
404 CERROR("error %s page %d/%d: %d\n",
405 encrypt ? "encrypt" : "decrypt",
406 i + 1, desc->bd_iov_count, rc);
410 if (desc->bd_enc_pages)
411 desc->bd_iov[i].kiov_page = desc->bd_enc_pages[i];
415 bsd->bsd_ciph_alg = alg;
419 ll_crypto_free_blkcipher(stfm);
421 ll_crypto_free_blkcipher(tfm);
425 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
426 struct ptlrpc_request *req,
427 struct ptlrpc_bulk_desc *desc)
429 struct gss_cli_ctx *gctx;
430 struct lustre_msg *msg;
431 struct ptlrpc_bulk_sec_desc *bsdr;
435 LASSERT(req->rq_pack_bulk);
436 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
438 switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
439 case SPTLRPC_SVC_NULL:
440 LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
441 msg = req->rq_reqbuf;
442 offset = msg->lm_bufcount - 1;
444 case SPTLRPC_SVC_AUTH:
445 case SPTLRPC_SVC_INTG:
446 LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
447 msg = req->rq_reqbuf;
448 offset = msg->lm_bufcount - 2;
450 case SPTLRPC_SVC_PRIV:
451 LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
452 msg = req->rq_clrbuf;
453 offset = msg->lm_bufcount - 1;
460 rc = bulk_csum_cli_request(desc, req->rq_bulk_read,
461 req->rq_flvr.sf_bulk_hash, msg, offset);
463 CERROR("client bulk %s: failed to generate checksum: %d\n",
464 req->rq_bulk_read ? "read" : "write", rc);
468 if (req->rq_flvr.sf_bulk_ciph == BULK_CIPH_ALG_NULL)
471 /* previous bulk_csum_cli_request() has verified bsdr is good */
472 bsdr = lustre_msg_buf(msg, offset, 0);
474 if (req->rq_bulk_read) {
475 bsdr->bsd_ciph_alg = req->rq_flvr.sf_bulk_ciph;
479 /* it turn out to be bulk write */
480 rc = sptlrpc_enc_pool_get_pages(desc);
482 CERROR("bulk write: failed to allocate encryption pages\n");
486 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
487 LASSERT(gctx->gc_mechctx);
489 rc = do_bulk_privacy(gctx->gc_mechctx, desc, 1,
490 req->rq_flvr.sf_bulk_ciph, bsdr);
492 CERROR("bulk write: client failed to encrypt pages\n");
497 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
498 struct ptlrpc_request *req,
499 struct ptlrpc_bulk_desc *desc)
501 struct gss_cli_ctx *gctx;
502 struct lustre_msg *rmsg, *vmsg;
503 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
507 LASSERT(req->rq_pack_bulk);
508 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
510 switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
511 case SPTLRPC_SVC_NULL:
512 vmsg = req->rq_repdata;
513 voff = vmsg->lm_bufcount - 1;
514 LASSERT(vmsg && vmsg->lm_bufcount >= 3);
516 rmsg = req->rq_reqbuf;
517 roff = rmsg->lm_bufcount - 1; /* last segment */
518 LASSERT(rmsg && rmsg->lm_bufcount >= 3);
520 case SPTLRPC_SVC_AUTH:
521 case SPTLRPC_SVC_INTG:
522 vmsg = req->rq_repdata;
523 voff = vmsg->lm_bufcount - 2;
524 LASSERT(vmsg && vmsg->lm_bufcount >= 4);
526 rmsg = req->rq_reqbuf;
527 roff = rmsg->lm_bufcount - 2; /* second last segment */
528 LASSERT(rmsg && rmsg->lm_bufcount >= 4);
530 case SPTLRPC_SVC_PRIV:
531 vmsg = req->rq_repdata;
532 voff = vmsg->lm_bufcount - 1;
533 LASSERT(vmsg && vmsg->lm_bufcount >= 2);
535 rmsg = req->rq_clrbuf;
536 roff = rmsg->lm_bufcount - 1; /* last segment */
537 LASSERT(rmsg && rmsg->lm_bufcount >= 2);
543 if (req->rq_bulk_read) {
544 bsdr = lustre_msg_buf(rmsg, roff, 0);
545 if (bsdr->bsd_ciph_alg == BULK_CIPH_ALG_NULL)
548 bsdv = lustre_msg_buf(vmsg, voff, 0);
549 if (bsdr->bsd_ciph_alg != bsdv->bsd_ciph_alg) {
550 CERROR("bulk read: cipher algorithm mismatch: client "
551 "request %s but server reply with %s. try to "
552 "use the new one for decryption\n",
553 sptlrpc_get_ciph_name(bsdr->bsd_ciph_alg),
554 sptlrpc_get_ciph_name(bsdv->bsd_ciph_alg));
557 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
558 LASSERT(gctx->gc_mechctx);
560 rc = do_bulk_privacy(gctx->gc_mechctx, desc, 0,
561 bsdv->bsd_ciph_alg, bsdv);
563 CERROR("bulk read: client failed to decrypt data\n");
569 rc = bulk_csum_cli_reply(desc, req->rq_bulk_read,
570 rmsg, roff, vmsg, voff);
574 int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
575 struct ptlrpc_bulk_desc *desc)
577 struct gss_svc_reqctx *grctx;
581 LASSERT(req->rq_svc_ctx);
582 LASSERT(req->rq_pack_bulk);
583 LASSERT(req->rq_bulk_write);
585 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
587 LASSERT(grctx->src_reqbsd);
588 LASSERT(grctx->src_repbsd);
589 LASSERT(grctx->src_ctx);
590 LASSERT(grctx->src_ctx->gsc_mechctx);
592 /* decrypt bulk data if it's encrypted */
593 if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
594 rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 0,
595 grctx->src_reqbsd->bsd_ciph_alg,
598 CERROR("bulk write: server failed to decrypt data\n");
603 /* verify bulk data checksum */
604 rc = bulk_csum_svc(desc, req->rq_bulk_read,
605 grctx->src_reqbsd, grctx->src_reqbsd_size,
606 grctx->src_repbsd, grctx->src_repbsd_size);
611 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
612 struct ptlrpc_bulk_desc *desc)
614 struct gss_svc_reqctx *grctx;
618 LASSERT(req->rq_svc_ctx);
619 LASSERT(req->rq_pack_bulk);
620 LASSERT(req->rq_bulk_read);
622 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
624 LASSERT(grctx->src_reqbsd);
625 LASSERT(grctx->src_repbsd);
626 LASSERT(grctx->src_ctx);
627 LASSERT(grctx->src_ctx->gsc_mechctx);
629 /* generate bulk data checksum */
630 rc = bulk_csum_svc(desc, req->rq_bulk_read,
631 grctx->src_reqbsd, grctx->src_reqbsd_size,
632 grctx->src_repbsd, grctx->src_repbsd_size);
636 /* encrypt bulk data if required */
637 if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
638 rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 1,
639 grctx->src_reqbsd->bsd_ciph_alg,
642 CERROR("bulk read: server failed to encrypt data: "