/* Source: lustre/ptlrpc/gss/gss_bulk.c
 * (fs/lustre-release.git, land of branch b_colibri_devel on HEAD)
 */
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2006 Cluster File Systems, Inc.
5  *   Author: Eric Mei <ericm@clusterfs.com>
6  *
7  *   This file is part of Lustre, http://www.lustre.org.
8  *
9  *   Lustre is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Lustre is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Lustre; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23 #ifndef EXPORT_SYMTAB
24 # define EXPORT_SYMTAB
25 #endif
26 #define DEBUG_SUBSYSTEM S_SEC
27 #ifdef __KERNEL__
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/slab.h>
31 #include <linux/dcache.h>
32 #include <linux/fs.h>
33 #include <linux/random.h>
34 #include <linux/mutex.h>
35 #include <linux/crypto.h>
36 #else
37 #include <liblustre.h>
38 #endif
39
40 #include <obd.h>
41 #include <obd_class.h>
42 #include <obd_support.h>
43 #include <lustre/lustre_idl.h>
44 #include <lustre_net.h>
45 #include <lustre_import.h>
46 #include <lustre_sec.h>
47
48 #include "gss_err.h"
49 #include "gss_internal.h"
50 #include "gss_api.h"
51
/*
 * Encrypt or decrypt every page of a bulk descriptor in place with the
 * symmetric cipher selected by \a alg, keyed by a secret derived from the
 * per-bulk IV carried in \a bsd.
 *
 * \param gctx    GSS mechanism context, used to turn the on-wire IV into
 *                the secret cipher key via lgss_plain_encrypt()
 * \param desc    bulk descriptor whose bd_iov pages are transformed; when
 *                desc->bd_enc_pages is set, output goes to those pages and
 *                they are swapped into bd_iov afterwards
 * \param encrypt non-zero to encrypt, zero to decrypt
 * \param alg     BULK_PRIV_ALG_* selector; must be < BULK_PRIV_ALG_MAX
 * \param bsd     bulk security descriptor: bsd_iv is generated here when
 *                encrypting, and bsd_priv_alg records the cipher actually
 *                applied (NULL until encryption fully succeeds)
 *
 * \retval 0 on success (including the BULK_PRIV_ALG_NULL no-op case),
 *         negative errno on failure.
 */
static
int do_bulk_privacy(struct gss_ctx *gctx,
                    struct ptlrpc_bulk_desc *desc,
                    int encrypt, __u32 alg,
                    struct ptlrpc_bulk_sec_desc *bsd)
{
        struct crypto_tfm  *tfm;
        struct scatterlist  sg, sg2, *sgd;
        unsigned int        blksize;
        int                 i, rc;
        __u8                local_iv[sizeof(bsd->bsd_iv)];

        LASSERT(alg < BULK_PRIV_ALG_MAX);

        /* start from NULL so a failed encryption never advertises a cipher
         * that was not actually applied; set to the real alg only at the
         * very end */
        if (encrypt)
                bsd->bsd_priv_alg = BULK_PRIV_ALG_NULL;

        if (alg == BULK_PRIV_ALG_NULL)
                return 0;

        /* legacy (pre-2.6.19) kernel crypto API */
        tfm = crypto_alloc_tfm(sptlrpc_bulk_priv_alg2name(alg),
                               sptlrpc_bulk_priv_alg2flags(alg));
        if (tfm == NULL) {
                CERROR("Failed to allocate TFM %s\n",
                       sptlrpc_bulk_priv_alg2name(alg));
                return -ENOMEM;
        }

        blksize = crypto_tfm_alg_blocksize(tfm);
        LASSERT(blksize <= sizeof(local_iv));

        /* sender picks a fresh random IV for each bulk transfer; the peer
         * reads it back from bsd_iv on the wire */
        if (encrypt)
                get_random_bytes(bsd->bsd_iv, sizeof(bsd->bsd_iv));

        /* compute the secret iv */
        rc = lgss_plain_encrypt(gctx, 0,
                                sizeof(local_iv), bsd->bsd_iv, local_iv);
        if (rc) {
                CERROR("failed to compute secret iv: %d\n", rc);
                goto out;
        }

        /* NOTE(review): the derived secret IV is used as the cipher *key*
         * here — presumably intentional (key agreement via the GSS context);
         * confirm against the sptlrpc bulk privacy design */
        rc = crypto_cipher_setkey(tfm, local_iv, sizeof(local_iv));
        if (rc) {
                CERROR("Failed to set key for TFM %s: %d\n",
                       sptlrpc_bulk_priv_alg2name(alg), rc);
                goto out;
        }

        for (i = 0; i < desc->bd_iov_count; i++) {
                /* source scatterlist always points at the current iov page */
                sg.page = desc->bd_iov[i].kiov_page;
                sg.offset = desc->bd_iov[i].kiov_offset;
                sg.length = desc->bd_iov[i].kiov_len;

                if (desc->bd_enc_pages) {
                        /* transform out-of-place into the enc page pool */
                        sg2.page = desc->bd_enc_pages[i];
                        sg2.offset = desc->bd_iov[i].kiov_offset;
                        sg2.length = desc->bd_iov[i].kiov_len;

                        sgd = &sg2;
                } else
                        sgd = &sg;

                if (encrypt)
                        rc = crypto_cipher_encrypt(tfm, sgd, &sg, sg.length);
                else
                        rc = crypto_cipher_decrypt(tfm, sgd, &sg, sg.length);

                /* with a valid key/blocksize these calls are not expected
                 * to fail in this API generation */
                LASSERT(rc == 0);

                /* publish the transformed page in the iov so the bulk I/O
                 * path sends/consumes the ciphertext/plaintext copy */
                if (desc->bd_enc_pages)
                        desc->bd_iov[i].kiov_page = desc->bd_enc_pages[i];

                /* although the procedure might be lengthy, the crypto functions
                 * internally called cond_resched() from time to time.
                 */
        }

        /* only now is it safe to tell the peer which cipher was applied */
        if (encrypt)
                bsd->bsd_priv_alg = alg;

out:
        crypto_free_tfm(tfm);
        return rc;
}
137
138 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
139                           struct ptlrpc_request *req,
140                           struct ptlrpc_bulk_desc *desc)
141 {
142         struct gss_cli_ctx              *gctx;
143         struct lustre_msg               *msg;
144         struct ptlrpc_bulk_sec_desc     *bsdr;
145         int                              offset, rc;
146         ENTRY;
147
148         LASSERT(req->rq_pack_bulk);
149         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
150
151         switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
152         case SPTLRPC_SVC_NULL:
153                 LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
154                 msg = req->rq_reqbuf;
155                 offset = msg->lm_bufcount - 1;
156                 break;
157         case SPTLRPC_SVC_AUTH:
158         case SPTLRPC_SVC_INTG:
159                 LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
160                 msg = req->rq_reqbuf;
161                 offset = msg->lm_bufcount - 2;
162                 break;
163         case SPTLRPC_SVC_PRIV:
164                 LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
165                 msg = req->rq_clrbuf;
166                 offset = msg->lm_bufcount - 1;
167                 break;
168         default:
169                 LBUG();
170         }
171
172         /* make checksum */
173         rc = bulk_csum_cli_request(desc, req->rq_bulk_read,
174                                    req->rq_flvr.sf_bulk_csum, msg, offset);
175         if (rc) {
176                 CERROR("client bulk %s: failed to generate checksum: %d\n",
177                        req->rq_bulk_read ? "read" : "write", rc);
178                 RETURN(rc);
179         }
180
181         if (req->rq_flvr.sf_bulk_priv == BULK_PRIV_ALG_NULL)
182                 RETURN(0);
183
184         /* previous bulk_csum_cli_request() has verified bsdr is good */
185         bsdr = lustre_msg_buf(msg, offset, 0);
186
187         if (req->rq_bulk_read) {
188                 bsdr->bsd_priv_alg = req->rq_flvr.sf_bulk_priv;
189                 RETURN(0);
190         }
191
192         /* it turn out to be bulk write */
193         rc = sptlrpc_enc_pool_get_pages(desc);
194         if (rc) {
195                 CERROR("bulk write: failed to allocate encryption pages\n");
196                 RETURN(rc);
197         }
198
199         gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
200         LASSERT(gctx->gc_mechctx);
201
202         rc = do_bulk_privacy(gctx->gc_mechctx, desc, 1,
203                              req->rq_flvr.sf_bulk_priv, bsdr);
204         if (rc)
205                 CERROR("bulk write: client failed to encrypt pages\n");
206
207         RETURN(rc);
208 }
209
210 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
211                             struct ptlrpc_request *req,
212                             struct ptlrpc_bulk_desc *desc)
213 {
214         struct gss_cli_ctx              *gctx;
215         struct lustre_msg               *rmsg, *vmsg;
216         struct ptlrpc_bulk_sec_desc     *bsdr, *bsdv;
217         int                              roff, voff, rc;
218         ENTRY;
219
220         LASSERT(req->rq_pack_bulk);
221         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
222
223         switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
224         case SPTLRPC_SVC_NULL:
225                 vmsg = req->rq_repbuf;
226                 voff = vmsg->lm_bufcount - 1;
227                 LASSERT(vmsg && vmsg->lm_bufcount >= 3);
228
229                 rmsg = req->rq_reqbuf;
230                 roff = rmsg->lm_bufcount - 1; /* last segment */
231                 LASSERT(rmsg && rmsg->lm_bufcount >= 3);
232                 break;
233         case SPTLRPC_SVC_AUTH:
234         case SPTLRPC_SVC_INTG:
235                 vmsg = req->rq_repbuf;
236                 voff = vmsg->lm_bufcount - 2;
237                 LASSERT(vmsg && vmsg->lm_bufcount >= 4);
238
239                 rmsg = req->rq_reqbuf;
240                 roff = rmsg->lm_bufcount - 2; /* second last segment */
241                 LASSERT(rmsg && rmsg->lm_bufcount >= 4);
242                 break;
243         case SPTLRPC_SVC_PRIV:
244                 vmsg = req->rq_repbuf;
245                 voff = vmsg->lm_bufcount - 1;
246                 LASSERT(vmsg && vmsg->lm_bufcount >= 2);
247
248                 rmsg = req->rq_clrbuf;
249                 roff = rmsg->lm_bufcount - 1; /* last segment */
250                 LASSERT(rmsg && rmsg->lm_bufcount >= 2);
251                 break;
252         default:
253                 LBUG();
254         }
255
256         if (req->rq_bulk_read) {
257                 bsdr = lustre_msg_buf(rmsg, roff, 0);
258                 if (bsdr->bsd_priv_alg == BULK_PRIV_ALG_NULL)
259                         goto verify_csum;
260
261                 bsdv = lustre_msg_buf(vmsg, voff, 0);
262                 if (bsdr->bsd_priv_alg != bsdv->bsd_priv_alg) {
263                         CERROR("bulk read: cipher algorithm mismatch: client "
264                                "request %s but server reply with %s. try to "
265                                "use the new one for decryption\n",
266                                sptlrpc_bulk_priv_alg2name(bsdr->bsd_priv_alg),
267                                sptlrpc_bulk_priv_alg2name(bsdv->bsd_priv_alg));
268                 }
269
270                 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
271                 LASSERT(gctx->gc_mechctx);
272
273                 rc = do_bulk_privacy(gctx->gc_mechctx, desc, 0,
274                                      bsdv->bsd_priv_alg, bsdv);
275                 if (rc) {
276                         CERROR("bulk read: client failed to decrypt data\n");
277                         RETURN(rc);
278                 }
279         }
280
281 verify_csum:
282         rc = bulk_csum_cli_reply(desc, req->rq_bulk_read,
283                                  rmsg, roff, vmsg, voff);
284         RETURN(rc);
285 }
286
287 int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
288                         struct ptlrpc_bulk_desc *desc)
289 {
290         struct gss_svc_reqctx        *grctx;
291         int                           rc;
292         ENTRY;
293
294         LASSERT(req->rq_svc_ctx);
295         LASSERT(req->rq_pack_bulk);
296         LASSERT(req->rq_bulk_write);
297
298         grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
299
300         LASSERT(grctx->src_reqbsd);
301         LASSERT(grctx->src_repbsd);
302         LASSERT(grctx->src_ctx);
303         LASSERT(grctx->src_ctx->gsc_mechctx);
304
305         /* decrypt bulk data if it's encrypted */
306         if (grctx->src_reqbsd->bsd_priv_alg != BULK_PRIV_ALG_NULL) {
307                 rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 0,
308                                      grctx->src_reqbsd->bsd_priv_alg,
309                                      grctx->src_reqbsd);
310                 if (rc) {
311                         CERROR("bulk write: server failed to decrypt data\n");
312                         RETURN(rc);
313                 }
314         }
315
316         /* verify bulk data checksum */
317         rc = bulk_csum_svc(desc, req->rq_bulk_read,
318                            grctx->src_reqbsd, grctx->src_reqbsd_size,
319                            grctx->src_repbsd, grctx->src_repbsd_size);
320
321         RETURN(rc);
322 }
323
324 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
325                       struct ptlrpc_bulk_desc *desc)
326 {
327         struct gss_svc_reqctx        *grctx;
328         int                           rc;
329         ENTRY;
330
331         LASSERT(req->rq_svc_ctx);
332         LASSERT(req->rq_pack_bulk);
333         LASSERT(req->rq_bulk_read);
334
335         grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
336
337         LASSERT(grctx->src_reqbsd);
338         LASSERT(grctx->src_repbsd);
339         LASSERT(grctx->src_ctx);
340         LASSERT(grctx->src_ctx->gsc_mechctx);
341
342         /* generate bulk data checksum */
343         rc = bulk_csum_svc(desc, req->rq_bulk_read,
344                            grctx->src_reqbsd, grctx->src_reqbsd_size,
345                            grctx->src_repbsd, grctx->src_repbsd_size);
346         if (rc)
347                 RETURN(rc);
348
349         /* encrypt bulk data if required */
350         if (grctx->src_reqbsd->bsd_priv_alg != BULK_PRIV_ALG_NULL) {
351                 rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 1,
352                                      grctx->src_reqbsd->bsd_priv_alg,
353                                      grctx->src_repbsd);
354                 if (rc)
355                         CERROR("bulk read: server failed to encrypt data: "
356                                "rc %d\n", rc);
357         }
358
359         RETURN(rc);
360 }
361