/*
 * Source: fs/lustre-release.git, lustre/ptlrpc/gss/gss_bulk.c
 * (gitweb navigation header removed; revision carries the LU-7243
 * "update Intel copyright messages 2015" change.)
 */
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2014, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/gss/gss_bulk.c
 *
 * Author: Eric Mei <eric.mei@sun.com>
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/crypto.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"

62 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
63                           struct ptlrpc_request *req,
64                           struct ptlrpc_bulk_desc *desc)
65 {
66         struct gss_cli_ctx              *gctx;
67         struct lustre_msg               *msg;
68         struct ptlrpc_bulk_sec_desc     *bsd;
69         rawobj_t                         token;
70         __u32                            maj;
71         int                              offset;
72         int                              rc;
73         ENTRY;
74
75         LASSERT(req->rq_pack_bulk);
76         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
77         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
78
79         gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
80         LASSERT(gctx->gc_mechctx);
81
82         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
83         case SPTLRPC_SVC_NULL:
84                 LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
85                 msg = req->rq_reqbuf;
86                 offset = msg->lm_bufcount - 1;
87                 break;
88         case SPTLRPC_SVC_AUTH:
89         case SPTLRPC_SVC_INTG:
90                 LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
91                 msg = req->rq_reqbuf;
92                 offset = msg->lm_bufcount - 2;
93                 break;
94         case SPTLRPC_SVC_PRIV:
95                 LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
96                 msg = req->rq_clrbuf;
97                 offset = msg->lm_bufcount - 1;
98                 break;
99         default:
100                 LBUG();
101         }
102
103         bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
104         bsd->bsd_version = 0;
105         bsd->bsd_flags = 0;
106         bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
107         bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
108
109         if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
110                 RETURN(0);
111
112         LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
113                 bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
114
115         if (req->rq_bulk_read) {
116                 /*
117                  * bulk read: prepare receiving pages only for privacy mode.
118                  */
119                 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
120                         return gss_cli_prep_bulk(req, desc);
121         } else {
122                 /*
123                  * bulk write: sign or encrypt bulk pages.
124                  */
125                 bsd->bsd_nob = desc->bd_nob;
126
127                 if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
128                         /* integrity mode */
129                         token.data = bsd->bsd_data;
130                         token.len = lustre_msg_buflen(msg, offset) -
131                                     sizeof(*bsd);
132
133                         maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
134                                            desc->bd_iov_count,
135                                            GET_KIOV(desc),
136                                            &token);
137                         if (maj != GSS_S_COMPLETE) {
138                                 CWARN("failed to sign bulk data: %x\n", maj);
139                                 RETURN(-EACCES);
140                         }
141                 } else {
142                         /* privacy mode */
143                         if (desc->bd_iov_count == 0)
144                                 RETURN(0);
145
146                         rc = sptlrpc_enc_pool_get_pages(desc);
147                         if (rc) {
148                                 CERROR("bulk write: failed to allocate "
149                                        "encryption pages: %d\n", rc);
150                                 RETURN(rc);
151                         }
152
153                         token.data = bsd->bsd_data;
154                         token.len = lustre_msg_buflen(msg, offset) -
155                                     sizeof(*bsd);
156
157                         maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
158                         if (maj != GSS_S_COMPLETE) {
159                                 CWARN("fail to encrypt bulk data: %x\n", maj);
160                                 RETURN(-EACCES);
161                         }
162                 }
163         }
164
165         RETURN(0);
166 }
167
168 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
169                             struct ptlrpc_request *req,
170                             struct ptlrpc_bulk_desc *desc)
171 {
172         struct gss_cli_ctx              *gctx;
173         struct lustre_msg               *rmsg, *vmsg;
174         struct ptlrpc_bulk_sec_desc     *bsdr, *bsdv;
175         rawobj_t                         token;
176         __u32                            maj;
177         int                              roff, voff;
178         ENTRY;
179
180         LASSERT(req->rq_pack_bulk);
181         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
182         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
183
184         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
185         case SPTLRPC_SVC_NULL:
186                 vmsg = req->rq_repdata;
187                 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
188                 voff = vmsg->lm_bufcount - 1;
189
190                 rmsg = req->rq_reqbuf;
191                 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
192                 roff = rmsg->lm_bufcount - 1; /* last segment */
193                 break;
194         case SPTLRPC_SVC_AUTH:
195         case SPTLRPC_SVC_INTG:
196                 vmsg = req->rq_repdata;
197                 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
198                 voff = vmsg->lm_bufcount - 2;
199
200                 rmsg = req->rq_reqbuf;
201                 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
202                 roff = rmsg->lm_bufcount - 2; /* second last segment */
203                 break;
204         case SPTLRPC_SVC_PRIV:
205                 vmsg = req->rq_repdata;
206                 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
207                 voff = vmsg->lm_bufcount - 1;
208
209                 rmsg = req->rq_clrbuf;
210                 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
211                 roff = rmsg->lm_bufcount - 1; /* last segment */
212                 break;
213         default:
214                 LBUG();
215         }
216
217         bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
218         bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
219         LASSERT(bsdr && bsdv);
220
221         if (bsdr->bsd_version != bsdv->bsd_version ||
222             bsdr->bsd_type != bsdv->bsd_type ||
223             bsdr->bsd_svc != bsdv->bsd_svc) {
224                 CERROR("bulk security descriptor mismatch: "
225                        "(%u,%u,%u) != (%u,%u,%u)\n",
226                        bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
227                        bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
228                 RETURN(-EPROTO);
229         }
230
231         LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
232                 bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
233                 bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
234
235         /*
236          * in privacy mode if return success, make sure bd_nob_transferred
237          * is the actual size of the clear text, otherwise upper layer
238          * may be surprised.
239          */
240         if (req->rq_bulk_write) {
241                 if (bsdv->bsd_flags & BSD_FL_ERR) {
242                         CERROR("server reported bulk i/o failure\n");
243                         RETURN(-EIO);
244                 }
245
246                 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
247                         desc->bd_nob_transferred = desc->bd_nob;
248         } else {
249                 /*
250                  * bulk read, upon return success, bd_nob_transferred is
251                  * the size of plain text actually received.
252                  */
253                 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
254                 LASSERT(gctx->gc_mechctx);
255
256                 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
257                         int i, nob;
258
259                         /* fix the actual data size */
260                         for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
261                                 if (BD_GET_KIOV(desc, i).kiov_len + nob >
262                                     desc->bd_nob_transferred) {
263                                         BD_GET_KIOV(desc, i).kiov_len =
264                                                 desc->bd_nob_transferred - nob;
265                                 }
266                                 nob += BD_GET_KIOV(desc, i).kiov_len;
267                         }
268
269                         token.data = bsdv->bsd_data;
270                         token.len = lustre_msg_buflen(vmsg, voff) -
271                                     sizeof(*bsdv);
272
273                         maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
274                                               desc->bd_iov_count,
275                                               GET_KIOV(desc),
276                                               &token);
277                         if (maj != GSS_S_COMPLETE) {
278                                 CERROR("failed to verify bulk read: %x\n", maj);
279                                 RETURN(-EACCES);
280                         }
281                 } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
282                         desc->bd_nob = bsdv->bsd_nob;
283                         if (desc->bd_nob == 0)
284                                 RETURN(0);
285
286                         token.data = bsdv->bsd_data;
287                         token.len = lustre_msg_buflen(vmsg, voff) -
288                                     sizeof(*bsdr);
289
290                         maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
291                                                &token, 1);
292                         if (maj != GSS_S_COMPLETE) {
293                                 CERROR("failed to decrypt bulk read: %x\n",
294                                        maj);
295                                 RETURN(-EACCES);
296                         }
297
298                         desc->bd_nob_transferred = desc->bd_nob;
299                 }
300         }
301
302         RETURN(0);
303 }
304
305 static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
306                          struct gss_ctx *mechctx)
307 {
308         int     rc;
309
310         if (desc->bd_iov_count == 0)
311                 return 0;
312
313         rc = sptlrpc_enc_pool_get_pages(desc);
314         if (rc)
315                 return rc;
316
317         if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
318                 return -EACCES;
319
320         return 0;
321 }
322
323 int gss_cli_prep_bulk(struct ptlrpc_request *req,
324                       struct ptlrpc_bulk_desc *desc)
325 {
326         int             rc;
327         ENTRY;
328
329         LASSERT(req->rq_cli_ctx);
330         LASSERT(req->rq_pack_bulk);
331         LASSERT(req->rq_bulk_read);
332
333         if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
334                 RETURN(0);
335
336         rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
337         if (rc)
338                 CERROR("bulk read: failed to prepare encryption "
339                        "pages: %d\n", rc);
340
341         RETURN(rc);
342 }
343
344 int gss_svc_prep_bulk(struct ptlrpc_request *req,
345                       struct ptlrpc_bulk_desc *desc)
346 {
347         struct gss_svc_reqctx        *grctx;
348         struct ptlrpc_bulk_sec_desc  *bsd;
349         int                           rc;
350         ENTRY;
351
352         LASSERT(req->rq_svc_ctx);
353         LASSERT(req->rq_pack_bulk);
354         LASSERT(req->rq_bulk_write);
355
356         grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
357         LASSERT(grctx->src_reqbsd);
358         LASSERT(grctx->src_repbsd);
359         LASSERT(grctx->src_ctx);
360         LASSERT(grctx->src_ctx->gsc_mechctx);
361
362         bsd = grctx->src_reqbsd;
363         if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
364                 RETURN(0);
365
366         rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
367         if (rc)
368                 CERROR("bulk write: failed to prepare encryption "
369                        "pages: %d\n", rc);
370
371         RETURN(rc);
372 }
373
/*
 * Server side of bulk write: check the bulk pages just received against
 * the security descriptor the client embedded in the request (bsdr).
 * In integrity mode, verify the client's MIC over the pages; in privacy
 * mode, check the announced byte count and decrypt the pages in place.
 * The reply-side descriptor (bsdv) is initialized here, and BSD_FL_ERR
 * is set in it on any failure before returning the error.
 *
 * Returns 0 on success, -EACCES on MIC/decryption failure, or -EPROTO
 * if the client-announced byte count does not match the descriptor.
 */
int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx        *grctx;
        struct ptlrpc_bulk_sec_desc  *bsdr, *bsdv;
        rawobj_t                      token;
        __u32                         maj;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_write);
        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        bsdr = grctx->src_reqbsd;       /* descriptor sent by the client */
        bsdv = grctx->src_repbsd;       /* descriptor returned in the reply */

        /* bsdr has been sanity checked during unpacking */
        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        /* no case for SPTLRPC_BULK_SVC_NULL: nothing to check, fall
         * through to the final RETURN(0) */
        switch (bsdv->bsd_svc) {
        case SPTLRPC_BULK_SVC_INTG:
                /* the client's MIC token follows the fixed-size descriptor
                 * in the request-side bsd segment */
                token.data = bsdr->bsd_data;
                token.len = grctx->src_reqbsd_size - sizeof(*bsdr);

                maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
                                      desc->bd_iov_count,
                                      GET_KIOV(desc), &token);
                if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed to verify bulk signature: %x\n", maj);
                        RETURN(-EACCES);
                }
                break;
        case SPTLRPC_BULK_SVC_PRIV:
                /* the byte count the client recorded at wrap time must
                 * match what actually arrived */
                if (bsdr->bsd_nob != desc->bd_nob) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("prepared nob %d doesn't match the actual "
                               "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
                        RETURN(-EPROTO);
                }

                /* empty transfer: nothing to decrypt */
                if (desc->bd_iov_count == 0) {
                        LASSERT(desc->bd_nob == 0);
                        break;
                }

                token.data = bsdr->bsd_data;
                token.len = grctx->src_reqbsd_size - sizeof(*bsdr);

                maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
                                       desc, &token, 0);
                if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed decrypt bulk data: %x\n", maj);
                        RETURN(-EACCES);
                }

                /* mimic gss_cli_ctx_unwrap_bulk */
                desc->bd_nob_transferred = desc->bd_nob;

                break;
        }

        RETURN(0);
}
450
/*
 * Server side of bulk read: protect the bulk pages about to be sent to
 * the client, according to the service level the client requested (bsdr).
 * In integrity mode, compute a MIC over the pages into the reply-side
 * descriptor's token area; in privacy mode, record the clear-text size,
 * allocate encryption pages, and encrypt the pages, storing the token in
 * the reply descriptor (bsdv).  BSD_FL_ERR is set in bsdv on any failure
 * before returning the error.
 *
 * Returns 0 on success, -EACCES on a GSS mechanism failure, or the error
 * from encryption page allocation.
 */
int gss_svc_wrap_bulk(struct ptlrpc_request *req,
                      struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx        *grctx;
        struct ptlrpc_bulk_sec_desc  *bsdr, *bsdv;
        rawobj_t                      token;
        __u32                         maj;
        int                           rc;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read);
        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        bsdr = grctx->src_reqbsd;       /* descriptor sent by the client */
        bsdv = grctx->src_repbsd;       /* descriptor returned in the reply */

        /* bsdr has been sanity checked during unpacking */
        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        /* no case for SPTLRPC_BULK_SVC_NULL: nothing to protect, fall
         * through to the final RETURN(0) */
        switch (bsdv->bsd_svc) {
        case SPTLRPC_BULK_SVC_INTG:
                /* the MIC token goes after the fixed-size descriptor in
                 * the reply-side bsd segment */
                token.data = bsdv->bsd_data;
                token.len = grctx->src_repbsd_size - sizeof(*bsdv);

                maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
                                   desc->bd_iov_count,
                                   GET_KIOV(desc), &token);
                if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed to sign bulk data: %x\n", maj);
                        RETURN(-EACCES);
                }
                break;
        case SPTLRPC_BULK_SVC_PRIV:
                /* record the clear-text size so the client can restore
                 * bd_nob after decryption */
                bsdv->bsd_nob = desc->bd_nob;

                /* empty transfer: nothing to encrypt */
                if (desc->bd_iov_count == 0) {
                        LASSERT(desc->bd_nob == 0);
                        break;
                }

                rc = sptlrpc_enc_pool_get_pages(desc);
                if (rc) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("bulk read: failed to allocate encryption "
                               "pages: %d\n", rc);
                        RETURN(rc);
                }

                token.data = bsdv->bsd_data;
                token.len = grctx->src_repbsd_size - sizeof(*bsdv);

                maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
                                     desc, &token, 1);
                if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed to encrypt bulk data: %x\n", maj);
                        RETURN(-EACCES);
                }
                break;
        }

        RETURN(0);
}