Whamcloud - gitweb
LU-14487 modules: remove references to Sun Trademark.
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_bulk.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/ptlrpc/gss/gss_bulk.c
32  *
33  * Author: Eric Mei <eric.mei@sun.com>
34  */
35
36 #define DEBUG_SUBSYSTEM S_SEC
37 #include <linux/init.h>
38 #include <linux/module.h>
39 #include <linux/slab.h>
40 #include <linux/dcache.h>
41 #include <linux/fs.h>
42 #include <linux/mutex.h>
43 #include <linux/crypto.h>
44
45 #include <obd.h>
46 #include <obd_class.h>
47 #include <obd_support.h>
48 #include <lustre_net.h>
49 #include <lustre_import.h>
50 #include <lustre_sec.h>
51
52 #include "gss_err.h"
53 #include "gss_internal.h"
54 #include "gss_api.h"
55
/**
 * Client side: fill in the bulk security descriptor (bsd) in the outgoing
 * request buffer, then protect the bulk pages according to the negotiated
 * bulk service: nothing for null, a MIC signature for integrity, or full
 * encryption for privacy.
 *
 * \param[in] ctx   client security context, must embed a valid GSS
 *                  mech context
 * \param[in] req   request the bulk transfer belongs to
 * \param[in] desc  bulk descriptor covering the pages to transfer
 *
 * \retval 0        success (always for the null bulk service)
 * \retval -EACCES  GSS signing or encryption of the pages failed
 * \retval negative other error, e.g. encryption page allocation failure
 */
int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct gss_cli_ctx              *gctx;
        struct lustre_msg               *msg;
        struct ptlrpc_bulk_sec_desc     *bsd;
        rawobj_t                         token;
        __u32                            maj;
        int                              offset;
        int                              rc;
        ENTRY;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
        LASSERT(gctx->gc_mechctx);

        /* locate the message segment holding the bsd; which buffer and
         * which segment depends on the rpc service part of the flavor */
        switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
                LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
                msg = req->rq_reqbuf;
                offset = msg->lm_bufcount - 1;
                break;
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
                msg = req->rq_reqbuf;
                offset = msg->lm_bufcount - 2;
                break;
        case SPTLRPC_SVC_PRIV:
                /* in privacy mode the bsd lives in the clear-text buffer,
                 * which is wrapped as a whole later */
                LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
                msg = req->rq_clrbuf;
                offset = msg->lm_bufcount - 1;
                break;
        default:
                LBUG();
        }

        /* initialize the on-wire bulk security descriptor */
        bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
        bsd->bsd_version = 0;
        bsd->bsd_flags = 0;
        bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

        if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                RETURN(0);

        LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
                bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);

        if (req->rq_bulk_read) {
                /*
                 * bulk read: prepare receiving pages only for privacy mode.
                 */
                if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
                        return gss_cli_prep_bulk(req, desc);
        } else {
                /*
                 * bulk write: sign or encrypt bulk pages.
                 */
                bsd->bsd_nob = desc->bd_nob;

                if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
                        /* integrity mode */
                        /* the MIC token is placed right after the bsd header
                         * in the same message segment */
                        token.data = bsd->bsd_data;
                        token.len = lustre_msg_buflen(msg, offset) -
                                    sizeof(*bsd);

                        maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
                                           desc->bd_iov_count,
                                           desc->bd_vec,
                                           &token);
                        if (maj != GSS_S_COMPLETE) {
                                CWARN("failed to sign bulk data: %x\n", maj);
                                RETURN(-EACCES);
                        }
                } else {
                        /* privacy mode */
                        if (desc->bd_iov_count == 0)
                                RETURN(0);

                        /* swap in pages from the encryption pool so the
                         * caller's pages are never sent in the clear */
                        rc = sptlrpc_enc_pool_get_pages(desc);
                        if (rc) {
                                CERROR("bulk write: failed to allocate "
                                       "encryption pages: %d\n", rc);
                                RETURN(rc);
                        }

                        token.data = bsd->bsd_data;
                        token.len = lustre_msg_buflen(msg, offset) -
                                    sizeof(*bsd);

                        maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
                        if (maj != GSS_S_COMPLETE) {
                                CWARN("fail to encrypt bulk data: %x\n", maj);
                                RETURN(-EACCES);
                        }
                }
        }

        RETURN(0);
}
160
161 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
162                             struct ptlrpc_request *req,
163                             struct ptlrpc_bulk_desc *desc)
164 {
165         struct gss_cli_ctx              *gctx;
166         struct lustre_msg               *rmsg, *vmsg;
167         struct ptlrpc_bulk_sec_desc     *bsdr, *bsdv;
168         rawobj_t                         token;
169         __u32                            maj;
170         int                              roff, voff;
171         ENTRY;
172
173         LASSERT(req->rq_pack_bulk);
174         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
175
176         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
177         case SPTLRPC_SVC_NULL:
178                 vmsg = req->rq_repdata;
179                 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
180                 voff = vmsg->lm_bufcount - 1;
181
182                 rmsg = req->rq_reqbuf;
183                 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
184                 roff = rmsg->lm_bufcount - 1; /* last segment */
185                 break;
186         case SPTLRPC_SVC_AUTH:
187         case SPTLRPC_SVC_INTG:
188                 vmsg = req->rq_repdata;
189                 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
190                 voff = vmsg->lm_bufcount - 2;
191
192                 rmsg = req->rq_reqbuf;
193                 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
194                 roff = rmsg->lm_bufcount - 2; /* second last segment */
195                 break;
196         case SPTLRPC_SVC_PRIV:
197                 vmsg = req->rq_repdata;
198                 LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
199                 voff = vmsg->lm_bufcount - 1;
200
201                 rmsg = req->rq_clrbuf;
202                 LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
203                 roff = rmsg->lm_bufcount - 1; /* last segment */
204                 break;
205         default:
206                 LBUG();
207         }
208
209         bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
210         bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
211         LASSERT(bsdr && bsdv);
212
213         if (bsdr->bsd_version != bsdv->bsd_version ||
214             bsdr->bsd_type != bsdv->bsd_type ||
215             bsdr->bsd_svc != bsdv->bsd_svc) {
216                 CERROR("bulk security descriptor mismatch: "
217                        "(%u,%u,%u) != (%u,%u,%u)\n",
218                        bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
219                        bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
220                 RETURN(-EPROTO);
221         }
222
223         LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
224                 bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
225                 bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
226
227         /*
228          * in privacy mode if return success, make sure bd_nob_transferred
229          * is the actual size of the clear text, otherwise upper layer
230          * may be surprised.
231          */
232         if (req->rq_bulk_write) {
233                 if (bsdv->bsd_flags & BSD_FL_ERR) {
234                         CERROR("server reported bulk i/o failure\n");
235                         RETURN(-EIO);
236                 }
237
238                 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
239                         desc->bd_nob_transferred = desc->bd_nob;
240         } else {
241                 /*
242                  * bulk read, upon return success, bd_nob_transferred is
243                  * the size of plain text actually received.
244                  */
245                 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
246                 LASSERT(gctx->gc_mechctx);
247
248                 if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
249                         int i, nob;
250
251                         /* fix the actual data size */
252                         for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
253                                 if (desc->bd_vec[i].bv_len + nob >
254                                     desc->bd_nob_transferred) {
255                                         desc->bd_vec[i].bv_len =
256                                                 desc->bd_nob_transferred - nob;
257                                 }
258                                 nob += desc->bd_vec[i].bv_len;
259                         }
260
261                         token.data = bsdv->bsd_data;
262                         token.len = lustre_msg_buflen(vmsg, voff) -
263                                     sizeof(*bsdv);
264
265                         maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
266                                               desc->bd_iov_count,
267                                               desc->bd_vec,
268                                               &token);
269                         if (maj != GSS_S_COMPLETE) {
270                                 CERROR("failed to verify bulk read: %x\n", maj);
271                                 RETURN(-EACCES);
272                         }
273                 } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
274                         desc->bd_nob = bsdv->bsd_nob;
275                         if (desc->bd_nob == 0)
276                                 RETURN(0);
277
278                         token.data = bsdv->bsd_data;
279                         token.len = lustre_msg_buflen(vmsg, voff) -
280                                     sizeof(*bsdr);
281
282                         maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
283                                                &token, 1);
284                         if (maj != GSS_S_COMPLETE) {
285                                 CERROR("failed to decrypt bulk read: %x\n",
286                                        maj);
287                                 RETURN(-EACCES);
288                         }
289
290                         desc->bd_nob_transferred = desc->bd_nob;
291                 }
292         }
293
294         RETURN(0);
295 }
296
297 static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
298                          struct gss_ctx *mechctx)
299 {
300         int     rc;
301
302         if (desc->bd_iov_count == 0)
303                 return 0;
304
305         rc = sptlrpc_enc_pool_get_pages(desc);
306         if (rc)
307                 return rc;
308
309         if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
310                 return -EACCES;
311
312         return 0;
313 }
314
315 int gss_cli_prep_bulk(struct ptlrpc_request *req,
316                       struct ptlrpc_bulk_desc *desc)
317 {
318         int             rc;
319         ENTRY;
320
321         LASSERT(req->rq_cli_ctx);
322         LASSERT(req->rq_pack_bulk);
323         LASSERT(req->rq_bulk_read);
324
325         if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
326                 RETURN(0);
327
328         rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
329         if (rc)
330                 CERROR("bulk read: failed to prepare encryption "
331                        "pages: %d\n", rc);
332
333         RETURN(rc);
334 }
335
336 int gss_svc_prep_bulk(struct ptlrpc_request *req,
337                       struct ptlrpc_bulk_desc *desc)
338 {
339         struct gss_svc_reqctx        *grctx;
340         struct ptlrpc_bulk_sec_desc  *bsd;
341         int                           rc;
342         ENTRY;
343
344         LASSERT(req->rq_svc_ctx);
345         LASSERT(req->rq_pack_bulk);
346         LASSERT(req->rq_bulk_write);
347
348         grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
349         LASSERT(grctx->src_reqbsd);
350         LASSERT(grctx->src_repbsd);
351         LASSERT(grctx->src_ctx);
352         LASSERT(grctx->src_ctx->gsc_mechctx);
353
354         bsd = grctx->src_reqbsd;
355         if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
356                 RETURN(0);
357
358         rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
359         if (rc)
360                 CERROR("bulk write: failed to prepare encryption "
361                        "pages: %d\n", rc);
362
363         RETURN(rc);
364 }
365
/**
 * Server side: after receiving bulk write data, initialize the reply bulk
 * security descriptor (bsdv) and verify (integrity mode) or decrypt
 * (privacy mode) the received pages according to the client's request
 * descriptor (bsdr). On failure, BSD_FL_ERR is set in the reply so the
 * client learns the bulk i/o failed.
 *
 * \param[in] req   request the bulk write belongs to
 * \param[in] desc  bulk descriptor covering the received pages
 *
 * \retval 0        success (also for the null bulk service)
 * \retval -EACCES  MIC verification or decryption failed
 * \retval -EPROTO  client-reported size disagrees with the descriptor
 */
int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx        *grctx;
        struct ptlrpc_bulk_sec_desc  *bsdr, *bsdv;
        rawobj_t                      token;
        __u32                         maj;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_write);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        bsdr = grctx->src_reqbsd;
        bsdv = grctx->src_repbsd;

        /* bsdr has been sanity checked during unpacking */
        /* echo the request's bulk service back in the reply descriptor */
        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        /* null bulk service: nothing to verify, falls through to success */
        switch (bsdv->bsd_svc) {
        case SPTLRPC_BULK_SVC_INTG:
                /* MIC token sits right after the request bsd header */
                token.data = bsdr->bsd_data;
                token.len = grctx->src_reqbsd_size - sizeof(*bsdr);

                maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
                                      desc->bd_iov_count,
                                      desc->bd_vec, &token);
                if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed to verify bulk signature: %x\n", maj);
                        RETURN(-EACCES);
                }
                break;
        case SPTLRPC_BULK_SVC_PRIV:
                /* client-declared clear-text size must match our descriptor */
                if (bsdr->bsd_nob != desc->bd_nob) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("prepared nob %d doesn't match the actual "
                               "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
                        RETURN(-EPROTO);
                }

                if (desc->bd_iov_count == 0) {
                        LASSERT(desc->bd_nob == 0);
                        break;
                }

                token.data = bsdr->bsd_data;
                token.len = grctx->src_reqbsd_size - sizeof(*bsdr);

                maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
                                       desc, &token, 0);
                if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed decrypt bulk data: %x\n", maj);
                        RETURN(-EACCES);
                }

                /* mimic gss_cli_ctx_unwrap_bulk */
                desc->bd_nob_transferred = desc->bd_nob;

                break;
        }

        RETURN(0);
}
441
442 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
443                       struct ptlrpc_bulk_desc *desc)
444 {
445         struct gss_svc_reqctx        *grctx;
446         struct ptlrpc_bulk_sec_desc  *bsdr, *bsdv;
447         rawobj_t                      token;
448         __u32                         maj;
449         int                           rc;
450         ENTRY;
451
452         LASSERT(req->rq_svc_ctx);
453         LASSERT(req->rq_pack_bulk);
454         LASSERT(req->rq_bulk_read);
455
456         grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
457
458         LASSERT(grctx->src_reqbsd);
459         LASSERT(grctx->src_repbsd);
460         LASSERT(grctx->src_ctx);
461         LASSERT(grctx->src_ctx->gsc_mechctx);
462
463         bsdr = grctx->src_reqbsd;
464         bsdv = grctx->src_repbsd;
465
466         /* bsdr has been sanity checked during unpacking */
467         bsdv->bsd_version = 0;
468         bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
469         bsdv->bsd_svc = bsdr->bsd_svc;
470         bsdv->bsd_flags = 0;
471
472         switch (bsdv->bsd_svc) {
473         case SPTLRPC_BULK_SVC_INTG:
474                 token.data = bsdv->bsd_data;
475                 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
476
477                 maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
478                                    desc->bd_iov_count,
479                                    desc->bd_vec, &token);
480                 if (maj != GSS_S_COMPLETE) {
481                         bsdv->bsd_flags |= BSD_FL_ERR;
482                         CERROR("failed to sign bulk data: %x\n", maj);
483                         RETURN(-EACCES);
484                 }
485                 break;
486         case SPTLRPC_BULK_SVC_PRIV:
487                 bsdv->bsd_nob = desc->bd_nob;
488
489                 if (desc->bd_iov_count == 0) {
490                         LASSERT(desc->bd_nob == 0);
491                         break;
492                 }
493
494                 rc = sptlrpc_enc_pool_get_pages(desc);
495                 if (rc) {
496                         bsdv->bsd_flags |= BSD_FL_ERR;
497                         CERROR("bulk read: failed to allocate encryption "
498                                "pages: %d\n", rc);
499                         RETURN(rc);
500                 }
501
502                 token.data = bsdv->bsd_data;
503                 token.len = grctx->src_repbsd_size - sizeof(*bsdv);
504
505                 maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
506                                      desc, &token, 1);
507                 if (maj != GSS_S_COMPLETE) {
508                         bsdv->bsd_flags |= BSD_FL_ERR;
509                         CERROR("failed to encrypt bulk data: %x\n", maj);
510                         RETURN(-EACCES);
511                 }
512                 break;
513         }
514
515         RETURN(0);
516 }