LU-13004 ptlrpc: simplify bd_vec access.
lustre/ptlrpc/gss/gss_bulk.c (fs/lustre-release.git)
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/gss/gss_bulk.c
 *
 * Author: Eric Mei <eric.mei@sun.com>
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/crypto.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"

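/*
 * Client side: protect an outgoing bulk transfer.  Locate the bulk
 * security descriptor in the request buffer (its position depends on
 * the RPC service flavor) and fill in its header fields.  For bulk
 * read in privacy mode, only prepare the pages that will receive the
 * encrypted data; for bulk write, sign the bulk pages in integrity
 * mode or encrypt them into encryption pool pages in privacy mode.
 */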
int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct gss_cli_ctx              *gctx;
        struct lustre_msg               *msg;
        struct ptlrpc_bulk_sec_desc     *bsd;
        rawobj_t                         token;
        __u32                            maj;
        int                              offset;
        int                              rc;
        ENTRY;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
        LASSERT(gctx->gc_mechctx);

        switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
                LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
                msg = req->rq_reqbuf;
                offset = msg->lm_bufcount - 1;
                break;
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
                msg = req->rq_reqbuf;
                offset = msg->lm_bufcount - 2;
                break;
        case SPTLRPC_SVC_PRIV:
                LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
                msg = req->rq_clrbuf;
                offset = msg->lm_bufcount - 1;
                break;
        default:
                LBUG();
        }

        bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
        bsd->bsd_version = 0;
        bsd->bsd_flags = 0;
        bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

        if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                RETURN(0);

        LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
                bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);

        if (req->rq_bulk_read) {
                /*
                 * bulk read: prepare receiving pages only for privacy mode.
                 */
                if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
                        return gss_cli_prep_bulk(req, desc);
        } else {
                /*
                 * bulk write: sign or encrypt bulk pages.
                 */
                bsd->bsd_nob = desc->bd_nob;

                if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
                        /* integrity mode */
                        token.data = bsd->bsd_data;
                        token.len = lustre_msg_buflen(msg, offset) -
                                    sizeof(*bsd);

                        maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
                                           desc->bd_iov_count,
                                           desc->bd_vec,
                                           &token);
                        if (maj != GSS_S_COMPLETE) {
                                CWARN("failed to sign bulk data: %x\n", maj);
                                RETURN(-EACCES);
                        }
                } else {
                        /* privacy mode */
                        if (desc->bd_iov_count == 0)
                                RETURN(0);

                        rc = sptlrpc_enc_pool_get_pages(desc);
                        if (rc) {
                                CERROR("bulk write: failed to allocate "
                                       "encryption pages: %d\n", rc);
                                RETURN(rc);
                        }

                        token.data = bsd->bsd_data;
                        token.len = lustre_msg_buflen(msg, offset) -
                                    sizeof(*bsd);

                        maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
                        if (maj != GSS_S_COMPLETE) {
                                CWARN("failed to encrypt bulk data: %x\n", maj);
                                RETURN(-EACCES);
                        }
                }
        }

        RETURN(0);
}

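/*
 * Client side: check a finished bulk transfer against the bulk
 * security descriptors in the request and the reply.  For bulk write,
 * report a server-side bulk I/O error if BSD_FL_ERR is set and, in
 * privacy mode, fix up bd_nob_transferred.  For bulk read, clip the
 * received iov lengths and verify the MIC in integrity mode, or
 * decrypt the data and set bd_nob_transferred to the clear-text size
 * in privacy mode.
 */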
int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                            struct ptlrpc_request *req,
                            struct ptlrpc_bulk_desc *desc)
{
        struct gss_cli_ctx              *gctx;
        struct lustre_msg               *rmsg, *vmsg;
        struct ptlrpc_bulk_sec_desc     *bsdr, *bsdv;
        rawobj_t                         token;
        __u32                            maj;
        int                              roff, voff;
        ENTRY;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
                vmsg = req->rq_repdata;
                LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 3);
                voff = vmsg->lm_bufcount - 1;

                rmsg = req->rq_reqbuf;
                LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 3);
                roff = rmsg->lm_bufcount - 1; /* last segment */
                break;
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                vmsg = req->rq_repdata;
                LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 4);
                voff = vmsg->lm_bufcount - 2;

                rmsg = req->rq_reqbuf;
                LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 4);
                roff = rmsg->lm_bufcount - 2; /* second last segment */
                break;
        case SPTLRPC_SVC_PRIV:
                vmsg = req->rq_repdata;
                LASSERT(vmsg != NULL && vmsg->lm_bufcount >= 2);
                voff = vmsg->lm_bufcount - 1;

                rmsg = req->rq_clrbuf;
                LASSERT(rmsg != NULL && rmsg->lm_bufcount >= 2);
                roff = rmsg->lm_bufcount - 1; /* last segment */
                break;
        default:
                LBUG();
        }

        bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
        bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
        LASSERT(bsdr && bsdv);

        if (bsdr->bsd_version != bsdv->bsd_version ||
            bsdr->bsd_type != bsdv->bsd_type ||
            bsdr->bsd_svc != bsdv->bsd_svc) {
                CERROR("bulk security descriptor mismatch: "
                       "(%u,%u,%u) != (%u,%u,%u)\n",
                       bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
                       bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
                RETURN(-EPROTO);
        }

        LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
                bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
                bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);

        /*
         * In privacy mode, on successful return make sure bd_nob_transferred
         * is the actual size of the clear text; otherwise the upper layer
         * may be surprised.
         */
        if (req->rq_bulk_write) {
                if (bsdv->bsd_flags & BSD_FL_ERR) {
                        CERROR("server reported bulk i/o failure\n");
                        RETURN(-EIO);
                }

                if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
                        desc->bd_nob_transferred = desc->bd_nob;
        } else {
                /*
                 * Bulk read: upon successful return, bd_nob_transferred is
                 * the size of the plain text actually received.
                 */
                gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
                LASSERT(gctx->gc_mechctx);

                if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
                        int i, nob;

                        /* fix the actual data size */
                        for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
                                if (desc->bd_vec[i].kiov_len + nob >
                                    desc->bd_nob_transferred) {
                                        desc->bd_vec[i].kiov_len =
                                                desc->bd_nob_transferred - nob;
                                }
                                nob += desc->bd_vec[i].kiov_len;
                        }

                        token.data = bsdv->bsd_data;
                        token.len = lustre_msg_buflen(vmsg, voff) -
                                    sizeof(*bsdv);

                        maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
                                              desc->bd_iov_count,
                                              desc->bd_vec,
                                              &token);
                        if (maj != GSS_S_COMPLETE) {
                                CERROR("failed to verify bulk read: %x\n", maj);
                                RETURN(-EACCES);
                        }
                } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
                        desc->bd_nob = bsdv->bsd_nob;
                        if (desc->bd_nob == 0)
                                RETURN(0);

                        token.data = bsdv->bsd_data;
                        token.len = lustre_msg_buflen(vmsg, voff) -
                                    sizeof(*bsdv);

                        maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
                                               &token, 1);
                        if (maj != GSS_S_COMPLETE) {
                                CERROR("failed to decrypt bulk read: %x\n",
                                       maj);
                                RETURN(-EACCES);
                        }

                        desc->bd_nob_transferred = desc->bd_nob;
                }
        }

        RETURN(0);
}

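/*
 * Common helper for the client and server prep paths: allocate
 * encryption pool pages for the descriptor and let the mechanism do
 * any per-page preparation needed for privacy mode.
 */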
static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
                         struct gss_ctx *mechctx)
{
        int     rc;

        if (desc->bd_iov_count == 0)
                return 0;

        rc = sptlrpc_enc_pool_get_pages(desc);
        if (rc)
                return rc;

        if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
                return -EACCES;

        return 0;
}

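/*
 * Client side: for bulk read in privacy mode, prepare the pages that
 * will receive the encrypted data.  A no-op for other bulk services.
 */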
int gss_cli_prep_bulk(struct ptlrpc_request *req,
                      struct ptlrpc_bulk_desc *desc)
{
        int             rc;
        ENTRY;

        LASSERT(req->rq_cli_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read);

        if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
                RETURN(0);

        rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
        if (rc)
                CERROR("bulk read: failed to prepare encryption "
                       "pages: %d\n", rc);

        RETURN(rc);
}

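/*
 * Server side: for bulk write in privacy mode, prepare the pages that
 * will receive the encrypted data from the client.  A no-op for other
 * bulk services.
 */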
int gss_svc_prep_bulk(struct ptlrpc_request *req,
                      struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx        *grctx;
        struct ptlrpc_bulk_sec_desc  *bsd;
        int                           rc;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_write);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        bsd = grctx->src_reqbsd;
        if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
                RETURN(0);

        rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
        if (rc)
                CERROR("bulk write: failed to prepare encryption "
                       "pages: %d\n", rc);

        RETURN(rc);
}

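/*
 * Server side: verify (integrity mode) or decrypt (privacy mode) bulk
 * data received from a client bulk write.  On failure BSD_FL_ERR is
 * set in the reply bulk security descriptor so the client can detect
 * it in gss_cli_ctx_unwrap_bulk().
 */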
int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx        *grctx;
        struct ptlrpc_bulk_sec_desc  *bsdr, *bsdv;
        rawobj_t                      token;
        __u32                         maj;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_write);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        bsdr = grctx->src_reqbsd;
        bsdv = grctx->src_repbsd;

        /* bsdr has been sanity checked during unpacking */
        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        switch (bsdv->bsd_svc) {
        case SPTLRPC_BULK_SVC_INTG:
                token.data = bsdr->bsd_data;
                token.len = grctx->src_reqbsd_size - sizeof(*bsdr);

                maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
                                      desc->bd_iov_count,
                                      desc->bd_vec, &token);
                if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed to verify bulk signature: %x\n", maj);
                        RETURN(-EACCES);
                }
                break;
        case SPTLRPC_BULK_SVC_PRIV:
                if (bsdr->bsd_nob != desc->bd_nob) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("prepared nob %d doesn't match the actual "
                               "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
                        RETURN(-EPROTO);
                }

                if (desc->bd_iov_count == 0) {
                        LASSERT(desc->bd_nob == 0);
                        break;
                }

                token.data = bsdr->bsd_data;
                token.len = grctx->src_reqbsd_size - sizeof(*bsdr);

                maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
                                       desc, &token, 0);
                if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed to decrypt bulk data: %x\n", maj);
                        RETURN(-EACCES);
                }

                /* mimic gss_cli_ctx_unwrap_bulk */
                desc->bd_nob_transferred = desc->bd_nob;

                break;
        }

        RETURN(0);
}

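/*
 * Server side: protect bulk data about to be sent for a client bulk
 * read.  Sign the pages in integrity mode, or allocate encryption
 * pool pages and encrypt into them in privacy mode, recording any
 * failure in the reply bulk security descriptor.
 */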
int gss_svc_wrap_bulk(struct ptlrpc_request *req,
                      struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx        *grctx;
        struct ptlrpc_bulk_sec_desc  *bsdr, *bsdv;
        rawobj_t                      token;
        __u32                         maj;
        int                           rc;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        bsdr = grctx->src_reqbsd;
        bsdv = grctx->src_repbsd;

        /* bsdr has been sanity checked during unpacking */
        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        switch (bsdv->bsd_svc) {
        case SPTLRPC_BULK_SVC_INTG:
                token.data = bsdv->bsd_data;
                token.len = grctx->src_repbsd_size - sizeof(*bsdv);

                maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
                                   desc->bd_iov_count,
                                   desc->bd_vec, &token);
                if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed to sign bulk data: %x\n", maj);
                        RETURN(-EACCES);
                }
                break;
        case SPTLRPC_BULK_SVC_PRIV:
                bsdv->bsd_nob = desc->bd_nob;

                if (desc->bd_iov_count == 0) {
                        LASSERT(desc->bd_nob == 0);
                        break;
                }

                rc = sptlrpc_enc_pool_get_pages(desc);
                if (rc) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("bulk read: failed to allocate encryption "
                               "pages: %d\n", rc);
                        RETURN(rc);
                }

                token.data = bsdv->bsd_data;
                token.len = grctx->src_repbsd_size - sizeof(*bsdv);

                maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
                                     desc, &token, 1);
                if (maj != GSS_S_COMPLETE) {
                        bsdv->bsd_flags |= BSD_FL_ERR;
                        CERROR("failed to encrypt bulk data: %x\n", maj);
                        RETURN(-EACCES);
                }
                break;
        }

        RETURN(0);
}