/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC

#include <obd_support.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"

struct plain_sec {
        struct ptlrpc_sec       pls_base;
        rwlock_t                pls_lock;
        struct ptlrpc_cli_ctx  *pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
        return container_of(sec, struct plain_sec, pls_base);
}

static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops    plain_ctx_ops;
static struct ptlrpc_svc_ctx    plain_svc_ctx;

static unsigned int plain_at_offset;

/*
 * For simplicity, plain policy RPCs use a fixed layout; see the
 * segment map below.
 */
#define PLAIN_PACK_SEGMENTS             (4)

#define PLAIN_PACK_HDR_OFF              (0)
#define PLAIN_PACK_MSG_OFF              (1)
#define PLAIN_PACK_USER_OFF             (2)
#define PLAIN_PACK_BULK_OFF             (3)

#define PLAIN_FL_USER                   (0x01)
#define PLAIN_FL_BULK                   (0x02)

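/*
 * Wire layout of every plain request and reply (absent segments are
 * packed with zero length):
 *
 *   segment 0 (HDR):  struct plain_header
 *   segment 1 (MSG):  the embedded lustre_msg, i.e. the real payload
 *   segment 2 (USER): user descriptor, present iff PLAIN_FL_USER
 *   segment 3 (BULK): bulk security descriptor, present iff PLAIN_FL_BULK
 */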
struct plain_header {
        __u8            ph_ver;            /* 0 */
        __u8            ph_flags;
        __u8            ph_sp;             /* source */
        __u8            ph_bulk_hash_alg;  /* complete flavor desc */
        __u8            ph_pad[4];
};

struct plain_bulk_token {
        __u8            pbt_hash[8];
};

#define PLAIN_BSD_SIZE \
        (sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))

/****************************************
 * bulk checksum helpers                *
 ****************************************/

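/*
 * Validate and unpack the bulk security descriptor in segment
 * PLAIN_PACK_BULK_OFF: it must be large enough for the descriptor plus
 * the plain bulk token, and carry either the null or the integrity
 * bulk service.
 */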
static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
        struct ptlrpc_bulk_sec_desc *bsd;

        if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
                return -EPROTO;

        bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
        if (bsd == NULL) {
                CERROR("bulk sec desc has short size %d\n",
                       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
                return -EPROTO;
        }

        if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
            bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
                CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
                return -EPROTO;
        }

        return 0;
}

static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
                                    __u8 hash_alg,
                                    struct plain_bulk_token *token)
{
        if (hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
        return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
                                         sizeof(token->pbt_hash));
}

static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
                                  __u8 hash_alg,
                                  struct plain_bulk_token *tokenr)
{
        struct plain_bulk_token tokenv;
        int                     rc;

        if (hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        memset(tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
        rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
                                       sizeof(tokenv.pbt_hash));
        if (rc)
                return rc;

        if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
                return -EACCES;
        return 0;
}

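/*
 * Fault-injection helper: flip one bit in the first non-empty kiov
 * page so that any subsequent checksum verification is guaranteed to
 * fail.  Only called under OBD_FAIL_OSC_CHECKSUM_RECEIVE, see
 * plain_svc_wrap_bulk().
 */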
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
        char           *ptr;
        unsigned int    off, i;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

        for (i = 0; i < desc->bd_iov_count; i++) {
                if (BD_GET_KIOV(desc, i).kiov_len == 0)
                        continue;

                ptr = kmap(BD_GET_KIOV(desc, i).kiov_page);
                off = BD_GET_KIOV(desc, i).kiov_offset & ~PAGE_MASK;
                ptr[off] ^= 0x1;
                kunmap(BD_GET_KIOV(desc, i).kiov_page);
                return;
        }
}

/****************************************
 * cli_ctx apis                         *
 ****************************************/

static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LBUG();
        return 0;
}

static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
        return 0;
}

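/*
 * "Signing" under the plain policy is pure bookkeeping: stamp the
 * security flavor and the plain header into the preallocated request
 * buffer and record the total wire length.  No cryptography is
 * involved.
 */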
static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg   *msg = req->rq_reqbuf;
        struct plain_header *phdr;
        ENTRY;

        msg->lm_secflvr = req->rq_flvr.sf_rpc;

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
        phdr->ph_ver = 0;
        phdr->ph_flags = 0;
        phdr->ph_sp = ctx->cc_sec->ps_part;
        phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

        if (req->rq_pack_udesc)
                phdr->ph_flags |= PLAIN_FL_USER;
        if (req->rq_pack_bulk)
                phdr->ph_flags |= PLAIN_FL_BULK;

        req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
                                                 msg->lm_buflens);
        RETURN(0);
}

static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg   *msg = req->rq_repdata;
        struct plain_header *phdr;
        __u32                cksum;
        int                  swabbed;
        ENTRY;

        if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
                RETURN(-EPROTO);
        }

        swabbed = ptlrpc_rep_need_swab(req);

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
        if (phdr == NULL) {
                CERROR("missing plain header\n");
                RETURN(-EPROTO);
        }

        if (phdr->ph_ver != 0) {
                CERROR("Invalid header version\n");
                RETURN(-EPROTO);
        }

        /* expect no user desc in reply */
        if (phdr->ph_flags & PLAIN_FL_USER) {
                CERROR("Unexpected udesc flag in reply\n");
                RETURN(-EPROTO);
        }

        if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
                CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
                       req->rq_flvr.u_bulk.hash.hash_alg);
                RETURN(-EPROTO);
        }

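        /*
         * An early reply is read out of a buffer the server may still
         * update, so it carries a CRC32 of the message segment (packed
         * by plain_authorize()); verify it to catch a reply that
         * changed while we were reading it.
         */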
        if (unlikely(req->rq_early)) {
                unsigned int hsize = 4;

                cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
                                lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                                lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
                                NULL, 0, (unsigned char *)&cksum, &hsize);
                if (cksum != msg->lm_cksum) {
                        CDEBUG(D_SEC,
                               "early reply checksum mismatch: %08x != %08x\n",
                               cpu_to_le32(cksum), msg->lm_cksum);
                        RETURN(-EINVAL);
                }
        } else {
                /*
                 * The reply must carry a bulk sec desc exactly when the
                 * request did; early replies (handled above) never do.
                 */
                if (!equi(req->rq_pack_bulk == 1,
                          phdr->ph_flags & PLAIN_FL_BULK)) {
                        CERROR("%s bulk checksum in reply\n",
                               req->rq_pack_bulk ? "Missing" : "Unexpected");
                        RETURN(-EPROTO);
                }

                if (phdr->ph_flags & PLAIN_FL_BULK) {
                        if (plain_unpack_bsd(msg, swabbed))
                                RETURN(-EPROTO);
                }
        }

        req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
        RETURN(0);
}

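/*
 * Client side, before bulk transfer: fill in the bulk security
 * descriptor of the request.  For bulk writes with an integrity
 * service this computes the checksum of the outgoing pages into the
 * plain bulk token; bulk reads are verified after the transfer in
 * plain_cli_unwrap_bulk() instead.
 */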
static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                        struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_sec_desc *bsd;
        struct plain_bulk_token     *token;
        int                          rc;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        token = (struct plain_bulk_token *) bsd->bsd_data;

        bsd->bsd_version = 0;
        bsd->bsd_flags = 0;
        bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

        if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                RETURN(0);

        if (req->rq_bulk_read)
                RETURN(0);

        rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                      token);
        if (rc) {
                CERROR("bulk write: failed to compute checksum: %d\n", rc);
        } else {
                /*
                 * For sending we only compute a wrong checksum instead
                 * of corrupting the data, so it is still correct on a
                 * redo.
                 */
                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
                    req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
                        token->pbt_hash[0] ^= 0x1;
        }

        return rc;
}

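/*
 * Client side, after bulk transfer.  For bulk writes, just check
 * whether the server flagged a checksum error (BSD_FL_ERR).  For bulk
 * reads, trim the kiov lengths to the bytes actually transferred and
 * verify the checksum the server packed into its reply.
 */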
static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_sec_desc *bsdv;
        struct plain_bulk_token     *tokenv;
        int                          rc;
        int                          i, nob;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
        LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

        bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
        tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

        if (req->rq_bulk_write) {
                if (bsdv->bsd_flags & BSD_FL_ERR)
                        return -EIO;
                return 0;
        }

        /* trim kiov lengths to the data size actually transferred */
        for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
                if (BD_GET_KIOV(desc, i).kiov_len +
                    nob > desc->bd_nob_transferred) {
                        BD_GET_KIOV(desc, i).kiov_len =
                                desc->bd_nob_transferred - nob;
                }
                nob += BD_GET_KIOV(desc, i).kiov_len;
        }

        rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                    tokenv);
        if (rc)
                CERROR("bulk read: client verify failed: %d\n", rc);

        return rc;
}

/****************************************
 * sec apis                             *
 ****************************************/

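/*
 * A plain sec keeps a single shared client context, since the policy
 * carries no per-user state.  Install it lazily: allocate a candidate
 * before taking pls_lock and discard it if another thread won the
 * race.  Returns the context with a reference held for the caller, or
 * NULL if allocation failed.
 */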
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
        struct ptlrpc_cli_ctx  *ctx, *ctx_new;

        OBD_ALLOC_PTR(ctx_new);

        write_lock(&plsec->pls_lock);

        ctx = plsec->pls_ctx;
        if (ctx) {
                atomic_inc(&ctx->cc_refcount);

                if (ctx_new)
                        OBD_FREE_PTR(ctx_new);
        } else if (ctx_new) {
                ctx = ctx_new;

                atomic_set(&ctx->cc_refcount, 1);       /* for cache */
                ctx->cc_sec = &plsec->pls_base;
                ctx->cc_ops = &plain_ctx_ops;
                ctx->cc_expire = 0;
                ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
                ctx->cc_vcred.vc_uid = 0;
                spin_lock_init(&ctx->cc_lock);
                INIT_LIST_HEAD(&ctx->cc_req_list);
                INIT_LIST_HEAD(&ctx->cc_gc_chain);

                plsec->pls_ctx = ctx;
                atomic_inc(&plsec->pls_base.ps_nctx);
                atomic_inc(&plsec->pls_base.ps_refcount);

                atomic_inc(&ctx->cc_refcount);  /* for caller */
        }

        write_unlock(&plsec->pls_lock);

        return ctx;
}

static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
        struct plain_sec *plsec = sec2plsec(sec);
        ENTRY;

        LASSERT(sec->ps_policy == &plain_policy);
        LASSERT(sec->ps_import);
        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_nctx) == 0);
        LASSERT(plsec->pls_ctx == NULL);

        class_import_put(sec->ps_import);

        OBD_FREE_PTR(plsec);
        EXIT;
}

static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
        sec->ps_dying = 1;
}

static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
                                    struct ptlrpc_svc_ctx *svc_ctx,
                                    struct sptlrpc_flavor *sf)
{
        struct plain_sec       *plsec;
        struct ptlrpc_sec      *sec;
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

        OBD_ALLOC_PTR(plsec);
        if (plsec == NULL)
                RETURN(NULL);

        /*
         * initialize plain_sec
         */
        rwlock_init(&plsec->pls_lock);
        plsec->pls_ctx = NULL;

        sec = &plsec->pls_base;
        sec->ps_policy = &plain_policy;
        atomic_set(&sec->ps_refcount, 0);
        atomic_set(&sec->ps_nctx, 0);
        sec->ps_id = sptlrpc_get_next_secid();
        sec->ps_import = class_import_get(imp);
        sec->ps_flvr = *sf;
        spin_lock_init(&sec->ps_lock);
        INIT_LIST_HEAD(&sec->ps_gc_list);
        sec->ps_gc_interval = 0;
        sec->ps_gc_next = 0;

        /* install ctx immediately if this is a reverse sec */
        if (svc_ctx) {
                ctx = plain_sec_install_ctx(plsec);
                if (ctx == NULL) {
                        plain_destroy_sec(sec);
                        RETURN(NULL);
                }
                sptlrpc_cli_ctx_put(ctx, 1);
        }

        RETURN(sec);
}

static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
                                        struct vfs_cred *vcred,
                                        int create, int remove_dead)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        read_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        if (ctx)
                atomic_inc(&ctx->cc_refcount);
        read_unlock(&plsec->pls_lock);

        if (unlikely(ctx == NULL))
                ctx = plain_sec_install_ctx(plsec);

        RETURN(ctx);
}

static
void plain_release_ctx(struct ptlrpc_sec *sec,
                       struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(ctx->cc_sec == sec);

        OBD_FREE_PTR(ctx);

        atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}

static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
                          uid_t uid, int grace, int force)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        /* do nothing unless the caller wants to flush for 'all' */
        if (uid != -1)
                RETURN(0);

        write_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        plsec->pls_ctx = NULL;
        write_unlock(&plsec->pls_lock);

        if (ctx)
                sptlrpc_cli_ctx_put(ctx, 1);
        RETURN(0);
}

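/*
 * Allocate (or reuse from the pool) a request buffer laid out as the
 * four plain segments, sized for the embedded message plus optional
 * user and bulk descriptors.  Fresh allocations are rounded up to a
 * power of two, leaving slack for later in-place enlargement.
 */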
static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int   alloc_len;
        ENTRY;

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_udesc)
                buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        if (!req->rq_reqbuf) {
                LASSERT(!req->rq_pool);

                alloc_len = size_roundup_power2(alloc_len);
                OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);

                req->rq_reqbuf_len = alloc_len;
        } else {
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= alloc_len);
                memset(req->rq_reqbuf, 0, alloc_len);
        }

        lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);

        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

        RETURN(0);
}

static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        ENTRY;
        if (!req->rq_pool) {
                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }
        EXIT;
}

static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;
        ENTRY;

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        /* add space for early reply */
        alloc_len += plain_at_offset;

        alloc_len = size_roundup_power2(alloc_len);

        OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
        if (!req->rq_repbuf)
                RETURN(-ENOMEM);

        req->rq_repbuf_len = alloc_len;
        RETURN(0);
}

static
void plain_free_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        ENTRY;
        OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
        req->rq_repbuf = NULL;
        req->rq_repbuf_len = 0;
        EXIT;
}

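/*
 * Grow one segment of the embedded request message to newsize.  The
 * required size is computed at both levels: for the embedded message
 * itself and for the wrapper that contains it.  If the current buffer
 * is too small, a bigger one is allocated and the old contents copied
 * over (under imp_lock, see the LU-3333 note below), then both
 * messages are enlarged in place.
 */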
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
                         struct ptlrpc_request *req,
                         int segment, int newsize)
{
        struct lustre_msg      *newbuf;
        int                     oldsize;
        int                     newmsg_size, newbuf_size;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
                req->rq_reqmsg);

        /* compute new embedded msg size */
        oldsize = req->rq_reqmsg->lm_buflens[segment];
        req->rq_reqmsg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
                                         req->rq_reqmsg->lm_buflens);
        req->rq_reqmsg->lm_buflens[segment] = oldsize;

        /* compute new wrapper msg size */
        oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
        newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
                                         req->rq_reqbuf->lm_buflens);
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

        /* a request from the pool should always have a big enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                OBD_ALLOC_LARGE(newbuf, newbuf_size);
                if (newbuf == NULL)
                        RETURN(-ENOMEM);

                /*
                 * Must lock this, so that an otherwise-unprotected
                 * change of rq_reqmsg cannot race with threads
                 * traversing imp_replay_list in parallel.  See LU-3333.
                 * This is a band-aid at best; the real fix is to deal
                 * with this in the request-enlarging code before the
                 * unpacking that is already there.
                 */
                if (req->rq_import)
                        spin_lock(&req->rq_import->imp_lock);

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
                                                PLAIN_PACK_MSG_OFF, 0);

                if (req->rq_import)
                        spin_unlock(&req->rq_import->imp_lock);
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
                                     newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        RETURN(0);
}

/****************************************
 * service apis                         *
 ****************************************/

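/*
 * The plain policy keeps no per-client state on the server side, so
 * all incoming plain requests share this single, statically allocated
 * service context.
 */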
static struct ptlrpc_svc_ctx plain_svc_ctx = {
        .sc_refcount    = ATOMIC_INIT(1),
        .sc_policy      = &plain_policy,
};

static
int plain_accept(struct ptlrpc_request *req)
{
        struct lustre_msg   *msg = req->rq_reqbuf;
        struct plain_header *phdr;
        int                  swabbed;
        ENTRY;

        LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
                SPTLRPC_POLICY_PLAIN);

        if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
            SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
            SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
            SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
                CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
                RETURN(SECSVC_DROP);
        }

        if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        swabbed = ptlrpc_req_need_swab(req);

        /* plain_accept() returns SECSVC_* codes, not errnos */
        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
        if (phdr == NULL) {
                CERROR("missing plain header\n");
                RETURN(SECSVC_DROP);
        }

        if (phdr->ph_ver != 0) {
                CERROR("Invalid header version\n");
                RETURN(SECSVC_DROP);
        }

        if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
                CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
                RETURN(SECSVC_DROP);
        }

        req->rq_sp_from = phdr->ph_sp;
        req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;

        if (phdr->ph_flags & PLAIN_FL_USER) {
                if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
                                             swabbed)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(SECSVC_DROP);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
        }

        if (phdr->ph_flags & PLAIN_FL_BULK) {
                if (plain_unpack_bsd(msg, swabbed))
                        RETURN(SECSVC_DROP);

                req->rq_pack_bulk = 1;
        }

        req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

        req->rq_svc_ctx = &plain_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);

        RETURN(SECSVC_OK);
}

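/*
 * Allocate the reply state with the reply buffer placed immediately
 * after it (rs + 1), unless a preallocated reply state is already
 * attached to the request, in which case it must be big enough.
 */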
static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_reply_state   *rs;
        __u32                        buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int                          rs_size = sizeof(*rs);
        ENTRY;

        LASSERT(msgsize % 8 == 0);

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;

        rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC_LARGE(rs, rs_size);
                if (rs == NULL)
                        RETURN(-ENOMEM);

                rs->rs_size = rs_size;
        }

        rs->rs_svc_ctx = req->rq_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);
        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = rs_size - sizeof(*rs);

        lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

        req->rq_reply_state = rs;
        RETURN(0);
}

static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
        ENTRY;

        LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
        atomic_dec(&rs->rs_svc_ctx->sc_refcount);

        if (!rs->rs_prealloc)
                OBD_FREE_LARGE(rs, rs->rs_size);
        EXIT;
}

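/*
 * Finalize the reply: shrink the message segment to the actual reply
 * length, stamp the plain header, then either record the reply offset
 * (final reply) or checksum the message segment (early reply; the
 * client verifies it in plain_ctx_verify()).
 */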
static
int plain_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct lustre_msg_v2      *msg = rs->rs_repbuf;
        struct plain_header       *phdr;
        int                        len;
        ENTRY;

        LASSERT(rs);
        LASSERT(msg);

        if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
                len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
                                        req->rq_replen, 1);
        else
                len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        msg->lm_secflvr = req->rq_flvr.sf_rpc;

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
        phdr->ph_ver = 0;
        phdr->ph_flags = 0;
        phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

        if (req->rq_pack_bulk)
                phdr->ph_flags |= PLAIN_FL_BULK;

        rs->rs_repdata_len = len;

        if (likely(req->rq_packed_final)) {
                if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
                        req->rq_reply_off = plain_at_offset;
                else
                        req->rq_reply_off = 0;
        } else {
                unsigned int hsize = 4;

                cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
                        lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                        lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
                        NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
                req->rq_reply_off = 0;
        }

        RETURN(0);
}

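/*
 * Server side, after receiving bulk write data: echo the request's
 * bulk service into the reply descriptor and verify the client's
 * checksum.  On mismatch, set BSD_FL_ERR in the reply so the client's
 * plain_cli_unwrap_bulk() fails the write with -EIO.
 */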
static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state   *rs = req->rq_reply_state;
        struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
        struct plain_bulk_token     *tokenr;
        int                          rc;

        LASSERT(req->rq_bulk_write);
        LASSERT(req->rq_pack_bulk);

        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);

        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                    tokenr);
        if (rc) {
                bsdv->bsd_flags |= BSD_FL_ERR;
                CERROR("bulk write: server verify failed: %d\n", rc);
        }

        return rc;
}

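/*
 * Server side, before sending bulk read data: compute the checksum of
 * the outgoing pages into the reply's plain bulk token; the client
 * verifies it in plain_cli_unwrap_bulk().
 */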
static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state   *rs = req->rq_reply_state;
        struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
        struct plain_bulk_token     *tokenv;
        int                          rc;

        LASSERT(req->rq_bulk_read);
        LASSERT(req->rq_pack_bulk);

        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
        tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                      tokenv);
        if (rc) {
                CERROR("bulk read: server failed to compute checksum: %d\n",
                       rc);
        } else {
                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        corrupt_bulk_data(desc);
        }

        return rc;
}

static struct ptlrpc_ctx_ops plain_ctx_ops = {
        .refresh                = plain_ctx_refresh,
        .validate               = plain_ctx_validate,
        .sign                   = plain_ctx_sign,
        .verify                 = plain_ctx_verify,
        .wrap_bulk              = plain_cli_wrap_bulk,
        .unwrap_bulk            = plain_cli_unwrap_bulk,
};

static struct ptlrpc_sec_cops plain_sec_cops = {
        .create_sec             = plain_create_sec,
        .destroy_sec            = plain_destroy_sec,
        .kill_sec               = plain_kill_sec,
        .lookup_ctx             = plain_lookup_ctx,
        .release_ctx            = plain_release_ctx,
        .flush_ctx_cache        = plain_flush_ctx_cache,
        .alloc_reqbuf           = plain_alloc_reqbuf,
        .free_reqbuf            = plain_free_reqbuf,
        .alloc_repbuf           = plain_alloc_repbuf,
        .free_repbuf            = plain_free_repbuf,
        .enlarge_reqbuf         = plain_enlarge_reqbuf,
};

static struct ptlrpc_sec_sops plain_sec_sops = {
        .accept                 = plain_accept,
        .alloc_rs               = plain_alloc_rs,
        .authorize              = plain_authorize,
        .free_rs                = plain_free_rs,
        .unwrap_bulk            = plain_svc_unwrap_bulk,
        .wrap_bulk              = plain_svc_wrap_bulk,
};

static struct ptlrpc_sec_policy plain_policy = {
        .sp_owner               = THIS_MODULE,
        .sp_name                = "plain",
        .sp_policy              = SPTLRPC_POLICY_PLAIN,
        .sp_cops                = &plain_sec_cops,
        .sp_sops                = &plain_sec_sops,
};

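/*
 * plain_at_offset is the wire size of a plain message whose embedded
 * segment is the standard early-reply message.  plain_alloc_repbuf()
 * reserves that much extra space, and plain_authorize() places the
 * final reply at this offset, so an early reply can precede it in the
 * same buffer.
 */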
int sptlrpc_plain_init(void)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int rc;

        buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
        plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rc = sptlrpc_register_policy(&plain_policy);
        if (rc)
                CERROR("failed to register: %d\n", rc);

        return rc;
}

void sptlrpc_plain_fini(void)
{
        int rc;

        rc = sptlrpc_unregister_policy(&plain_policy);
        if (rc)
                CERROR("cannot unregister: %d\n", rc);
}