/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC

#include <obd_support.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"

struct plain_sec {
        struct ptlrpc_sec pls_base;
        rwlock_t pls_lock;
        struct ptlrpc_cli_ctx *pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
        return container_of(sec, struct plain_sec, pls_base);
}

static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops    plain_ctx_ops;
static struct ptlrpc_svc_ctx    plain_svc_ctx;

static unsigned int plain_at_offset;

/*
 * For simplicity, plain policy RPCs use a fixed four-segment layout.
 */
#define PLAIN_PACK_SEGMENTS             (4)

#define PLAIN_PACK_HDR_OFF              (0)
#define PLAIN_PACK_MSG_OFF              (1)
#define PLAIN_PACK_USER_OFF             (2)
#define PLAIN_PACK_BULK_OFF             (3)
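
/*
 * The resulting on-wire message, as implied by the offsets above and by
 * plain_alloc_reqbuf()/plain_alloc_rs() below, always carries four
 * lustre_msg segments:
 *
 *   [0] struct plain_header   (version, flags, source part, bulk hash alg)
 *   [1] the embedded request/reply message itself
 *   [2] user descriptor       (non-empty only when PLAIN_FL_USER is set)
 *   [3] bulk security desc    (non-empty only when PLAIN_FL_BULK is set)
 *
 * Unused segments are packed with zero length.
 */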

#define PLAIN_FL_USER                   (0x01)
#define PLAIN_FL_BULK                   (0x02)

struct plain_header {
        __u8 ph_ver;            /* 0 */
        __u8 ph_flags;
        __u8 ph_sp;             /* source */
        __u8 ph_bulk_hash_alg;  /* complete flavor desc */
        __u8 ph_pad[4];
};

struct plain_bulk_token {
        __u8 pbt_hash[8];
};

#define PLAIN_BSD_SIZE \
        (sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))

/*
 * bulk checksum helpers
 */

static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
        struct ptlrpc_bulk_sec_desc *bsd;

        if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
                return -EPROTO;

        bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
        if (bsd == NULL) {
                CERROR("bulk sec desc has short size %d\n",
                       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
                return -EPROTO;
        }

        if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
            bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
                CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
                return -EPROTO;
        }

        return 0;
}

static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
                                    __u8 hash_alg,
                                    struct plain_bulk_token *token)
{
        if (hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
        return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
                                         sizeof(token->pbt_hash));
}

static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
                                  __u8 hash_alg,
                                  struct plain_bulk_token *tokenr)
{
        struct plain_bulk_token tokenv;
        int rc;

        if (hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        memset(tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
        rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
                                       sizeof(tokenv.pbt_hash));
        if (rc)
                return rc;

        if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
                return -EACCES;
        return 0;
}

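/*
 * Fault-injection helper: flip one bit in the first non-empty kiov page
 * of @desc so that checksum verification on the other end must fail.
 */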
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
        char *ptr;
        unsigned int off, i;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

        for (i = 0; i < desc->bd_iov_count; i++) {
                if (BD_GET_KIOV(desc, i).kiov_len == 0)
                        continue;

                ptr = kmap(BD_GET_KIOV(desc, i).kiov_page);
                off = BD_GET_KIOV(desc, i).kiov_offset & ~PAGE_MASK;
                ptr[off] ^= 0x1;
                kunmap(BD_GET_KIOV(desc, i).kiov_page);
                return;
        }
}

/*
 * cli_ctx apis
 */

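/*
 * A plain context carries no credential state and never expires
 * (cc_expire is 0 and PTLRPC_CTX_UPTODATE is set at creation in
 * plain_sec_install_ctx()), so refresh must never be called.
 */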
static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LBUG();
        return 0;
}

static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
        return 0;
}

static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_reqbuf;
        struct plain_header *phdr;

        ENTRY;

        msg->lm_secflvr = req->rq_flvr.sf_rpc;

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
        phdr->ph_ver = 0;
        phdr->ph_flags = 0;
        phdr->ph_sp = ctx->cc_sec->ps_part;
        phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

        if (req->rq_pack_udesc)
                phdr->ph_flags |= PLAIN_FL_USER;
        if (req->rq_pack_bulk)
                phdr->ph_flags |= PLAIN_FL_BULK;

        req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
                                                 msg->lm_buflens);
        RETURN(0);
}

static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_repdata;
        struct plain_header *phdr;
        __u32 cksum;
        bool swabbed;

        ENTRY;
        if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
                RETURN(-EPROTO);
        }

        swabbed = ptlrpc_rep_need_swab(req);

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
        if (phdr == NULL) {
                CERROR("missing plain header\n");
                RETURN(-EPROTO);
        }

        if (phdr->ph_ver != 0) {
                CERROR("Invalid header version\n");
                RETURN(-EPROTO);
        }

        /* expect no user desc in reply */
        if (phdr->ph_flags & PLAIN_FL_USER) {
                CERROR("Unexpected udesc flag in reply\n");
                RETURN(-EPROTO);
        }

        if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
                CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
                       req->rq_flvr.u_bulk.hash.hash_alg);
                RETURN(-EPROTO);
        }

        if (unlikely(req->rq_early)) {
                unsigned int hsize = 4;

                cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
                                lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                                lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
                                NULL, 0, (unsigned char *)&cksum, &hsize);
                if (cksum != msg->lm_cksum) {
                        CDEBUG(D_SEC,
                               "early reply checksum mismatch: %08x != %08x\n",
                               cpu_to_le32(cksum), msg->lm_cksum);
                        RETURN(-EINVAL);
                }
        } else {
                /*
                 * Whether or not we sent with bulk, we expect the same
                 * in the reply, except for an early reply.
                 */
                if (!req->rq_early &&
                    !equi(req->rq_pack_bulk == 1,
                        phdr->ph_flags & PLAIN_FL_BULK)) {
                        CERROR("%s bulk checksum in reply\n",
                               req->rq_pack_bulk ? "Missing" : "Unexpected");
                        RETURN(-EPROTO);
                }

                if (phdr->ph_flags & PLAIN_FL_BULK) {
                        if (plain_unpack_bsd(msg, swabbed))
                                RETURN(-EPROTO);
                }
        }

        req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
        RETURN(0);
}

static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                        struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_sec_desc *bsd;
        struct plain_bulk_token *token;
        int rc;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        token = (struct plain_bulk_token *) bsd->bsd_data;

        bsd->bsd_version = 0;
        bsd->bsd_flags = 0;
        bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

        if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                RETURN(0);

        if (req->rq_bulk_read)
                RETURN(0);

        rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                      token);
        if (rc) {
                CERROR("bulk write: failed to compute checksum: %d\n", rc);
        } else {
                /*
                 * For sends we only compute a wrong checksum instead of
                 * corrupting the data, so it is still correct on a redo.
                 */
                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
                    req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
                        token->pbt_hash[0] ^= 0x1;
        }

        return rc;
}

static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_sec_desc *bsdv;
        struct plain_bulk_token *tokenv;
        int rc;
        int i, nob;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
        LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

        bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
        tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

        if (req->rq_bulk_write) {
                if (bsdv->bsd_flags & BSD_FL_ERR)
                        return -EIO;
                return 0;
        }

        /* fix the actual data size */
        for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
                if (BD_GET_KIOV(desc, i).kiov_len +
                    nob > desc->bd_nob_transferred) {
                        BD_GET_KIOV(desc, i).kiov_len =
                                desc->bd_nob_transferred - nob;
                }
                nob += BD_GET_KIOV(desc, i).kiov_len;
        }

        rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                    tokenv);
        if (rc)
                CERROR("bulk read: client verify failed: %d\n", rc);

        return rc;
}

/*
 * sec apis
 */

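/*
 * A plain sec shares a single client context among all its users.
 * Install it lazily: allocate outside the lock, then check again under
 * pls_lock and either reuse the context another thread installed first
 * or install the new one.  Returns the context with a reference held
 * for the caller, or NULL on allocation failure.
 */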
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
        struct ptlrpc_cli_ctx  *ctx, *ctx_new;

        OBD_ALLOC_PTR(ctx_new);

        write_lock(&plsec->pls_lock);

        ctx = plsec->pls_ctx;
        if (ctx) {
                atomic_inc(&ctx->cc_refcount);

                if (ctx_new)
                        OBD_FREE_PTR(ctx_new);
        } else if (ctx_new) {
                ctx = ctx_new;

                atomic_set(&ctx->cc_refcount, 1);       /* for cache */
                ctx->cc_sec = &plsec->pls_base;
                ctx->cc_ops = &plain_ctx_ops;
                ctx->cc_expire = 0;
                ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
                ctx->cc_vcred.vc_uid = 0;
                spin_lock_init(&ctx->cc_lock);
                INIT_LIST_HEAD(&ctx->cc_req_list);
                INIT_LIST_HEAD(&ctx->cc_gc_chain);

                plsec->pls_ctx = ctx;
                atomic_inc(&plsec->pls_base.ps_nctx);
                atomic_inc(&plsec->pls_base.ps_refcount);

                atomic_inc(&ctx->cc_refcount);  /* for caller */
        }

        write_unlock(&plsec->pls_lock);

        return ctx;
}

static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
        struct plain_sec *plsec = sec2plsec(sec);

        ENTRY;

        LASSERT(sec->ps_policy == &plain_policy);
        LASSERT(sec->ps_import);
        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_nctx) == 0);
        LASSERT(plsec->pls_ctx == NULL);

        class_import_put(sec->ps_import);

        OBD_FREE_PTR(plsec);
        EXIT;
}

static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
        sec->ps_dying = 1;
}

static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
                                    struct ptlrpc_svc_ctx *svc_ctx,
                                    struct sptlrpc_flavor *sf)
{
        struct plain_sec *plsec;
        struct ptlrpc_sec *sec;
        struct ptlrpc_cli_ctx *ctx;

        ENTRY;

        LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

        OBD_ALLOC_PTR(plsec);
        if (plsec == NULL)
                RETURN(NULL);

        /*
         * initialize plain_sec
         */
        rwlock_init(&plsec->pls_lock);
        plsec->pls_ctx = NULL;

        sec = &plsec->pls_base;
        sec->ps_policy = &plain_policy;
        atomic_set(&sec->ps_refcount, 0);
        atomic_set(&sec->ps_nctx, 0);
        sec->ps_id = sptlrpc_get_next_secid();
        sec->ps_import = class_import_get(imp);
        sec->ps_flvr = *sf;
        spin_lock_init(&sec->ps_lock);
        INIT_LIST_HEAD(&sec->ps_gc_list);
        sec->ps_gc_interval = 0;
        sec->ps_gc_next = 0;

        /* install ctx immediately if this is a reverse sec */
        if (svc_ctx) {
                ctx = plain_sec_install_ctx(plsec);
                if (ctx == NULL) {
                        plain_destroy_sec(sec);
                        RETURN(NULL);
                }
                sptlrpc_cli_ctx_put(ctx, 1);
        }

        RETURN(sec);
}

static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
                                        struct vfs_cred *vcred,
                                        int create, int remove_dead)
{
        struct plain_sec *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx *ctx;

        ENTRY;

        read_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        if (ctx)
                atomic_inc(&ctx->cc_refcount);
        read_unlock(&plsec->pls_lock);

        if (unlikely(ctx == NULL))
                ctx = plain_sec_install_ctx(plsec);

        RETURN(ctx);
}

static
void plain_release_ctx(struct ptlrpc_sec *sec,
                       struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(ctx->cc_sec == sec);

        OBD_FREE_PTR(ctx);

        atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}

static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
                          uid_t uid, int grace, int force)
{
        struct plain_sec *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx *ctx;

        ENTRY;

        /* do nothing unless the caller wants to flush for 'all' */
        if (uid != -1)
                RETURN(0);

        write_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        plsec->pls_ctx = NULL;
        write_unlock(&plsec->pls_lock);

        if (ctx)
                sptlrpc_cli_ctx_put(ctx, 1);
        RETURN(0);
}

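/*
 * Allocate (or reuse a pool buffer as) the wrapper request buffer,
 * sized for the four fixed segments, and pack the user descriptor
 * right away if one was requested.
 */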
static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;

        ENTRY;

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_udesc)
                buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        if (!req->rq_reqbuf) {
                LASSERT(!req->rq_pool);

                alloc_len = size_roundup_power2(alloc_len);
                OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);

                req->rq_reqbuf_len = alloc_len;
        } else {
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= alloc_len);
                memset(req->rq_reqbuf, 0, alloc_len);
        }

        lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);

        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

        RETURN(0);
}

static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        ENTRY;
        if (!req->rq_pool) {
                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }
        EXIT;
}

static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;

        ENTRY;

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        /* add space for early reply */
        alloc_len += plain_at_offset;

        alloc_len = size_roundup_power2(alloc_len);

        OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
        if (!req->rq_repbuf)
                RETURN(-ENOMEM);

        req->rq_repbuf_len = alloc_len;
        RETURN(0);
}

static
void plain_free_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        ENTRY;
        OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
        req->rq_repbuf = NULL;
        req->rq_repbuf_len = 0;
        EXIT;
}

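/*
 * Grow segment @segment of the embedded request message to @newsize.
 * The sizing is two-level: first recompute the embedded message size
 * with the enlarged segment, then recompute the wrapper buffer size
 * with the enlarged embedded message, reallocating the wrapper if it
 * no longer fits.
 */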
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
                         struct ptlrpc_request *req,
                         int segment, int newsize)
{
        struct lustre_msg *newbuf;
        int oldsize;
        int newmsg_size, newbuf_size;

        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
                req->rq_reqmsg);

        /* compute the new embedded msg size */
        oldsize = req->rq_reqmsg->lm_buflens[segment];
        req->rq_reqmsg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
                                         req->rq_reqmsg->lm_buflens);
        req->rq_reqmsg->lm_buflens[segment] = oldsize;

        /* compute the new wrapper msg size */
        oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
        newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
                                         req->rq_reqbuf->lm_buflens);
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

        /* a request from the pool should always have a large enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                OBD_ALLOC_LARGE(newbuf, newbuf_size);
                if (newbuf == NULL)
                        RETURN(-ENOMEM);

                /*
                 * Must lock this, so that an otherwise unprotected change
                 * of rq_reqmsg does not race with threads traversing
                 * imp_replay_list in parallel. See LU-3333.
                 * This is a band-aid at best; we really need to deal with
                 * this in the request enlarging code before the unpacking
                 * that is already there.
                 */
                if (req->rq_import)
                        spin_lock(&req->rq_import->imp_lock);

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
                                                PLAIN_PACK_MSG_OFF, 0);

                if (req->rq_import)
                        spin_unlock(&req->rq_import->imp_lock);
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
                                     newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        RETURN(0);
}

/*
 * service apis
 */

static struct ptlrpc_svc_ctx plain_svc_ctx = {
        .sc_refcount    = ATOMIC_INIT(1),
        .sc_policy      = &plain_policy,
};

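/*
 * Server-side acceptance of an incoming plain request: sanity-check the
 * flavor and the four-segment layout, then unpack the optional user
 * descriptor and bulk security descriptor announced by ph_flags.
 */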
static int plain_accept(struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_reqbuf;
        struct plain_header *phdr;
        bool swabbed;

        ENTRY;
        LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
                SPTLRPC_POLICY_PLAIN);

        if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
            SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
            SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
            SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
                CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
                RETURN(SECSVC_DROP);
        }

        if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        swabbed = ptlrpc_req_need_swab(req);

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
        if (phdr == NULL) {
                CERROR("missing plain header\n");
                RETURN(-EPROTO);
        }

        if (phdr->ph_ver != 0) {
                CERROR("Invalid header version\n");
                RETURN(-EPROTO);
        }

        if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
                CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
                RETURN(-EPROTO);
        }

        req->rq_sp_from = phdr->ph_sp;
        req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;

        if (phdr->ph_flags & PLAIN_FL_USER) {
                if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
                                             swabbed)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(SECSVC_DROP);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
        }

        if (phdr->ph_flags & PLAIN_FL_BULK) {
                if (plain_unpack_bsd(msg, swabbed))
                        RETURN(SECSVC_DROP);

                req->rq_pack_bulk = 1;
        }

        req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

        req->rq_svc_ctx = &plain_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);

        RETURN(SECSVC_OK);
}

static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_reply_state *rs;
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int rs_size = sizeof(*rs);

        ENTRY;

        LASSERT(msgsize % 8 == 0);

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;

        rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC_LARGE(rs, rs_size);
                if (rs == NULL)
                        RETURN(-ENOMEM);

                rs->rs_size = rs_size;
        }

        rs->rs_svc_ctx = req->rq_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);
        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = rs_size - sizeof(*rs);

        lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

        req->rq_reply_state = rs;
        RETURN(0);
}

static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
        ENTRY;

        LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
        atomic_dec(&rs->rs_svc_ctx->sc_refcount);

        if (!rs->rs_prealloc)
                OBD_FREE_LARGE(rs, rs->rs_size);
        EXIT;
}

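/*
 * Finalize the reply: shrink the message segment to the actual reply
 * length, fill in the plain header, and, for a non-final (early)
 * reply, store a CRC32 of the message in lm_cksum so the client can
 * verify it in plain_ctx_verify().
 */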
static
int plain_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct lustre_msg_v2 *msg = rs->rs_repbuf;
        struct plain_header *phdr;
        int len;

        ENTRY;

        LASSERT(rs);
        LASSERT(msg);

        if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
                len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
                                        req->rq_replen, 1);
        else
                len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        msg->lm_secflvr = req->rq_flvr.sf_rpc;

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
        phdr->ph_ver = 0;
        phdr->ph_flags = 0;
        phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

        if (req->rq_pack_bulk)
                phdr->ph_flags |= PLAIN_FL_BULK;

        rs->rs_repdata_len = len;

        if (likely(req->rq_packed_final)) {
                if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
                        req->rq_reply_off = plain_at_offset;
                else
                        req->rq_reply_off = 0;
        } else {
                unsigned int hsize = 4;

                cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
                        lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                        lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
                        NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
                req->rq_reply_off = 0;
        }

        RETURN(0);
}

static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
        struct plain_bulk_token *tokenr;
        int rc;

        LASSERT(req->rq_bulk_write);
        LASSERT(req->rq_pack_bulk);

        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);

        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                    tokenr);
        if (rc) {
                bsdv->bsd_flags |= BSD_FL_ERR;
                CERROR("bulk write: server verify failed: %d\n", rc);
        }

        return rc;
}

static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
        struct plain_bulk_token *tokenv;
        int rc;

        LASSERT(req->rq_bulk_read);
        LASSERT(req->rq_pack_bulk);

        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
        tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                      tokenv);
        if (rc) {
                CERROR("bulk read: server failed to compute checksum: %d\n",
                       rc);
        } else {
                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        corrupt_bulk_data(desc);
        }

        return rc;
}

static struct ptlrpc_ctx_ops plain_ctx_ops = {
        .refresh                = plain_ctx_refresh,
        .validate               = plain_ctx_validate,
        .sign                   = plain_ctx_sign,
        .verify                 = plain_ctx_verify,
        .wrap_bulk              = plain_cli_wrap_bulk,
        .unwrap_bulk            = plain_cli_unwrap_bulk,
};

static struct ptlrpc_sec_cops plain_sec_cops = {
        .create_sec             = plain_create_sec,
        .destroy_sec            = plain_destroy_sec,
        .kill_sec               = plain_kill_sec,
        .lookup_ctx             = plain_lookup_ctx,
        .release_ctx            = plain_release_ctx,
        .flush_ctx_cache        = plain_flush_ctx_cache,
        .alloc_reqbuf           = plain_alloc_reqbuf,
        .free_reqbuf            = plain_free_reqbuf,
        .alloc_repbuf           = plain_alloc_repbuf,
        .free_repbuf            = plain_free_repbuf,
        .enlarge_reqbuf         = plain_enlarge_reqbuf,
};

static struct ptlrpc_sec_sops plain_sec_sops = {
        .accept                 = plain_accept,
        .alloc_rs               = plain_alloc_rs,
        .authorize              = plain_authorize,
        .free_rs                = plain_free_rs,
        .unwrap_bulk            = plain_svc_unwrap_bulk,
        .wrap_bulk              = plain_svc_wrap_bulk,
};

static struct ptlrpc_sec_policy plain_policy = {
        .sp_owner               = THIS_MODULE,
        .sp_name                = "plain",
        .sp_policy              = SPTLRPC_POLICY_PLAIN,
        .sp_cops                = &plain_sec_cops,
        .sp_sops                = &plain_sec_sops,
};

int sptlrpc_plain_init(void)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int rc;

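        /*
         * plain_at_offset is the size of a plain message whose payload
         * segment is the early-reply size; plain_alloc_repbuf() reserves
         * this much extra space and plain_authorize() uses it as the
         * reply offset when adaptive timeouts are supported.
         */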
        buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
        plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rc = sptlrpc_register_policy(&plain_policy);
        if (rc)
                CERROR("failed to register: %d\n", rc);

        return rc;
}

void sptlrpc_plain_fini(void)
{
        int rc;

        rc = sptlrpc_unregister_policy(&plain_policy);
        if (rc)
                CERROR("cannot unregister: %d\n", rc);
}