/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC

#ifndef __KERNEL__
#include <liblustre.h>
#endif

#include <obd_support.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>

struct plain_sec {
        struct ptlrpc_sec       pls_base;
        rwlock_t                pls_lock;
        struct ptlrpc_cli_ctx  *pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
        return container_of(sec, struct plain_sec, pls_base);
}

static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops    plain_ctx_ops;
static struct ptlrpc_svc_ctx    plain_svc_ctx;

static unsigned int plain_at_offset;

/*
 * flavor flags (maximum 8 flags)
 */
#define PLAIN_WFLVR_FLAGS_OFFSET        (12)
#define PLAIN_WFLVR_FLAG_BULK           (1 << (0 + PLAIN_WFLVR_FLAGS_OFFSET))
#define PLAIN_WFLVR_FLAG_USER           (1 << (1 + PLAIN_WFLVR_FLAGS_OFFSET))

#define PLAIN_WFLVR_HAS_BULK(wflvr)      \
        (((wflvr) & PLAIN_WFLVR_FLAG_BULK) != 0)
#define PLAIN_WFLVR_HAS_USER(wflvr)      \
        (((wflvr) & PLAIN_WFLVR_FLAG_USER) != 0)

#define PLAIN_WFLVR_TO_RPC(wflvr)       \
        ((wflvr) & ((1 << PLAIN_WFLVR_FLAGS_OFFSET) - 1))

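/*
 * On the wire, the low 12 bits of lm_secflvr carry the base RPC flavor
 * (recovered by PLAIN_WFLVR_TO_RPC), while bits 12 and 13 carry the bulk
 * and user-descriptor flags defined above.
 */
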
/*
 * similar to null sec, temporarily use the third byte of lm_secflvr to identify
 * the source sec part.
 */
static inline
void plain_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
{
        msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 16;
}

static inline
enum lustre_sec_part plain_decode_sec_part(struct lustre_msg *msg)
{
        return (msg->lm_secflvr >> 16) & 0xFF;
}

/*
 * for simplicity, plain policy rpcs use a fixed layout.
 */
#define PLAIN_PACK_SEGMENTS             (3)

#define PLAIN_PACK_MSG_OFF              (0)
#define PLAIN_PACK_USER_OFF             (1)
#define PLAIN_PACK_BULK_OFF             (2)

/****************************************
 * cli_ctx apis                         *
 ****************************************/

static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LBUG();
        return 0;
}

static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
        return 0;
}

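/*
 * "Signing" a plain request only stamps the wire flavor word: the base RPC
 * flavor plus the bulk/user-descriptor flags and the sender's sec part.
 * No checksum or cryptographic protection is applied to the request itself.
 */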
static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg_v2 *msg = req->rq_reqbuf;
        ENTRY;

        msg->lm_secflvr = req->rq_flvr.sf_rpc;
        if (req->rq_pack_bulk)
                msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
        if (req->rq_pack_udesc)
                msg->lm_secflvr |= PLAIN_WFLVR_FLAG_USER;

        plain_encode_sec_part(msg, ctx->cc_sec->ps_part);

        req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
                                                 msg->lm_buflens);
        RETURN(0);
}

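/*
 * Verify a plain reply.  An early reply is detected by the reply message
 * sitting outside req->rq_repbuf; early replies carry a CRC32 of the
 * embedded message in lm_cksum (see plain_authorize()), while full replies
 * may instead carry a bulk checksum descriptor which is unpacked here.
 */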
static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_repdata;
        int                early = 0;
        __u32              cksum;
        ENTRY;

        if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
                RETURN(-EPROTO);
        }

        /* find out if it's an early reply */
        if ((char *) msg < req->rq_repbuf ||
            (char *) msg >= req->rq_repbuf + req->rq_repbuf_len)
                early = 1;

        /* expect no user desc in reply */
        if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
                CERROR("Unexpected udesc flag in reply\n");
                RETURN(-EPROTO);
        }

        if (unlikely(early)) {
                cksum = crc32_le(!(__u32) 0,
                                 lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                                 lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF));
                if (cksum != msg->lm_cksum) {
                        CWARN("early reply checksum mismatch: %08x != %08x\n",
                              cpu_to_le32(cksum), msg->lm_cksum);
                        RETURN(-EINVAL);
                }
        } else {
                /* whether we sent with bulk or not, we expect the same
                 * in reply, except for early reply */
                if (!early &&
                    !equi(req->rq_pack_bulk == 1,
                          PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr))) {
                        CERROR("%s bulk checksum in reply\n",
                               req->rq_pack_bulk ? "Missing" : "Unexpected");
                        RETURN(-EPROTO);
                }

                if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr) &&
                    bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
                        CERROR("Mal-formed bulk checksum reply\n");
                        RETURN(-EINVAL);
                }
        }

        req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
        RETURN(0);
}

static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                        struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_cli_request(desc, req->rq_bulk_read,
                                     req->rq_flvr.sf_bulk_hash,
                                     req->rq_reqbuf,
                                     PLAIN_PACK_BULK_OFF);
}

static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
        LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_cli_reply(desc, req->rq_bulk_read,
                                   req->rq_reqbuf, PLAIN_PACK_BULK_OFF,
                                   req->rq_repdata, PLAIN_PACK_BULK_OFF);
}

/****************************************
 * sec apis                             *
 ****************************************/

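/*
 * Install the single shared client context of a plain sec.  The context is
 * allocated speculatively outside the lock; under pls_lock we either adopt
 * it or drop it in favor of a context that raced in first.  The returned
 * context holds a reference for the caller.
 */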
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
        struct ptlrpc_cli_ctx  *ctx, *ctx_new;

        OBD_ALLOC_PTR(ctx_new);

        write_lock(&plsec->pls_lock);

        ctx = plsec->pls_ctx;
        if (ctx) {
                atomic_inc(&ctx->cc_refcount);

                if (ctx_new)
                        OBD_FREE_PTR(ctx_new);
        } else if (ctx_new) {
                ctx = ctx_new;

                atomic_set(&ctx->cc_refcount, 1); /* for cache */
                ctx->cc_sec = &plsec->pls_base;
                ctx->cc_ops = &plain_ctx_ops;
                ctx->cc_expire = 0;
                ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
                ctx->cc_vcred.vc_uid = 0;
                spin_lock_init(&ctx->cc_lock);
                CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
                CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);

                plsec->pls_ctx = ctx;
                atomic_inc(&plsec->pls_base.ps_nctx);
                atomic_inc(&plsec->pls_base.ps_refcount);

                atomic_inc(&ctx->cc_refcount); /* for caller */
        }

        write_unlock(&plsec->pls_lock);

        return ctx;
}

static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        ENTRY;

        LASSERT(sec->ps_policy == &plain_policy);
        LASSERT(sec->ps_import);
        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_nctx) == 0);
        LASSERT(plsec->pls_ctx == NULL);

        class_import_put(sec->ps_import);

        OBD_FREE_PTR(plsec);
        EXIT;
}

static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
        sec->ps_dying = 1;
}

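/*
 * Create a plain sec for an import.  Only the null bulk cipher is accepted,
 * since the plain policy supports bulk checksums but not bulk encryption.
 * For a reverse sec (svc_ctx != NULL) the shared context is installed
 * immediately.
 */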
static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
                                    struct ptlrpc_svc_ctx *svc_ctx,
                                    struct sptlrpc_flavor *sf)
{
        struct plain_sec       *plsec;
        struct ptlrpc_sec      *sec;
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

        if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL) {
                CERROR("plain policy doesn't support bulk cipher: %u\n",
                       sf->sf_bulk_ciph);
                RETURN(NULL);
        }

        OBD_ALLOC_PTR(plsec);
        if (plsec == NULL)
                RETURN(NULL);

        /*
         * initialize plain_sec
         */
        plsec->pls_lock = RW_LOCK_UNLOCKED;
        plsec->pls_ctx = NULL;

        sec = &plsec->pls_base;
        sec->ps_policy = &plain_policy;
        atomic_set(&sec->ps_refcount, 0);
        atomic_set(&sec->ps_nctx, 0);
        sec->ps_id = sptlrpc_get_next_secid();
        sec->ps_import = class_import_get(imp);
        sec->ps_flvr = *sf;
        sec->ps_lock = SPIN_LOCK_UNLOCKED;
        CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
        sec->ps_gc_interval = 0;
        sec->ps_gc_next = 0;

        /* install ctx immediately if this is a reverse sec */
        if (svc_ctx) {
                ctx = plain_sec_install_ctx(plsec);
                if (ctx == NULL) {
                        plain_destroy_sec(sec);
                        RETURN(NULL);
                }
                sptlrpc_cli_ctx_put(ctx, 1);
        }

        RETURN(sec);
}

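/*
 * Context lookup ignores the credentials: every caller shares the single
 * plain context, which is installed on first use.
 */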
static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
                                        struct vfs_cred *vcred,
                                        int create, int remove_dead)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        read_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        if (ctx)
                atomic_inc(&ctx->cc_refcount);
        read_unlock(&plsec->pls_lock);

        if (unlikely(ctx == NULL))
                ctx = plain_sec_install_ctx(plsec);

        RETURN(ctx);
}

static
void plain_release_ctx(struct ptlrpc_sec *sec,
                       struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(ctx->cc_sec == sec);

        OBD_FREE_PTR(ctx);

        atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}

static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
                          uid_t uid, int grace, int force)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        /* do nothing unless caller wants to flush for 'all' */
        if (uid != -1)
                RETURN(0);

        write_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        plsec->pls_ctx = NULL;
        write_unlock(&plsec->pls_lock);

        if (ctx)
                sptlrpc_cli_ctx_put(ctx, 1);
        RETURN(0);
}

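/*
 * Request buffers use the fixed three-segment layout: the embedded message,
 * an optional user descriptor and an optional bulk checksum descriptor.
 * Buffers taken from the request pool must already be large enough; only
 * non-pool buffers are allocated here.
 */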
static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;
        ENTRY;

        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_udesc)
                buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);

                buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
                                                req->rq_flvr.sf_bulk_hash, 1,
                                                req->rq_bulk_read);
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        if (!req->rq_reqbuf) {
                LASSERT(!req->rq_pool);

                alloc_len = size_roundup_power2(alloc_len);
                OBD_ALLOC(req->rq_reqbuf, alloc_len);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);

                req->rq_reqbuf_len = alloc_len;
        } else {
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= alloc_len);
                memset(req->rq_reqbuf, 0, alloc_len);
        }

        lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        req->rq_reqmsg = lustre_msg_buf_v2(req->rq_reqbuf, 0, 0);

        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

        RETURN(0);
}

static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        ENTRY;
        if (!req->rq_pool) {
                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }

        req->rq_reqmsg = NULL;
        EXIT;
}

static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;
        ENTRY;

        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
                                                req->rq_flvr.sf_bulk_hash, 0,
                                                req->rq_bulk_read);
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        /* add space for early reply */
        alloc_len += plain_at_offset;

        alloc_len = size_roundup_power2(alloc_len);

        OBD_ALLOC(req->rq_repbuf, alloc_len);
        if (!req->rq_repbuf)
                RETURN(-ENOMEM);

        req->rq_repbuf_len = alloc_len;
        RETURN(0);
}

static
void plain_free_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        ENTRY;
        OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
        req->rq_repbuf = NULL;
        req->rq_repbuf_len = 0;

        req->rq_repmsg = NULL;
        EXIT;
}

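/*
 * Grow one segment of the embedded request message.  The new embedded and
 * wrapper sizes are computed first; if the current buffer is too small
 * (never the case for pool requests) a larger one is allocated and the old
 * contents copied over, then both messages are enlarged in place.
 */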
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
                         struct ptlrpc_request *req,
                         int segment, int newsize)
{
        struct lustre_msg      *newbuf;
        int                     oldsize;
        int                     newmsg_size, newbuf_size;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
                req->rq_reqmsg);

        /* compute new embedded msg size.  */
        oldsize = req->rq_reqmsg->lm_buflens[segment];
        req->rq_reqmsg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
                                         req->rq_reqmsg->lm_buflens);
        req->rq_reqmsg->lm_buflens[segment] = oldsize;

        /* compute new wrapper msg size.  */
        oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
        newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
                                         req->rq_reqbuf->lm_buflens);
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

        /* request from pool should always have enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                OBD_ALLOC(newbuf, newbuf_size);
                if (newbuf == NULL)
                        RETURN(-ENOMEM);

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
                                                PLAIN_PACK_MSG_OFF, 0);
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
                                     newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        RETURN(0);
}

/****************************************
 * service apis                         *
 ****************************************/

static struct ptlrpc_svc_ctx plain_svc_ctx = {
        .sc_refcount    = ATOMIC_INIT(1),
        .sc_policy      = &plain_policy,
};

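/*
 * Server-side acceptance of a plain request: check the segment count and
 * wire flavor, decode the sender's sec part, and unpack the optional user
 * and bulk checksum descriptors advertised in the flavor flags.
 */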
static
int plain_accept(struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_reqbuf;
        ENTRY;

        LASSERT(RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == SPTLRPC_POLICY_PLAIN);

        if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_PLAIN) {
                CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
                RETURN(SECSVC_DROP);
        }

        req->rq_sp_from = plain_decode_sec_part(msg);

        if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
                if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(SECSVC_DROP);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
        }

        if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr)) {
                if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
                        CERROR("Mal-formed bulk checksum request\n");
                        RETURN(SECSVC_DROP);
                }

                req->rq_pack_bulk = 1;
        }

        req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

        req->rq_svc_ctx = &plain_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);

        RETURN(SECSVC_OK);
}

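/*
 * Allocate the reply state with the three-segment reply buffer placed
 * inline right after it.  A preallocated reply state is reused if the
 * request already carries one that is large enough.
 */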
static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_reply_state   *rs;
        struct ptlrpc_bulk_sec_desc *bsd;
        __u32                        buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int                          rs_size = sizeof(*rs);
        ENTRY;

        LASSERT(msgsize % 8 == 0);

        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write)) {
                bsd = lustre_msg_buf(req->rq_reqbuf,
                                     PLAIN_PACK_BULK_OFF, sizeof(*bsd));
                LASSERT(bsd);

                buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
                                                        bsd->bsd_hash_alg, 0,
                                                        req->rq_bulk_read);
        }
        rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC(rs, rs_size);
                if (rs == NULL)
                        RETURN(-ENOMEM);

                rs->rs_size = rs_size;
        }

        rs->rs_svc_ctx = req->rq_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);
        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = rs_size - sizeof(*rs);

        lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

        req->rq_reply_state = rs;
        RETURN(0);
}

static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
        ENTRY;

        LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
        atomic_dec(&rs->rs_svc_ctx->sc_refcount);

        if (!rs->rs_prealloc)
                OBD_FREE(rs, rs->rs_size);
        EXIT;
}

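/*
 * Finish a plain reply: shrink the embedded message segment if the reply is
 * smaller than the buffer, stamp the wire flavor and bulk flag, and for
 * replies that are not in the final packed format compute the CRC32 that
 * clients check in plain_ctx_verify() for early replies.
 */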
static
int plain_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct lustre_msg_v2      *msg = rs->rs_repbuf;
        int                        len;
        ENTRY;

        LASSERT(rs);
        LASSERT(msg);

        if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
                len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
                                        req->rq_replen, 1);
        else
                len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        msg->lm_secflvr = req->rq_flvr.sf_rpc;
        if (req->rq_pack_bulk)
                msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;

        rs->rs_repdata_len = len;

        if (likely(req->rq_packed_final)) {
                req->rq_reply_off = plain_at_offset;
        } else {
                msg->lm_cksum = crc32_le(!(__u32) 0,
                                lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                                lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF));
                req->rq_reply_off = 0;
        }

        RETURN(0);
}

static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state      *rs = req->rq_reply_state;

        LASSERT(rs);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
        LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_svc(desc, req->rq_bulk_read,
                             lustre_msg_buf(req->rq_reqbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(req->rq_reqbuf,
                                               PLAIN_PACK_BULK_OFF),
                             lustre_msg_buf(rs->rs_repbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(rs->rs_repbuf,
                                               PLAIN_PACK_BULK_OFF));
}

static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state      *rs = req->rq_reply_state;

        LASSERT(rs);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
        LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_svc(desc, req->rq_bulk_read,
                             lustre_msg_buf(req->rq_reqbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(req->rq_reqbuf,
                                               PLAIN_PACK_BULK_OFF),
                             lustre_msg_buf(rs->rs_repbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(rs->rs_repbuf,
                                               PLAIN_PACK_BULK_OFF));
}

static struct ptlrpc_ctx_ops plain_ctx_ops = {
        .refresh                = plain_ctx_refresh,
        .validate               = plain_ctx_validate,
        .sign                   = plain_ctx_sign,
        .verify                 = plain_ctx_verify,
        .wrap_bulk              = plain_cli_wrap_bulk,
        .unwrap_bulk            = plain_cli_unwrap_bulk,
};

static struct ptlrpc_sec_cops plain_sec_cops = {
        .create_sec             = plain_create_sec,
        .destroy_sec            = plain_destroy_sec,
        .kill_sec               = plain_kill_sec,
        .lookup_ctx             = plain_lookup_ctx,
        .release_ctx            = plain_release_ctx,
        .flush_ctx_cache        = plain_flush_ctx_cache,
        .alloc_reqbuf           = plain_alloc_reqbuf,
        .alloc_repbuf           = plain_alloc_repbuf,
        .free_reqbuf            = plain_free_reqbuf,
        .free_repbuf            = plain_free_repbuf,
        .enlarge_reqbuf         = plain_enlarge_reqbuf,
};

static struct ptlrpc_sec_sops plain_sec_sops = {
        .accept                 = plain_accept,
        .alloc_rs               = plain_alloc_rs,
        .authorize              = plain_authorize,
        .free_rs                = plain_free_rs,
        .unwrap_bulk            = plain_svc_unwrap_bulk,
        .wrap_bulk              = plain_svc_wrap_bulk,
};

static struct ptlrpc_sec_policy plain_policy = {
        .sp_owner               = THIS_MODULE,
        .sp_name                = "plain",
        .sp_policy              = SPTLRPC_POLICY_PLAIN,
        .sp_cops                = &plain_sec_cops,
        .sp_sops                = &plain_sec_sops,
};

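/*
 * plain_at_offset is where an early reply is placed inside the reply
 * buffer: the size of a three-segment message whose first segment is the
 * standard early-reply message size.  plain_alloc_repbuf() reserves this
 * much extra space and plain_authorize() uses it as rq_reply_off.
 */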
int sptlrpc_plain_init(void)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int rc;

        buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
        plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rc = sptlrpc_register_policy(&plain_policy);
        if (rc)
                CERROR("failed to register: %d\n", rc);

        return rc;
}

void sptlrpc_plain_fini(void)
{
        int rc;

        rc = sptlrpc_unregister_policy(&plain_policy);
        if (rc)
                CERROR("cannot unregister: %d\n", rc);
}