Whamcloud - gitweb
use generic LIST_HEAD macros instead of linux specific.
[fs/lustre-release.git] / lustre / ptlrpc / sec_plain.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2006-2007 Cluster File Systems, Inc.
5  *   Author: Eric Mei <ericm@clusterfs.com>
6  *
7  *   This file is part of Lustre, http://www.lustre.org.
8  *
9  *   Lustre is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Lustre is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Lustre; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23 #ifndef EXPORT_SYMTAB
24 # define EXPORT_SYMTAB
25 #endif
26 #define DEBUG_SUBSYSTEM S_SEC
27
28 #ifndef __KERNEL__
29 #include <liblustre.h>
30 #endif
31
32 #include <obd_support.h>
33 #include <obd_class.h>
34 #include <lustre_net.h>
35 #include <lustre_sec.h>
36
/*
 * Plain-policy security instance: a generic ptlrpc_sec plus one shared
 * client context for all users (plain policy has no per-user state).
 */
struct plain_sec {
        struct ptlrpc_sec       pls_base;       /* embedded generic sec */
        rwlock_t                pls_lock;       /* protects pls_ctx */
        struct ptlrpc_cli_ctx  *pls_ctx;        /* the single shared ctx */
};
42
/* Map a generic ptlrpc_sec pointer back to its enclosing plain_sec. */
static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
        return container_of(sec, struct plain_sec, pls_base);
}
47
/* forward declarations; the definitions are at the bottom of this file */
static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops    plain_ctx_ops;
static struct ptlrpc_svc_ctx    plain_svc_ctx;
51
/*
 * flavor flags (maximum 8 flags)
 *
 * The wire flavor (lm_secflvr) carries the base rpc flavor in its low
 * 12 bits; per-request flag bits start at PLAIN_WFLVR_FLAGS_OFFSET.
 */
#define PLAIN_WFLVR_FLAGS_OFFSET        (12)
#define PLAIN_WFLVR_FLAG_BULK           (1 << (0 + PLAIN_WFLVR_FLAGS_OFFSET))
#define PLAIN_WFLVR_FLAG_USER           (1 << (1 + PLAIN_WFLVR_FLAGS_OFFSET))

/* test whether a wire flavor carries a bulk checksum segment */
#define PLAIN_WFLVR_HAS_BULK(wflvr)      \
        (((wflvr) & PLAIN_WFLVR_FLAG_BULK) != 0)
/* test whether a wire flavor carries a user descriptor segment */
#define PLAIN_WFLVR_HAS_USER(wflvr)      \
        (((wflvr) & PLAIN_WFLVR_FLAG_USER) != 0)

/* strip the flag bits, leaving only the base rpc flavor */
#define PLAIN_WFLVR_TO_RPC(wflvr)       \
        ((wflvr) & ((1 << PLAIN_WFLVR_FLAGS_OFFSET) - 1))
66
67 /*
68  * similar to null sec, temporarily use the third byte of lm_secflvr to identify
69  * the source sec part.
70  */
71 static inline
72 void plain_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
73 {
74         msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 16;
75 }
76
77 static inline
78 enum lustre_sec_part plain_decode_sec_part(struct lustre_msg *msg)
79 {
80         return (msg->lm_secflvr >> 16) & 0xFF;
81 }
82
/*
 * for simplicity, plain policy rpc use fixed layout.
 */
#define PLAIN_PACK_SEGMENTS             (3)     /* segments per wrapper msg */

#define PLAIN_PACK_MSG_OFF              (0)     /* embedded lustre msg */
#define PLAIN_PACK_USER_OFF             (1)     /* optional user descriptor */
#define PLAIN_PACK_BULK_OFF             (2)     /* optional bulk checksum */
91
92 /****************************************
93  * cli_ctx apis                         *
94  ****************************************/
95
/*
 * Plain contexts are created already-uptodate (see
 * plain_sec_install_ctx()), so refresh must never be requested.
 */
static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LBUG();
        return 0;
}
103
/* A plain context never expires; always report it as valid (0). */
static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
        return 0;
}
109
/*
 * Client side "sign": plain policy does no crypto.  Fill in the wire
 * flavor with the per-request bulk/udesc flags and the sender's sec
 * part, then compute the final on-wire request length.  Always 0.
 */
static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg_v2 *msg = req->rq_reqbuf;
        ENTRY;

        msg->lm_secflvr = req->rq_flvr.sf_rpc;
        if (req->rq_pack_bulk)
                msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
        if (req->rq_pack_udesc)
                msg->lm_secflvr |= PLAIN_WFLVR_FLAG_USER;

        plain_encode_sec_part(msg, ctx->cc_sec->ps_part);

        /* wire length covers all segments of the wrapper message */
        req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
                                                 msg->lm_buflens);
        RETURN(0);
}
128
/*
 * Client side "verify": sanity-check the reply wrapper message and
 * locate the embedded reply body.
 *
 * Returns 0 on success and sets rq_repmsg/rq_replen; -EPROTO on layout
 * or flag mismatches, -EINVAL on a malformed bulk checksum segment.
 */
static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_repbuf;
        ENTRY;

        if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
                RETURN(-EPROTO);
        }

        /* expect no user desc in reply */
        if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
                CERROR("Unexpected udesc flag in reply\n");
                RETURN(-EPROTO);
        }

        /* whether we sent with bulk or not, we expect the same in reply */
        if (!equi(req->rq_pack_bulk == 1,
                  PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr))) {
                CERROR("%s bulk checksum in reply\n",
                       req->rq_pack_bulk ? "Missing" : "Unexpected");
                RETURN(-EPROTO);
        }

        if (req->rq_pack_bulk &&
            bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
                CERROR("Mal-formed bulk checksum reply\n");
                RETURN(-EINVAL);
        }

        /* expose the embedded reply message to the caller */
        req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_replen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
        RETURN(0);
}
164
/*
 * Client side: fill the request's bulk security descriptor (segment
 * PLAIN_PACK_BULK_OFF) for @desc via bulk_csum_cli_request().
 */
static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                        struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_cli_request(desc, req->rq_bulk_read,
                                     req->rq_flvr.sf_bulk_hash,
                                     req->rq_reqbuf,
                                     PLAIN_PACK_BULK_OFF);
}
178
/*
 * Client side: check the reply's bulk security descriptor against the
 * one we sent in the request, via bulk_csum_cli_reply().
 */
static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
        LASSERT(req->rq_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_cli_reply(desc, req->rq_bulk_read,
                                   req->rq_reqbuf, PLAIN_PACK_BULK_OFF,
                                   req->rq_repbuf, PLAIN_PACK_BULK_OFF);
}
192
193 /****************************************
194  * sec apis                             *
195  ****************************************/
196
/*
 * Return the singleton client ctx of @plsec with a reference held for
 * the caller, installing a fresh one first if none exists.
 *
 * A candidate ctx is allocated unconditionally before taking pls_lock;
 * if another thread installed one in the meantime (or one already
 * existed) the candidate is freed under the lock.  Returns NULL only
 * when no ctx was installed and the allocation failed.
 */
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
        struct ptlrpc_cli_ctx  *ctx, *ctx_new;

        OBD_ALLOC_PTR(ctx_new);

        write_lock(&plsec->pls_lock);

        ctx = plsec->pls_ctx;
        if (ctx) {
                /* existing ctx wins; take a ref for the caller */
                atomic_inc(&ctx->cc_refcount);

                if (ctx_new)
                        OBD_FREE_PTR(ctx_new);
        } else if (ctx_new) {
                ctx = ctx_new;

                /* plain ctx is born cached and uptodate: never refreshed */
                atomic_set(&ctx->cc_refcount, 1); /* for cache */
                ctx->cc_sec = &plsec->pls_base;
                ctx->cc_ops = &plain_ctx_ops;
                ctx->cc_expire = 0;
                ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
                ctx->cc_vcred.vc_uid = 0;
                spin_lock_init(&ctx->cc_lock);
                CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
                CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);

                plsec->pls_ctx = ctx;
                atomic_inc(&plsec->pls_base.ps_nctx);
                atomic_inc(&plsec->pls_base.ps_refcount);

                atomic_inc(&ctx->cc_refcount); /* for caller */
        }

        write_unlock(&plsec->pls_lock);

        return ctx;
}
236
/*
 * Final teardown of a plain sec.  Callers must have dropped all
 * references and contexts already (enforced by the LASSERTs); releases
 * the import reference taken in plain_create_sec() and frees the sec.
 */
static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        ENTRY;

        LASSERT(sec->ps_policy == &plain_policy);
        LASSERT(sec->ps_import);
        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_nctx) == 0);
        LASSERT(plsec->pls_ctx == NULL);

        class_import_put(sec->ps_import);

        OBD_FREE_PTR(plsec);
        EXIT;
}
254
/* Mark the sec as dying; actual destruction happens via refcounting. */
static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
        sec->ps_dying = 1;
}
260
/*
 * Create a plain sec for @imp with flavor @sf.  @svc_ctx is non-NULL
 * for a reverse (server-initiated) sec, in which case the shared ctx
 * is installed immediately.  Returns the new sec, or NULL on rejected
 * flavor or allocation/install failure.
 */
static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
                                    struct ptlrpc_svc_ctx *svc_ctx,
                                    struct sptlrpc_flavor *sf)
{
        struct plain_sec       *plsec;
        struct ptlrpc_sec      *sec;
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

        /* plain policy supports bulk checksums but never bulk ciphers */
        if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL) {
                CERROR("plain policy don't support bulk cipher: %u\n",
                       sf->sf_bulk_ciph);
                RETURN(NULL);
        }

        OBD_ALLOC_PTR(plsec);
        if (plsec == NULL)
                RETURN(NULL);

        /*
         * initialize plain_sec
         */
        plsec->pls_lock = RW_LOCK_UNLOCKED;
        plsec->pls_ctx = NULL;

        sec = &plsec->pls_base;
        sec->ps_policy = &plain_policy;
        atomic_set(&sec->ps_refcount, 0);
        atomic_set(&sec->ps_nctx, 0);
        sec->ps_id = sptlrpc_get_next_secid();
        sec->ps_import = class_import_get(imp);
        sec->ps_flvr = *sf;
        sec->ps_lock = SPIN_LOCK_UNLOCKED;
        CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
        sec->ps_gc_interval = 0;        /* plain ctx never expires: no gc */
        sec->ps_gc_next = 0;

        /* install ctx immediately if this is a reverse sec */
        if (svc_ctx) {
                ctx = plain_sec_install_ctx(plsec);
                if (ctx == NULL) {
                        plain_destroy_sec(sec);
                        RETURN(NULL);
                }
                sptlrpc_cli_ctx_put(ctx, 1);
        }

        RETURN(sec);
}
313
/*
 * Look up the (single, shared) client ctx; @vcred, @create and
 * @remove_dead are ignored since plain policy has no per-user state.
 * Takes a reference under pls_lock, installing the ctx first if it
 * doesn't exist yet.  Returns the ctx or NULL on allocation failure.
 */
static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
                                        struct vfs_cred *vcred,
                                        int create, int remove_dead)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        read_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        if (ctx)
                atomic_inc(&ctx->cc_refcount);
        read_unlock(&plsec->pls_lock);

        if (unlikely(ctx == NULL))
                ctx = plain_sec_install_ctx(plsec);

        RETURN(ctx);
}
334
/*
 * Free a ctx whose refcount has dropped to zero, then release the
 * ctx's counted reference on the sec (@sync is unused here).
 */
static
void plain_release_ctx(struct ptlrpc_sec *sec,
                       struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(ctx->cc_sec == sec);

        OBD_FREE_PTR(ctx);

        atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}
349
/*
 * Flush the ctx "cache" (the single shared ctx).  Only a flush-all
 * request (uid == -1) has any effect; @grace and @force are ignored.
 * NOTE(review): the `uid != -1` test relies on -1 converting to the
 * all-ones value of uid_t — intentional, matches other policies.
 */
static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
                          uid_t uid, int grace, int force)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        /* do nothing unless caller want to flush for 'all' */
        if (uid != -1)
                RETURN(0);

        /* detach the ctx under the lock, drop the cache ref outside it */
        write_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        plsec->pls_ctx = NULL;
        write_unlock(&plsec->pls_lock);

        if (ctx)
                sptlrpc_cli_ctx_put(ctx, 1);
        RETURN(0);
}
371
/*
 * Allocate (or reuse a pool-provided) request buffer laid out as the
 * fixed 3-segment plain wrapper: message, optional user descriptor,
 * optional bulk checksum.  Sets rq_reqbuf/rq_reqbuf_len/rq_reqmsg and
 * packs the user descriptor if requested.  Returns 0 or -ENOMEM.
 */
static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        int buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;
        ENTRY;

        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_udesc)
                buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);

                /* request side (1) of the bulk security descriptor */
                buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
                                                req->rq_flvr.sf_bulk_hash, 1,
                                                req->rq_bulk_read);
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        if (!req->rq_reqbuf) {
                LASSERT(!req->rq_pool);

                /* round up so the buffer can be reused for resized reqs */
                alloc_len = size_roundup_power2(alloc_len);
                OBD_ALLOC(req->rq_reqbuf, alloc_len);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);

                req->rq_reqbuf_len = alloc_len;
        } else {
                /* pool buffer: must already be big enough, just clear it */
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= alloc_len);
                memset(req->rq_reqbuf, 0, alloc_len);
        }

        lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        req->rq_reqmsg = lustre_msg_buf_v2(req->rq_reqbuf, 0, 0);

        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

        RETURN(0);
}
419
420 static
421 void plain_free_reqbuf(struct ptlrpc_sec *sec,
422                        struct ptlrpc_request *req)
423 {
424         ENTRY;
425         if (!req->rq_pool) {
426                 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
427                 req->rq_reqbuf = NULL;
428                 req->rq_reqbuf_len = 0;
429         }
430
431         req->rq_reqmsg = NULL;
432         EXIT;
433 }
434
/*
 * Allocate the reply buffer with the same fixed 3-segment layout as
 * the request (no user descriptor is ever expected in a reply).
 * Returns 0 or -ENOMEM.
 */
static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        int buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;
        ENTRY;

        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);

                /* reply side (0) of the bulk security descriptor */
                buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
                                                req->rq_flvr.sf_bulk_hash, 0,
                                                req->rq_bulk_read);
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
        alloc_len = size_roundup_power2(alloc_len);

        OBD_ALLOC(req->rq_repbuf, alloc_len);
        if (!req->rq_repbuf)
                RETURN(-ENOMEM);

        req->rq_repbuf_len = alloc_len;
        RETURN(0);
}
464
465 static
466 void plain_free_repbuf(struct ptlrpc_sec *sec,
467                        struct ptlrpc_request *req)
468 {
469         ENTRY;
470         OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
471         req->rq_repbuf = NULL;
472         req->rq_repbuf_len = 0;
473
474         req->rq_repmsg = NULL;
475         EXIT;
476 }
477
/*
 * Grow @segment of the embedded request message to @newsize, moving to
 * a bigger wrapper buffer if the current one is too small.  The sizes
 * are computed by temporarily patching the buflens (restored before
 * any reallocation) so no data is disturbed until the final in-place
 * enlarge calls.  Returns 0 or -ENOMEM.
 */
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
                         struct ptlrpc_request *req,
                         int segment, int newsize)
{
        struct lustre_msg      *newbuf;
        int                     oldsize;
        int                     newmsg_size, newbuf_size;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
                req->rq_reqmsg);

        /* compute new embedded msg size.  */
        oldsize = req->rq_reqmsg->lm_buflens[segment];
        req->rq_reqmsg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
                                         req->rq_reqmsg->lm_buflens);
        req->rq_reqmsg->lm_buflens[segment] = oldsize;

        /* compute new wrapper msg size.  */
        oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
        newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
                                         req->rq_reqbuf->lm_buflens);
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

        /* request from pool should always have enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                OBD_ALLOC(newbuf, newbuf_size);
                if (newbuf == NULL)
                        RETURN(-ENOMEM);

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                /* rq_reqmsg pointed into the old buffer; recompute it */
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
                                                PLAIN_PACK_MSG_OFF, 0);
        }

        /* enlarge outer wrapper first, then the inner segment */
        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
                                     newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        RETURN(0);
}
533
534 /****************************************
535  * service apis                         *
536  ****************************************/
537
/* single shared service ctx; starts with one ref held by this module */
static struct ptlrpc_svc_ctx plain_svc_ctx = {
        .sc_refcount    = ATOMIC_INIT(1),
        .sc_policy      = &plain_policy,
};
542
/*
 * Server side: validate an incoming plain request, unpack the optional
 * user descriptor and bulk checksum segments, and attach the shared
 * service ctx.  Returns SECSVC_OK or SECSVC_DROP.
 */
static
int plain_accept(struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_reqbuf;
        ENTRY;

        LASSERT(RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == SPTLRPC_POLICY_PLAIN);

        if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_PLAIN) {
                CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
                RETURN(SECSVC_DROP);
        }

        req->rq_sp_from = plain_decode_sec_part(msg);

        if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
                if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(SECSVC_DROP);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
        }

        if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr)) {
                if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
                        CERROR("Mal-formed bulk checksum request\n");
                        RETURN(SECSVC_DROP);
                }

                req->rq_pack_bulk = 1;
        }

        /* expose the embedded request message */
        req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

        /* shared svc ctx, ref dropped in plain_free_rs() */
        req->rq_svc_ctx = &plain_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);

        RETURN(SECSVC_OK);
}
590
/*
 * Server side: allocate (or reuse a pre-allocated) reply state with a
 * reply buffer laid out as the fixed 3-segment plain wrapper.  The
 * reply buffer lives immediately after the reply state struct.
 * Returns 0 or -ENOMEM.
 */
static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_reply_state   *rs;
        struct ptlrpc_bulk_sec_desc *bsd;
        int                          buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int                          rs_size = sizeof(*rs);
        ENTRY;

        LASSERT(msgsize % 8 == 0);

        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write)) {
                /* mirror the hash algorithm the client requested */
                bsd = lustre_msg_buf(req->rq_reqbuf,
                                     PLAIN_PACK_BULK_OFF, sizeof(*bsd));
                LASSERT(bsd);

                buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
                                                        bsd->bsd_hash_alg, 0,
                                                        req->rq_bulk_read);
        }
        rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC(rs, rs_size);
                if (rs == NULL)
                        RETURN(-ENOMEM);

                rs->rs_size = rs_size;
        }

        /* svc ctx ref is dropped in plain_free_rs() */
        rs->rs_svc_ctx = req->rq_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);
        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = rs_size - sizeof(*rs);

        lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

        req->rq_reply_state = rs;
        RETURN(0);
}
639
/*
 * Server side: release a reply state.  Drops the svc ctx ref taken in
 * plain_alloc_rs(); pre-allocated reply states are not freed here.
 */
static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
        ENTRY;

        LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
        atomic_dec(&rs->rs_svc_ctx->sc_refcount);

        if (!rs->rs_prealloc)
                OBD_FREE(rs, rs->rs_size);
        EXIT;
}
652
/*
 * Server side "authorize": shrink the embedded reply message to its
 * actual length if needed, then fill in the wire flavor (with the bulk
 * flag when a bulk checksum is packed) and the final reply length.
 * Always succeeds.
 */
static
int plain_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct lustre_msg_v2      *msg = rs->rs_repbuf;
        int                        len;
        ENTRY;

        LASSERT(rs);
        LASSERT(msg);

        if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
                len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
                                        req->rq_replen, 1);
        else
                len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        msg->lm_secflvr = req->rq_flvr.sf_rpc;
        if (req->rq_pack_bulk)
                msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;

        rs->rs_repdata_len = len;
        RETURN(0);
}
677
/*
 * Server side: run bulk_csum_svc() over @desc using the request's and
 * reply's bulk descriptor segments (called after a bulk write lands).
 */
static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state      *rs = req->rq_reply_state;

        LASSERT(rs);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
        LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_svc(desc, req->rq_bulk_read,
                             lustre_msg_buf(req->rq_reqbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(req->rq_reqbuf,
                                               PLAIN_PACK_BULK_OFF),
                             lustre_msg_buf(rs->rs_repbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(rs->rs_repbuf,
                                               PLAIN_PACK_BULK_OFF));
}
699
/*
 * Server side: run bulk_csum_svc() over @desc (called before sending
 * bulk read data).  Identical body to plain_svc_unwrap_bulk(); the
 * read/write distinction is carried by req->rq_bulk_read.
 */
static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state      *rs = req->rq_reply_state;

        LASSERT(rs);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
        LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_svc(desc, req->rq_bulk_read,
                             lustre_msg_buf(req->rq_reqbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(req->rq_reqbuf,
                                               PLAIN_PACK_BULK_OFF),
                             lustre_msg_buf(rs->rs_repbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(rs->rs_repbuf,
                                               PLAIN_PACK_BULK_OFF));
}
721
/* client context operations for the plain policy */
static struct ptlrpc_ctx_ops plain_ctx_ops = {
        .refresh                = plain_ctx_refresh,
        .validate               = plain_ctx_validate,
        .sign                   = plain_ctx_sign,
        .verify                 = plain_ctx_verify,
        .wrap_bulk              = plain_cli_wrap_bulk,
        .unwrap_bulk            = plain_cli_unwrap_bulk,
};
730
/* client-side sec operations for the plain policy */
static struct ptlrpc_sec_cops plain_sec_cops = {
        .create_sec             = plain_create_sec,
        .destroy_sec            = plain_destroy_sec,
        .kill_sec               = plain_kill_sec,
        .lookup_ctx             = plain_lookup_ctx,
        .release_ctx            = plain_release_ctx,
        .flush_ctx_cache        = plain_flush_ctx_cache,
        .alloc_reqbuf           = plain_alloc_reqbuf,
        .alloc_repbuf           = plain_alloc_repbuf,
        .free_reqbuf            = plain_free_reqbuf,
        .free_repbuf            = plain_free_repbuf,
        .enlarge_reqbuf         = plain_enlarge_reqbuf,
};
744
/* server-side sec operations for the plain policy */
static struct ptlrpc_sec_sops plain_sec_sops = {
        .accept                 = plain_accept,
        .alloc_rs               = plain_alloc_rs,
        .authorize              = plain_authorize,
        .free_rs                = plain_free_rs,
        .unwrap_bulk            = plain_svc_unwrap_bulk,
        .wrap_bulk              = plain_svc_wrap_bulk,
};
753
/* the plain policy descriptor registered with the sptlrpc framework */
static struct ptlrpc_sec_policy plain_policy = {
        .sp_owner               = THIS_MODULE,
        .sp_name                = "plain",
        .sp_policy              = SPTLRPC_POLICY_PLAIN,
        .sp_cops                = &plain_sec_cops,
        .sp_sops                = &plain_sec_sops,
};
761
762 int sptlrpc_plain_init(void)
763 {
764         int rc;
765
766         rc = sptlrpc_register_policy(&plain_policy);
767         if (rc)
768                 CERROR("failed to register: %d\n", rc);
769
770         return rc;
771 }
772
773 void sptlrpc_plain_fini(void)
774 {
775         int rc;
776
777         rc = sptlrpc_unregister_policy(&plain_policy);
778         if (rc)
779                 CERROR("cannot unregister: %d\n", rc);
780 }