Whamcloud - gitweb
land clio.
[fs/lustre-release.git] / lustre / ptlrpc / sec_plain.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/sec_plain.c
37  *
38  * Author: Eric Mei <ericm@clusterfs.com>
39  */
40
41 #ifndef EXPORT_SYMTAB
42 # define EXPORT_SYMTAB
43 #endif
44 #define DEBUG_SUBSYSTEM S_SEC
45
46 #ifndef __KERNEL__
47 #include <liblustre.h>
48 #endif
49
50 #include <obd_support.h>
51 #include <obd_cksum.h>
52 #include <obd_class.h>
53 #include <lustre_net.h>
54 #include <lustre_sec.h>
55
/*
 * Plain-policy security instance: the generic ptlrpc_sec plus a single
 * cached client context shared by all users (plain keeps no per-user
 * state).
 */
struct plain_sec {
        struct ptlrpc_sec       pls_base;       /* generic sec */
        rwlock_t                pls_lock;       /* protects pls_ctx */
        struct ptlrpc_cli_ctx  *pls_ctx;        /* cached ctx, or NULL */
};
61
62 static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
63 {
64         return container_of(sec, struct plain_sec, pls_base);
65 }
66
/* forward declarations of the plain-policy singletons defined below */
static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops    plain_ctx_ops;
static struct ptlrpc_svc_ctx    plain_svc_ctx;

/* size of the wrapper message reserved in front of the reply buffer for
 * early replies; computed once in sptlrpc_plain_init() */
static unsigned int plain_at_offset;
72
/*
 * flavor flags (maximum 8 flags), carried in the high bits of the wire
 * flavor word starting at bit PLAIN_WFLVR_FLAGS_OFFSET
 */
#define PLAIN_WFLVR_FLAGS_OFFSET        (12)
#define PLAIN_WFLVR_FLAG_BULK           (1 << (0 + PLAIN_WFLVR_FLAGS_OFFSET))
#define PLAIN_WFLVR_FLAG_USER           (1 << (1 + PLAIN_WFLVR_FLAGS_OFFSET))

/* does the wire flavor carry a bulk checksum / a user descriptor? */
#define PLAIN_WFLVR_HAS_BULK(wflvr)      \
        (((wflvr) & PLAIN_WFLVR_FLAG_BULK) != 0)
#define PLAIN_WFLVR_HAS_USER(wflvr)      \
        (((wflvr) & PLAIN_WFLVR_FLAG_USER) != 0)

/* strip the flag bits, leaving the base rpc flavor */
#define PLAIN_WFLVR_TO_RPC(wflvr)       \
        ((wflvr) & ((1 << PLAIN_WFLVR_FLAGS_OFFSET) - 1))
87
88 /*
89  * similar to null sec, temporarily use the third byte of lm_secflvr to identify
90  * the source sec part.
91  */
92 static inline
93 void plain_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
94 {
95         msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 16;
96 }
97
98 static inline
99 enum lustre_sec_part plain_decode_sec_part(struct lustre_msg *msg)
100 {
101         return (msg->lm_secflvr >> 16) & 0xFF;
102 }
103
/*
 * for simplicity, plain policy rpc use fixed layout.
 */
#define PLAIN_PACK_SEGMENTS             (3)

#define PLAIN_PACK_MSG_OFF              (0)     /* embedded lustre_msg */
#define PLAIN_PACK_USER_OFF             (1)     /* optional user descriptor */
#define PLAIN_PACK_BULK_OFF             (2)     /* optional bulk checksum */
112
113 /****************************************
114  * cli_ctx apis                         *
115  ****************************************/
116
/*
 * Plain contexts are created uptodate and never expire, so a refresh
 * request indicates a logic error elsewhere.
 */
static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LBUG();
        return 0;
}
124
/*
 * A plain context is always valid.
 */
static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
        return 0;
}
130
131 static
132 int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
133 {
134         struct lustre_msg_v2 *msg = req->rq_reqbuf;
135         ENTRY;
136
137         msg->lm_secflvr = req->rq_flvr.sf_rpc;
138         if (req->rq_pack_bulk)
139                 msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
140         if (req->rq_pack_udesc)
141                 msg->lm_secflvr |= PLAIN_WFLVR_FLAG_USER;
142
143         plain_encode_sec_part(msg, ctx->cc_sec->ps_part);
144
145         req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
146                                                  msg->lm_buflens);
147         RETURN(0);
148 }
149
150 static
151 int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
152 {
153         struct lustre_msg *msg = req->rq_repdata;
154         __u32              cksum;
155         ENTRY;
156
157         if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
158                 CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
159                 RETURN(-EPROTO);
160         }
161
162         /* expect no user desc in reply */
163         if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
164                 CERROR("Unexpected udesc flag in reply\n");
165                 RETURN(-EPROTO);
166         }
167
168         if (unlikely(req->rq_early)) {
169                 cksum = crc32_le(!(__u32) 0,
170                                  lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
171                                  lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF));
172                 if (cksum != msg->lm_cksum) {
173                         CWARN("early reply checksum mismatch: %08x != %08x\n",
174                               cpu_to_le32(cksum), msg->lm_cksum);
175                         RETURN(-EINVAL);
176                 }
177         } else {
178                 /* whether we sent with bulk or not, we expect the same
179                  * in reply, except for early reply */
180                 if (!req->rq_early &&
181                     !equi(req->rq_pack_bulk == 1,
182                           PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr))) {
183                         CERROR("%s bulk checksum in reply\n",
184                                req->rq_pack_bulk ? "Missing" : "Unexpected");
185                         RETURN(-EPROTO);
186                 }
187
188                 if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr) &&
189                     bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
190                         CERROR("Mal-formed bulk checksum reply\n");
191                         RETURN(-EINVAL);
192                 }
193         }
194
195         req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
196         req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
197         RETURN(0);
198 }
199
/*
 * Client side: compute the bulk checksum for @desc and pack it into the
 * bulk segment of the request buffer.
 */
static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                        struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_cli_request(desc, req->rq_bulk_read,
                                     req->rq_flvr.sf_bulk_hash,
                                     req->rq_reqbuf,
                                     PLAIN_PACK_BULK_OFF);
}
213
/*
 * Client side: verify the bulk checksum in the reply against the one we
 * sent in the request (both buffers use the fixed 3-segment layout).
 */
static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
        LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_cli_reply(desc, req->rq_bulk_read,
                                   req->rq_reqbuf, PLAIN_PACK_BULK_OFF,
                                   req->rq_repdata, PLAIN_PACK_BULK_OFF);
}
227
228 /****************************************
229  * sec apis                             *
230  ****************************************/
231
/*
 * Return the cached client context of @plsec, installing a freshly
 * allocated one first if none is cached yet.  The returned ctx carries
 * a reference for the caller; returns NULL only if nothing was cached
 * and the allocation failed.
 */
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
        struct ptlrpc_cli_ctx  *ctx, *ctx_new;

        /* allocate speculatively before taking the lock; dropped below
         * if another thread installed a ctx in the meantime */
        OBD_ALLOC_PTR(ctx_new);

        write_lock(&plsec->pls_lock);

        ctx = plsec->pls_ctx;
        if (ctx) {
                /* lost the race: reuse the existing ctx, free ours */
                atomic_inc(&ctx->cc_refcount);

                if (ctx_new)
                        OBD_FREE_PTR(ctx_new);
        } else if (ctx_new) {
                ctx = ctx_new;

                atomic_set(&ctx->cc_refcount, 1); /* for cache */
                ctx->cc_sec = &plsec->pls_base;
                ctx->cc_ops = &plain_ctx_ops;
                ctx->cc_expire = 0;
                ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
                ctx->cc_vcred.vc_uid = 0;
                spin_lock_init(&ctx->cc_lock);
                CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
                CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);

                plsec->pls_ctx = ctx;
                /* the sec now owns one more ctx and one more reference */
                atomic_inc(&plsec->pls_base.ps_nctx);
                atomic_inc(&plsec->pls_base.ps_refcount);

                atomic_inc(&ctx->cc_refcount); /* for caller */
        }

        write_unlock(&plsec->pls_lock);

        return ctx;
}
271
/*
 * Final teardown of a plain sec.  All references and contexts must be
 * gone by now; releases the import reference and frees the structure.
 */
static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        ENTRY;

        LASSERT(sec->ps_policy == &plain_policy);
        LASSERT(sec->ps_import);
        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_nctx) == 0);
        LASSERT(plsec->pls_ctx == NULL);

        class_import_put(sec->ps_import);

        OBD_FREE_PTR(plsec);
        EXIT;
}
289
/*
 * Mark the sec as dying.  NOTE(review): actual destruction is presumably
 * driven elsewhere once references drain -- confirm against the generic
 * sptlrpc sec lifecycle code.
 */
static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
        sec->ps_dying = 1;
}
295
/*
 * Create a plain sec for import @imp with flavor @sf.  Plain supports
 * no bulk cipher (BULK_CIPH_ALG_NULL only).  For a reverse sec
 * (@svc_ctx != NULL) the single client context is installed right away.
 * Returns the new sec, or NULL on failure.
 */
static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
                                    struct ptlrpc_svc_ctx *svc_ctx,
                                    struct sptlrpc_flavor *sf)
{
        struct plain_sec       *plsec;
        struct ptlrpc_sec      *sec;
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

        if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL) {
                CERROR("plain policy don't support bulk cipher: %u\n",
                       sf->sf_bulk_ciph);
                RETURN(NULL);
        }

        OBD_ALLOC_PTR(plsec);
        if (plsec == NULL)
                RETURN(NULL);

        /*
         * initialize plain_sec
         */
        rwlock_init(&plsec->pls_lock);
        plsec->pls_ctx = NULL;

        sec = &plsec->pls_base;
        sec->ps_policy = &plain_policy;
        atomic_set(&sec->ps_refcount, 0);
        atomic_set(&sec->ps_nctx, 0);
        sec->ps_id = sptlrpc_get_next_secid();
        sec->ps_import = class_import_get(imp);
        sec->ps_flvr = *sf;
        sec->ps_lock = SPIN_LOCK_UNLOCKED;
        CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
        /* plain contexts never expire: no garbage collection needed */
        sec->ps_gc_interval = 0;
        sec->ps_gc_next = 0;

        /* install ctx immediately if this is a reverse sec */
        if (svc_ctx) {
                ctx = plain_sec_install_ctx(plsec);
                if (ctx == NULL) {
                        plain_destroy_sec(sec);
                        RETURN(NULL);
                }
                /* drop the caller reference; the cache keeps its own */
                sptlrpc_cli_ctx_put(ctx, 1);
        }

        RETURN(sec);
}
348
/*
 * Look up the single shared plain context; @vcred, @create and
 * @remove_dead are ignored.  Installs a ctx on first use.  Returns the
 * ctx with a reference held, or NULL on allocation failure.
 */
static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
                                        struct vfs_cred *vcred,
                                        int create, int remove_dead)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        read_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        if (ctx)
                atomic_inc(&ctx->cc_refcount);
        read_unlock(&plsec->pls_lock);

        /* slow path: allocate and install under the write lock */
        if (unlikely(ctx == NULL))
                ctx = plain_sec_install_ctx(plsec);

        RETURN(ctx);
}
369
/*
 * Free a context whose last reference is gone, then drop the sec's
 * context count and the reference the ctx held on the sec.
 */
static
void plain_release_ctx(struct ptlrpc_sec *sec,
                       struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(ctx->cc_sec == sec);

        OBD_FREE_PTR(ctx);

        atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}
384
/*
 * Flush the cached context.  Only a flush of 'all' users (uid == -1) is
 * honored; plain keeps a single shared context, so per-uid flushes are
 * meaningless and return 0 without doing anything.
 */
static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
                          uid_t uid, int grace, int force)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        /* do nothing unless caller want to flush for 'all' */
        if (uid != -1)
                RETURN(0);

        /* detach the ctx under the lock, drop the cache ref outside it */
        write_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        plsec->pls_ctx = NULL;
        write_unlock(&plsec->pls_lock);

        if (ctx)
                sptlrpc_cli_ctx_put(ctx, 1);
        RETURN(0);
}
406
/*
 * Allocate (or reuse) the request buffer using the fixed 3-segment
 * layout: embedded message, optional user descriptor, optional bulk
 * checksum.  Pool-backed buffers are reused in place and must already
 * be large enough.  Returns 0 or -ENOMEM.
 */
static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;
        ENTRY;

        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_udesc)
                buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);

                /* request side carries the full checksum descriptor */
                buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
                                                req->rq_flvr.sf_bulk_hash, 1,
                                                req->rq_bulk_read);
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        if (!req->rq_reqbuf) {
                LASSERT(!req->rq_pool);

                /* round up so future enlargements may fit in place */
                alloc_len = size_roundup_power2(alloc_len);
                OBD_ALLOC(req->rq_reqbuf, alloc_len);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);

                req->rq_reqbuf_len = alloc_len;
        } else {
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= alloc_len);
                memset(req->rq_reqbuf, 0, alloc_len);
        }

        lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        req->rq_reqmsg = lustre_msg_buf_v2(req->rq_reqbuf, 0, 0);

        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

        RETURN(0);
}
454
/*
 * Free the request buffer unless it belongs to the request pool, and
 * clear the embedded message pointer in either case.
 */
static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        ENTRY;
        if (!req->rq_pool) {
                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }

        req->rq_reqmsg = NULL;
        EXIT;
}
469
/*
 * Allocate the client reply buffer: the fixed 3-segment layout plus
 * plain_at_offset bytes reserved in front for a possible early reply.
 * Returns 0 or -ENOMEM.
 */
static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;
        ENTRY;

        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                /* reply side carries only the server's checksum part */
                buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
                                                req->rq_flvr.sf_bulk_hash, 0,
                                                req->rq_bulk_read);
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        /* add space for early reply */
        alloc_len += plain_at_offset;

        alloc_len = size_roundup_power2(alloc_len);

        OBD_ALLOC(req->rq_repbuf, alloc_len);
        if (!req->rq_repbuf)
                RETURN(-ENOMEM);

        req->rq_repbuf_len = alloc_len;
        RETURN(0);
}
502
503 static
504 void plain_free_repbuf(struct ptlrpc_sec *sec,
505                        struct ptlrpc_request *req)
506 {
507         ENTRY;
508         OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
509         req->rq_repbuf = NULL;
510         req->rq_repbuf_len = 0;
511
512         req->rq_repmsg = NULL;
513         EXIT;
514 }
515
/*
 * Grow segment @segment of the embedded request message to @newsize.
 * First computes the new embedded and wrapper sizes (by temporarily
 * patching the buflens arrays), reallocates the wrapper if needed
 * (pool buffers must already be big enough), then shifts the data in
 * place.  Returns 0 or -ENOMEM.
 */
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
                         struct ptlrpc_request *req,
                         int segment, int newsize)
{
        struct lustre_msg      *newbuf;
        int                     oldsize;
        int                     newmsg_size, newbuf_size;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
                req->rq_reqmsg);

        /* compute new embedded msg size.  */
        oldsize = req->rq_reqmsg->lm_buflens[segment];
        req->rq_reqmsg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
                                         req->rq_reqmsg->lm_buflens);
        req->rq_reqmsg->lm_buflens[segment] = oldsize;

        /* compute new wrapper msg size.  */
        oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
        newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
                                         req->rq_reqbuf->lm_buflens);
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

        /* request from pool should always have enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                OBD_ALLOC(newbuf, newbuf_size);
                if (newbuf == NULL)
                        RETURN(-ENOMEM);

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                /* rq_reqmsg moved along with the wrapper buffer */
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
                                                PLAIN_PACK_MSG_OFF, 0);
        }

        /* enlarge the wrapper first, then the embedded segment, so the
         * in-place shifts do not overwrite live data */
        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
                                     newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        RETURN(0);
}
571
572 /****************************************
573  * service apis                         *
574  ****************************************/
575
/* the single statically-allocated server-side context shared by all
 * plain requests; starts with one reference */
static struct ptlrpc_svc_ctx plain_svc_ctx = {
        .sc_refcount    = ATOMIC_INIT(1),
        .sc_policy      = &plain_policy,
};
580
/*
 * Server side: accept an incoming plain request.  Validates the segment
 * count and flavor, decodes the source sec part, and unpacks the
 * optional user descriptor and bulk checksum segments.  On success
 * points rq_reqmsg at the embedded message and attaches the shared
 * service context.  Returns SECSVC_OK or SECSVC_DROP.
 */
static
int plain_accept(struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_reqbuf;
        ENTRY;

        LASSERT(RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == SPTLRPC_POLICY_PLAIN);

        if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_PLAIN) {
                CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
                RETURN(SECSVC_DROP);
        }

        req->rq_sp_from = plain_decode_sec_part(msg);

        if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
                if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(SECSVC_DROP);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
        }

        if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr)) {
                if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
                        CERROR("Mal-formed bulk checksum request\n");
                        RETURN(SECSVC_DROP);
                }

                req->rq_pack_bulk = 1;
        }

        req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

        /* all plain requests share the one static service context */
        req->rq_svc_ctx = &plain_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);

        RETURN(SECSVC_OK);
}
628
/*
 * Allocate (or reuse a pre-allocated) reply state with a reply buffer
 * in the fixed 3-segment layout appended after the rs struct.  The bulk
 * segment is sized from the hash algorithm the client sent in its own
 * bulk descriptor.  Returns 0 or -ENOMEM.
 */
static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_reply_state   *rs;
        struct ptlrpc_bulk_sec_desc *bsd;
        __u32                        buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int                          rs_size = sizeof(*rs);
        ENTRY;

        LASSERT(msgsize % 8 == 0);

        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write)) {
                /* mirror the hash algorithm chosen by the client */
                bsd = lustre_msg_buf(req->rq_reqbuf,
                                     PLAIN_PACK_BULK_OFF, sizeof(*bsd));
                LASSERT(bsd);

                buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
                                                        bsd->bsd_hash_alg, 0,
                                                        req->rq_bulk_read);
        }
        rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC(rs, rs_size);
                if (rs == NULL)
                        RETURN(-ENOMEM);

                rs->rs_size = rs_size;
        }

        rs->rs_svc_ctx = req->rq_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);
        /* the reply buffer lives immediately after the rs struct */
        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = rs_size - sizeof(*rs);

        lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

        req->rq_reply_state = rs;
        RETURN(0);
}
677
/*
 * Release a reply state: drop its service-context reference and free
 * the memory unless it was pre-allocated by the service.
 */
static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
        ENTRY;

        LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
        atomic_dec(&rs->rs_svc_ctx->sc_refcount);

        if (!rs->rs_prealloc)
                OBD_FREE(rs, rs->rs_size);
        EXIT;
}
690
/*
 * Server side: finalize the reply.  Shrinks the embedded message to its
 * actual length, stamps the wire flavor (+ bulk flag), and either sets
 * the early-reply offset (final reply with AT support) or computes an
 * inline checksum over the embedded message (early reply).
 */
static
int plain_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct lustre_msg_v2      *msg = rs->rs_repbuf;
        int                        len;
        ENTRY;

        LASSERT(rs);
        LASSERT(msg);

        if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
                len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
                                        req->rq_replen, 1);
        else
                len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        msg->lm_secflvr = req->rq_flvr.sf_rpc;
        if (req->rq_pack_bulk)
                msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;

        rs->rs_repdata_len = len;

        if (likely(req->rq_packed_final)) {
                if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
                        req->rq_reply_off = plain_at_offset;
                else
                        req->rq_reply_off = 0;
        } else {
                /* early reply: protect the message with an inline
                 * checksum.  NOTE(review): seed is !(__u32)0 == 1, not
                 * ~(__u32)0; plain_ctx_verify() uses the identical
                 * expression, so the protocol is self-consistent. */
                msg->lm_cksum = crc32_le(!(__u32) 0,
                                lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                                lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF));
                req->rq_reply_off = 0;
        }

        RETURN(0);
}
728
/*
 * Server side, bulk unwrap: delegate to bulk_csum_svc() with the bulk
 * segments of the request buffer (client's descriptor) and the reply
 * buffer (server's descriptor).
 */
static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state      *rs = req->rq_reply_state;

        LASSERT(rs);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
        LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_svc(desc, req->rq_bulk_read,
                             lustre_msg_buf(req->rq_reqbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(req->rq_reqbuf,
                                               PLAIN_PACK_BULK_OFF),
                             lustre_msg_buf(rs->rs_repbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(rs->rs_repbuf,
                                               PLAIN_PACK_BULK_OFF));
}
750
/*
 * Server side, bulk wrap: identical call to plain_svc_unwrap_bulk() --
 * bulk_csum_svc() distinguishes read vs write via req->rq_bulk_read.
 */
static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state      *rs = req->rq_reply_state;

        LASSERT(rs);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
        LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_svc(desc, req->rq_bulk_read,
                             lustre_msg_buf(req->rq_reqbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(req->rq_reqbuf,
                                               PLAIN_PACK_BULK_OFF),
                             lustre_msg_buf(rs->rs_repbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(rs->rs_repbuf,
                                               PLAIN_PACK_BULK_OFF));
}
772
/* client context operations for the plain policy */
static struct ptlrpc_ctx_ops plain_ctx_ops = {
        .refresh                = plain_ctx_refresh,
        .validate               = plain_ctx_validate,
        .sign                   = plain_ctx_sign,
        .verify                 = plain_ctx_verify,
        .wrap_bulk              = plain_cli_wrap_bulk,
        .unwrap_bulk            = plain_cli_unwrap_bulk,
};
781
/* client-side sec operations for the plain policy */
static struct ptlrpc_sec_cops plain_sec_cops = {
        .create_sec             = plain_create_sec,
        .destroy_sec            = plain_destroy_sec,
        .kill_sec               = plain_kill_sec,
        .lookup_ctx             = plain_lookup_ctx,
        .release_ctx            = plain_release_ctx,
        .flush_ctx_cache        = plain_flush_ctx_cache,
        .alloc_reqbuf           = plain_alloc_reqbuf,
        .alloc_repbuf           = plain_alloc_repbuf,
        .free_reqbuf            = plain_free_reqbuf,
        .free_repbuf            = plain_free_repbuf,
        .enlarge_reqbuf         = plain_enlarge_reqbuf,
};
795
/* server-side sec operations for the plain policy */
static struct ptlrpc_sec_sops plain_sec_sops = {
        .accept                 = plain_accept,
        .alloc_rs               = plain_alloc_rs,
        .authorize              = plain_authorize,
        .free_rs                = plain_free_rs,
        .unwrap_bulk            = plain_svc_unwrap_bulk,
        .wrap_bulk              = plain_svc_wrap_bulk,
};
804
/* the plain policy descriptor registered with the sptlrpc framework */
static struct ptlrpc_sec_policy plain_policy = {
        .sp_owner               = THIS_MODULE,
        .sp_name                = "plain",
        .sp_policy              = SPTLRPC_POLICY_PLAIN,
        .sp_cops                = &plain_sec_cops,
        .sp_sops                = &plain_sec_sops,
};
812
/*
 * Module init: precompute plain_at_offset (the wrapper size of an
 * early-reply message) and register the plain policy.  Returns 0 or
 * the registration error.
 */
int sptlrpc_plain_init(void)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int rc;

        buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
        plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rc = sptlrpc_register_policy(&plain_policy);
        if (rc)
                CERROR("failed to register: %d\n", rc);

        return rc;
}
827
/*
 * Module cleanup: unregister the plain policy; failure is only logged.
 */
void sptlrpc_plain_fini(void)
{
        int rc;

        rc = sptlrpc_unregister_policy(&plain_policy);
        if (rc)
                CERROR("cannot unregister: %d\n", rc);
}