1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2006-2007 Cluster File Systems, Inc.
5 * Author: Eric Mei <ericm@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 # define EXPORT_SYMTAB
26 #define DEBUG_SUBSYSTEM S_SEC
29 #include <liblustre.h>
32 #include <obd_support.h>
33 #include <obd_cksum.h>
34 #include <obd_class.h>
35 #include <lustre_net.h>
36 #include <lustre_sec.h>
39 struct ptlrpc_sec pls_base;
41 struct ptlrpc_cli_ctx *pls_ctx;
/* Map a generic ptlrpc_sec back to its enclosing plain_sec container. */
44 static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
46 return container_of(sec, struct plain_sec, pls_base);
49 static struct ptlrpc_sec_policy plain_policy;
50 static struct ptlrpc_ctx_ops plain_ctx_ops;
51 static struct ptlrpc_svc_ctx plain_svc_ctx;
53 static unsigned int plain_at_offset;
/* Wire-flavor flag bits: the plain policy stores per-RPC flags (bulk
 * checksum present, user descriptor present) in the high bits of the
 * flavor word, above bit PLAIN_WFLVR_FLAGS_OFFSET. */
56 * flavor flags (maximum 8 flags)
58 #define PLAIN_WFLVR_FLAGS_OFFSET (12)
59 #define PLAIN_WFLVR_FLAG_BULK (1 << (0 + PLAIN_WFLVR_FLAGS_OFFSET))
60 #define PLAIN_WFLVR_FLAG_USER (1 << (1 + PLAIN_WFLVR_FLAGS_OFFSET))
62 #define PLAIN_WFLVR_HAS_BULK(wflvr) \
63 (((wflvr) & PLAIN_WFLVR_FLAG_BULK) != 0)
64 #define PLAIN_WFLVR_HAS_USER(wflvr) \
65 (((wflvr) & PLAIN_WFLVR_FLAG_USER) != 0)
/* Strip the flag bits, leaving only the base rpc flavor. */
67 #define PLAIN_WFLVR_TO_RPC(wflvr) \
68 ((wflvr) & ((1 << PLAIN_WFLVR_FLAGS_OFFSET) - 1))
71 * similar to null sec, temporarily use the third byte of lm_secflvr to identify
72 * the source sec part.
/* Stash the sender's sec part (client/MDT/OST...) in byte 2 of lm_secflvr. */
75 void plain_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
77 msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 16;
/* Inverse of plain_encode_sec_part(): recover the sec part from byte 2. */
81 enum lustre_sec_part plain_decode_sec_part(struct lustre_msg *msg)
83 return (msg->lm_secflvr >> 16) & 0xFF;
87 * for simplicity, plain policy rpc use fixed layout.
89 #define PLAIN_PACK_SEGMENTS (3)
91 #define PLAIN_PACK_MSG_OFF (0)
92 #define PLAIN_PACK_USER_OFF (1)
93 #define PLAIN_PACK_BULK_OFF (2)
95 /****************************************
97 ****************************************/
/* Plain contexts are always uptodate, so refresh is never requested. */
100 int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
102 /* should never reach here */
/* Validate hook — body elided in this listing; presumably a no-op success. */
108 int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
/* "Sign" an outgoing request: no crypto — just record the rpc flavor plus
 * bulk/udesc flag bits and the sender's sec part in the wrapper message
 * header, then compute the on-wire request length. */
114 int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
116 struct lustre_msg_v2 *msg = req->rq_reqbuf;
119 msg->lm_secflvr = req->rq_flvr.sf_rpc;
120 if (req->rq_pack_bulk)
121 msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
122 if (req->rq_pack_udesc)
123 msg->lm_secflvr |= PLAIN_WFLVR_FLAG_USER;
125 plain_encode_sec_part(msg, ctx->cc_sec->ps_part);
/* NOTE(review): second argument of lustre_msg_size_v2() elided in listing. */
127 req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
/* Verify an incoming reply: check segment count, detect early replies by
 * whether the reply data lies inside rq_repbuf, reject unexpected user
 * descriptors, verify the early-reply checksum, require the bulk flag to
 * match what we sent, and finally expose the embedded reply message. */
133 int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
135 struct lustre_msg *msg = req->rq_repdata;
140 if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
141 CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
145 /* find out if it's an early reply */
146 if ((char *) msg < req->rq_repbuf ||
147 (char *) msg >= req->rq_repbuf + req->rq_repbuf_len)
150 /* expect no user desc in reply */
151 if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
152 CERROR("Unexpected udesc flag in reply\n");
156 if (unlikely(early)) {
/* NOTE(review): `!(__u32) 0` evaluates to 0; the conventional crc32_le
 * seed is all-ones, i.e. `~(__u32)0` — looks like a transcription
 * error in this listing, verify against upstream. */
157 cksum = crc32_le(!(__u32) 0,
158 lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
159 lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF));
160 if (cksum != msg->lm_cksum) {
161 CWARN("early reply checksum mismatch: %08x != %08x\n",
162 cpu_to_le32(cksum), msg->lm_cksum);
166 /* whether we sent with bulk or not, we expect the same
167 * in reply, except for early reply */
169 !equi(req->rq_pack_bulk == 1,
170 PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr))) {
171 CERROR("%s bulk checksum in reply\n",
172 req->rq_pack_bulk ? "Missing" : "Unexpected");
176 if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr) &&
177 bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
178 CERROR("Mal-formed bulk checksum reply\n");
/* success: point rq_repmsg/rq_replen at the embedded lustre message */
183 req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
184 req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
/* Client side: compute and pack the bulk checksum descriptor for an
 * outgoing bulk transfer into the request's BULK segment. */
189 int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
190 struct ptlrpc_request *req,
191 struct ptlrpc_bulk_desc *desc)
193 LASSERT(req->rq_pack_bulk);
194 LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
/* NOTE(review): one argument line of bulk_csum_cli_request() elided. */
196 return bulk_csum_cli_request(desc, req->rq_bulk_read,
197 req->rq_flvr.sf_bulk_hash,
199 PLAIN_PACK_BULK_OFF);
/* Client side: verify the bulk checksum in the reply against the one we
 * sent in the request (both live at PLAIN_PACK_BULK_OFF). */
203 int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
204 struct ptlrpc_request *req,
205 struct ptlrpc_bulk_desc *desc)
207 LASSERT(req->rq_pack_bulk);
208 LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
209 LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);
211 return bulk_csum_cli_reply(desc, req->rq_bulk_read,
212 req->rq_reqbuf, PLAIN_PACK_BULK_OFF,
213 req->rq_repdata, PLAIN_PACK_BULK_OFF);
216 /****************************************
218 ****************************************/
/* Install the (single) client context of a plain_sec. Allocates a candidate
 * outside the lock; under pls_lock either reuses an already-installed ctx
 * (freeing the candidate) or initializes and installs the new one. Returns
 * the ctx with a reference held for the caller. */
221 struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
223 struct ptlrpc_cli_ctx *ctx, *ctx_new;
225 OBD_ALLOC_PTR(ctx_new);
227 write_lock(&plsec->pls_lock);
229 ctx = plsec->pls_ctx;
/* someone else installed a ctx first — take a ref, drop our candidate */
231 atomic_inc(&ctx->cc_refcount);
234 OBD_FREE_PTR(ctx_new);
235 } else if (ctx_new) {
/* we won the race: initialize and publish the new context */
238 atomic_set(&ctx->cc_refcount, 1); /* for cache */
239 ctx->cc_sec = &plsec->pls_base;
240 ctx->cc_ops = &plain_ctx_ops;
242 ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
243 ctx->cc_vcred.vc_uid = 0;
244 spin_lock_init(&ctx->cc_lock);
245 CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
246 CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
248 plsec->pls_ctx = ctx;
249 atomic_inc(&plsec->pls_base.ps_nctx);
250 atomic_inc(&plsec->pls_base.ps_refcount);
252 atomic_inc(&ctx->cc_refcount); /* for caller */
255 write_unlock(&plsec->pls_lock);
/* Tear down a plain_sec once all references and contexts are gone; drops
 * the import reference taken at creation. */
261 void plain_destroy_sec(struct ptlrpc_sec *sec)
263 struct plain_sec *plsec = sec2plsec(sec);
266 LASSERT(sec->ps_policy == &plain_policy);
267 LASSERT(sec->ps_import);
268 LASSERT(atomic_read(&sec->ps_refcount) == 0);
269 LASSERT(atomic_read(&sec->ps_nctx) == 0);
270 LASSERT(plsec->pls_ctx == NULL);
272 class_import_put(sec->ps_import);
/* Kill hook — body elided in this listing; presumably flushes contexts. */
279 void plain_kill_sec(struct ptlrpc_sec *sec)
/* Create a plain-policy security instance for an import. Rejects any bulk
 * cipher other than NULL (plain does checksums only, no encryption), sets
 * up the embedded ptlrpc_sec, and for reverse secs installs the single
 * client context eagerly. */
285 struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
286 struct ptlrpc_svc_ctx *svc_ctx,
287 struct sptlrpc_flavor *sf)
289 struct plain_sec *plsec;
290 struct ptlrpc_sec *sec;
291 struct ptlrpc_cli_ctx *ctx;
294 LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
296 if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL) {
297 CERROR("plain policy don't support bulk cipher: %u\n",
302 OBD_ALLOC_PTR(plsec);
307 * initialize plain_sec
309 plsec->pls_lock = RW_LOCK_UNLOCKED;
310 plsec->pls_ctx = NULL;
312 sec = &plsec->pls_base;
313 sec->ps_policy = &plain_policy;
314 atomic_set(&sec->ps_refcount, 0);
315 atomic_set(&sec->ps_nctx, 0);
316 sec->ps_id = sptlrpc_get_next_secid();
317 sec->ps_import = class_import_get(imp);
319 sec->ps_lock = SPIN_LOCK_UNLOCKED;
320 CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
/* plain contexts never expire — no garbage collection needed */
321 sec->ps_gc_interval = 0;
324 /* install ctx immediately if this is a reverse sec */
326 ctx = plain_sec_install_ctx(plsec);
/* NOTE(review): error path — destroy on ctx install failure (lines elided) */
328 plain_destroy_sec(sec);
331 sptlrpc_cli_ctx_put(ctx, 1);
/* Look up the single cached context of a plain_sec (uid is irrelevant for
 * this policy); install it lazily on first use. */
338 struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
339 struct vfs_cred *vcred,
340 int create, int remove_dead)
342 struct plain_sec *plsec = sec2plsec(sec);
343 struct ptlrpc_cli_ctx *ctx;
346 read_lock(&plsec->pls_lock);
347 ctx = plsec->pls_ctx;
349 atomic_inc(&ctx->cc_refcount);
350 read_unlock(&plsec->pls_lock);
352 if (unlikely(ctx == NULL))
353 ctx = plain_sec_install_ctx(plsec);
/* Final release of a context: called when its refcount hit zero; frees it
 * and drops the sec's context count and reference. */
359 void plain_release_ctx(struct ptlrpc_sec *sec,
360 struct ptlrpc_cli_ctx *ctx, int sync)
362 LASSERT(atomic_read(&sec->ps_refcount) > 0);
363 LASSERT(atomic_read(&sec->ps_nctx) > 0);
364 LASSERT(atomic_read(&ctx->cc_refcount) == 0);
365 LASSERT(ctx->cc_sec == sec);
369 atomic_dec(&sec->ps_nctx);
370 sptlrpc_sec_put(sec);
/* Flush the context cache: detach the single cached ctx under the write
 * lock and drop its cache reference. Only acts on an "all uids" flush. */
374 int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
375 uid_t uid, int grace, int force)
377 struct plain_sec *plsec = sec2plsec(sec);
378 struct ptlrpc_cli_ctx *ctx;
381 /* do nothing unless caller want to flush for 'all' */
385 write_lock(&plsec->pls_lock);
386 ctx = plsec->pls_ctx;
387 plsec->pls_ctx = NULL;
388 write_unlock(&plsec->pls_lock);
391 sptlrpc_cli_ctx_put(ctx, 1);
/* Allocate (or reuse a pool) request buffer laid out as the fixed 3-segment
 * plain format: [msg][user-desc?][bulk-desc?]. Optional segments get a
 * length only when the request carries a udesc / bulk checksum. */
396 int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
397 struct ptlrpc_request *req,
400 int buflens[PLAIN_PACK_SEGMENTS] = { 0, };
404 buflens[PLAIN_PACK_MSG_OFF] = msgsize;
406 if (req->rq_pack_udesc)
407 buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();
409 if (req->rq_pack_bulk) {
410 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
/* NOTE(review): trailing argument line of bulk_sec_desc_size() elided. */
412 buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
413 req->rq_flvr.sf_bulk_hash, 1,
417 alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
419 if (!req->rq_reqbuf) {
420 LASSERT(!req->rq_pool);
/* round up so the buffer can be reused for slightly larger requests */
422 alloc_len = size_roundup_power2(alloc_len);
423 OBD_ALLOC(req->rq_reqbuf, alloc_len);
427 req->rq_reqbuf_len = alloc_len;
429 LASSERT(req->rq_pool);
430 LASSERT(req->rq_reqbuf_len >= alloc_len);
431 memset(req->rq_reqbuf, 0, alloc_len);
434 lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
435 req->rq_reqmsg = lustre_msg_buf_v2(req->rq_reqbuf, 0, 0);
437 if (req->rq_pack_udesc)
438 sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);
/* Free a non-pool request buffer and clear the request's buffer pointers. */
444 void plain_free_reqbuf(struct ptlrpc_sec *sec,
445 struct ptlrpc_request *req)
449 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len)
450 req->rq_reqbuf = NULL;
451 req->rq_reqbuf_len = 0;
454 req->rq_reqmsg = NULL;
/* Allocate the reply buffer with the same 3-segment layout as the request,
 * plus headroom for an early reply (plain_at_offset). */
459 int plain_alloc_repbuf(struct ptlrpc_sec *sec,
460 struct ptlrpc_request *req,
463 int buflens[PLAIN_PACK_SEGMENTS] = { 0, };
467 buflens[PLAIN_PACK_MSG_OFF] = msgsize;
469 if (req->rq_pack_bulk) {
470 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
/* NOTE(review): trailing argument line of bulk_sec_desc_size() elided. */
471 buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
472 req->rq_flvr.sf_bulk_hash, 0,
476 alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
478 /* add space for early reply */
479 alloc_len += plain_at_offset;
481 alloc_len = size_roundup_power2(alloc_len);
483 OBD_ALLOC(req->rq_repbuf, alloc_len);
487 req->rq_repbuf_len = alloc_len;
/* Free the reply buffer and clear the request's reply pointers. */
492 void plain_free_repbuf(struct ptlrpc_sec *sec,
493 struct ptlrpc_request *req)
496 OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
497 req->rq_repbuf = NULL;
498 req->rq_repbuf_len = 0;
500 req->rq_repmsg = NULL;
/* Grow one segment of the embedded request message to `newsize`. Computes
 * the new embedded and wrapper sizes by temporarily patching the buflens,
 * reallocates the wrapper buffer if needed (never for pool buffers, which
 * are asserted big enough), then shifts data in place. */
505 int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
506 struct ptlrpc_request *req,
507 int segment, int newsize)
509 struct lustre_msg *newbuf;
511 int newmsg_size, newbuf_size;
514 LASSERT(req->rq_reqbuf);
515 LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
516 LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
519 /* compute new embedded msg size. */
520 oldsize = req->rq_reqmsg->lm_buflens[segment];
521 req->rq_reqmsg->lm_buflens[segment] = newsize;
522 newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
523 req->rq_reqmsg->lm_buflens);
524 req->rq_reqmsg->lm_buflens[segment] = oldsize;
526 /* compute new wrapper msg size. */
527 oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
528 req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
529 newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
530 req->rq_reqbuf->lm_buflens);
531 req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;
533 /* request from pool should always have enough buffer */
534 LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
536 if (req->rq_reqbuf_len < newbuf_size) {
537 newbuf_size = size_roundup_power2(newbuf_size);
539 OBD_ALLOC(newbuf, newbuf_size);
/* copy old content across, free old buffer, repoint rq_reqmsg */
543 memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
545 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
546 req->rq_reqbuf = newbuf;
547 req->rq_reqbuf_len = newbuf_size;
548 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
549 PLAIN_PACK_MSG_OFF, 0);
/* enlarge outer wrapper first, then the embedded segment */
552 _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
554 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
556 req->rq_reqlen = newmsg_size;
560 /****************************************
562 ****************************************/
/* Single shared service context for all plain-policy requests; starts with
 * one reference held by this module. */
564 static struct ptlrpc_svc_ctx plain_svc_ctx = {
565 .sc_refcount = ATOMIC_INIT(1),
566 .sc_policy = &plain_policy,
/* Server side: accept an incoming plain request. Validates segment count
 * and flavor, decodes the sender's sec part, unpacks the optional user
 * descriptor and bulk checksum, and exposes the embedded request message.
 * Takes a reference on the shared plain_svc_ctx. */
570 int plain_accept(struct ptlrpc_request *req)
572 struct lustre_msg *msg = req->rq_reqbuf;
575 LASSERT(RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == SPTLRPC_POLICY_PLAIN);
577 if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
578 CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
582 if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_PLAIN) {
583 CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
587 req->rq_sp_from = plain_decode_sec_part(msg);
589 if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
590 if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF)) {
591 CERROR("Mal-formed user descriptor\n");
595 req->rq_pack_udesc = 1;
596 req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
599 if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr)) {
600 if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
601 CERROR("Mal-formed bulk checksum request\n");
605 req->rq_pack_bulk = 1;
608 req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
609 req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
611 req->rq_svc_ctx = &plain_svc_ctx;
612 atomic_inc(&req->rq_svc_ctx->sc_refcount);
/* Allocate the reply state: the reply buffer (3-segment plain layout,
 * sized from the request's bulk descriptor when bulk is in play) lives
 * immediately after the ptlrpc_reply_state struct. Reuses a preallocated
 * rs when the request provides one. */
618 int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
620 struct ptlrpc_reply_state *rs;
621 struct ptlrpc_bulk_sec_desc *bsd;
622 int buflens[PLAIN_PACK_SEGMENTS] = { 0, };
623 int rs_size = sizeof(*rs);
626 LASSERT(msgsize % 8 == 0);
628 buflens[PLAIN_PACK_MSG_OFF] = msgsize;
630 if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write)) {
631 bsd = lustre_msg_buf(req->rq_reqbuf,
632 PLAIN_PACK_BULK_OFF, sizeof(*bsd));
/* NOTE(review): trailing argument line of bulk_sec_desc_size() elided. */
635 buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
636 bsd->bsd_hash_alg, 0,
639 rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
641 rs = req->rq_reply_state;
/* preallocated rs must be large enough for our layout */
645 LASSERT(rs->rs_size >= rs_size);
647 OBD_ALLOC(rs, rs_size);
651 rs->rs_size = rs_size;
654 rs->rs_svc_ctx = req->rq_svc_ctx;
655 atomic_inc(&req->rq_svc_ctx->sc_refcount);
/* repbuf is carved out of the same allocation, right after rs */
656 rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
657 rs->rs_repbuf_len = rs_size - sizeof(*rs);
659 lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
660 rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);
662 req->rq_reply_state = rs;
/* Release a reply state: drop the svc ctx reference (never the last one —
 * the static ctx keeps one) and free unless it came from the prealloc pool. */
667 void plain_free_rs(struct ptlrpc_reply_state *rs)
671 LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
672 atomic_dec(&rs->rs_svc_ctx->sc_refcount);
674 if (!rs->rs_prealloc)
675 OBD_FREE(rs, rs->rs_size);
/* Server side: finalize the reply — shrink the msg segment if the actual
 * reply is smaller than allocated, stamp flavor + bulk flag, and for early
 * replies checksum the message so the client can verify it. */
680 int plain_authorize(struct ptlrpc_request *req)
682 struct ptlrpc_reply_state *rs = req->rq_reply_state;
683 struct lustre_msg_v2 *msg = rs->rs_repbuf;
690 if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
691 len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
694 len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
696 msg->lm_secflvr = req->rq_flvr.sf_rpc;
697 if (req->rq_pack_bulk)
698 msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
700 rs->rs_repdata_len = len;
702 if (likely(req->rq_packed_final)) {
703 req->rq_reply_off = plain_at_offset;
/* NOTE(review): `!(__u32) 0` evaluates to 0; crc32_le is customarily
 * seeded with `~(__u32)0` — likely a transcription error, verify. */
705 msg->lm_cksum = crc32_le(!(__u32) 0,
706 lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
707 lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF));
708 req->rq_reply_off = 0;
/* Server side: verify the incoming bulk data checksum using the client's
 * descriptor from the request and write our own into the reply. */
715 int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
716 struct ptlrpc_bulk_desc *desc)
718 struct ptlrpc_reply_state *rs = req->rq_reply_state;
721 LASSERT(req->rq_pack_bulk);
722 LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
723 LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
725 return bulk_csum_svc(desc, req->rq_bulk_read,
726 lustre_msg_buf(req->rq_reqbuf,
727 PLAIN_PACK_BULK_OFF, 0),
728 lustre_msg_buflen(req->rq_reqbuf,
729 PLAIN_PACK_BULK_OFF),
730 lustre_msg_buf(rs->rs_repbuf,
731 PLAIN_PACK_BULK_OFF, 0),
732 lustre_msg_buflen(rs->rs_repbuf,
733 PLAIN_PACK_BULK_OFF));
/* Server side: checksum outgoing bulk data into the reply descriptor.
 * Mirrors plain_svc_unwrap_bulk(); bulk_csum_svc() dispatches on
 * rq_bulk_read to decide the direction. */
737 int plain_svc_wrap_bulk(struct ptlrpc_request *req,
738 struct ptlrpc_bulk_desc *desc)
740 struct ptlrpc_reply_state *rs = req->rq_reply_state;
743 LASSERT(req->rq_pack_bulk);
744 LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
745 LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
747 return bulk_csum_svc(desc, req->rq_bulk_read,
748 lustre_msg_buf(req->rq_reqbuf,
749 PLAIN_PACK_BULK_OFF, 0),
750 lustre_msg_buflen(req->rq_reqbuf,
751 PLAIN_PACK_BULK_OFF),
752 lustre_msg_buf(rs->rs_repbuf,
753 PLAIN_PACK_BULK_OFF, 0),
754 lustre_msg_buflen(rs->rs_repbuf,
755 PLAIN_PACK_BULK_OFF));
/* Client context operations table for the plain policy. */
758 static struct ptlrpc_ctx_ops plain_ctx_ops = {
759 .refresh = plain_ctx_refresh,
760 .validate = plain_ctx_validate,
761 .sign = plain_ctx_sign,
762 .verify = plain_ctx_verify,
763 .wrap_bulk = plain_cli_wrap_bulk,
764 .unwrap_bulk = plain_cli_unwrap_bulk,
/* Client-side security operations table. */
767 static struct ptlrpc_sec_cops plain_sec_cops = {
768 .create_sec = plain_create_sec,
769 .destroy_sec = plain_destroy_sec,
770 .kill_sec = plain_kill_sec,
771 .lookup_ctx = plain_lookup_ctx,
772 .release_ctx = plain_release_ctx,
773 .flush_ctx_cache = plain_flush_ctx_cache,
774 .alloc_reqbuf = plain_alloc_reqbuf,
775 .alloc_repbuf = plain_alloc_repbuf,
776 .free_reqbuf = plain_free_reqbuf,
777 .free_repbuf = plain_free_repbuf,
778 .enlarge_reqbuf = plain_enlarge_reqbuf,
/* Server-side security operations table. */
781 static struct ptlrpc_sec_sops plain_sec_sops = {
782 .accept = plain_accept,
783 .alloc_rs = plain_alloc_rs,
784 .authorize = plain_authorize,
785 .free_rs = plain_free_rs,
786 .unwrap_bulk = plain_svc_unwrap_bulk,
787 .wrap_bulk = plain_svc_wrap_bulk,
/* Policy descriptor registered with sptlrpc; ties cops and sops together. */
790 static struct ptlrpc_sec_policy plain_policy = {
791 .sp_owner = THIS_MODULE,
793 .sp_policy = SPTLRPC_POLICY_PLAIN,
794 .sp_cops = &plain_sec_cops,
795 .sp_sops = &plain_sec_sops,
/* Module init: precompute the early-reply offset (size of a 3-segment msg
 * whose first segment is the standard early-reply size) and register the
 * plain policy with the sptlrpc framework. */
798 int sptlrpc_plain_init(void)
800 int buflens[PLAIN_PACK_SEGMENTS] = { 0, };
803 buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
804 plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
806 rc = sptlrpc_register_policy(&plain_policy);
808 CERROR("failed to register: %d\n", rc);
/* Module teardown: unregister the plain policy; log (but cannot recover
 * from) a failure. */
813 void sptlrpc_plain_fini(void)
817 rc = sptlrpc_unregister_policy(&plain_policy);
819 CERROR("cannot unregister: %d\n", rc);