1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2006-2007 Cluster File Systems, Inc.
5 * Author: Eric Mei <ericm@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 # define EXPORT_SYMTAB
26 #define DEBUG_SUBSYSTEM S_SEC
29 #include <liblustre.h>
32 #include <obd_support.h>
33 #include <obd_class.h>
34 #include <lustre_net.h>
35 #include <lustre_sec.h>
/*
 * Members of struct plain_sec (the struct's opening/closing lines are not
 * visible in this view).  pls_base embeds the generic ptlrpc_sec; the
 * container relationship is recovered via container_of() in sec2plsec().
 */
38 struct ptlrpc_sec pls_base;
/* The single shared client context for this sec; guarded by a rwlock
 * (pls_lock, declaration elided here -- see read/write_lock usage below). */
40 struct ptlrpc_cli_ctx *pls_ctx;
/* Map an embedded ptlrpc_sec back to its containing plain_sec. */
43 static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
45 return container_of(sec, struct plain_sec, pls_base);
/* Forward declarations; the actual definitions are at the bottom of the
 * file (they reference the functions defined in between). */
48 static struct ptlrpc_sec_policy plain_policy;
49 static struct ptlrpc_ctx_ops plain_ctx_ops;
50 static struct ptlrpc_svc_ctx plain_svc_ctx;
53 * flavor flags (maximum 8 flags)
/* Flag bits are carried above the 12-bit base rpc flavor inside the
 * on-wire lm_secflvr word. */
55 #define PLAIN_WFLVR_FLAGS_OFFSET (12)
56 #define PLAIN_WFLVR_FLAG_BULK (1 << (0 + PLAIN_WFLVR_FLAGS_OFFSET))
57 #define PLAIN_WFLVR_FLAG_USER (1 << (1 + PLAIN_WFLVR_FLAGS_OFFSET))
/* Predicates for the two flag bits on a wire flavor value. */
59 #define PLAIN_WFLVR_HAS_BULK(wflvr) \
60 (((wflvr) & PLAIN_WFLVR_FLAG_BULK) != 0)
61 #define PLAIN_WFLVR_HAS_USER(wflvr) \
62 (((wflvr) & PLAIN_WFLVR_FLAG_USER) != 0)
/* Strip the flag bits, leaving only the base rpc flavor. */
64 #define PLAIN_WFLVR_TO_RPC(wflvr) \
65 ((wflvr) & ((1 << PLAIN_WFLVR_FLAGS_OFFSET) - 1))
68 * similar to null sec, temporarily use the third byte of lm_secflvr to identify
69 * the source sec part.
/* Stash the sender's sec part in byte 2 (bits 16..23) of lm_secflvr,
 * mirroring the null policy as described in the comment above. */
72 void plain_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
74 msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 16;
/* Recover the sec part encoded by plain_encode_sec_part(). */
78 enum lustre_sec_part plain_decode_sec_part(struct lustre_msg *msg)
80 return (msg->lm_secflvr >> 16) & 0xFF;
84 * for simplicity, plain policy rpc use fixed layout.
/* Every plain rpc buffer has exactly 3 segments at fixed offsets:
 * the embedded message, the optional user descriptor, and the optional
 * bulk checksum descriptor. */
86 #define PLAIN_PACK_SEGMENTS (3)
88 #define PLAIN_PACK_MSG_OFF (0)
89 #define PLAIN_PACK_USER_OFF (1)
90 #define PLAIN_PACK_BULK_OFF (2)
92 /****************************************
94 ****************************************/
/* Plain client contexts are installed already uptodate (see
 * plain_sec_install_ctx), so a refresh request indicates a logic error. */
97 int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
99 /* should never reach here */
/* Validate a plain client context (body elided in this view). */
105 int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
/*
 * Client side "sign": plain does no cryptographic signing -- it only
 * stamps the wire security flavor into the request buffer.  Starts from
 * the base rpc flavor, ORs in the per-request bulk/udesc flag bits, and
 * encodes the sender's sec part, then records the on-wire length.
 */
111 int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
113 struct lustre_msg_v2 *msg = req->rq_reqbuf;
116 msg->lm_secflvr = req->rq_flvr.sf_rpc;
117 if (req->rq_pack_bulk)
118 msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
119 if (req->rq_pack_udesc)
120 msg->lm_secflvr |= PLAIN_WFLVR_FLAG_USER;
122 plain_encode_sec_part(msg, ctx->cc_sec->ps_part);
/* recompute the on-wire data length from the current buffer layout */
124 req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
/*
 * Client side: sanity-check a reply buffer.  Verifies the fixed 3-segment
 * layout, rejects an unexpected user descriptor, requires the bulk flag
 * in the reply to match what we sent, unpacks the bulk checksum descriptor
 * if present, then exposes the embedded reply message to the caller.
 */
130 int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
132 struct lustre_msg *msg = req->rq_repbuf;
136 if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
137 CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
/* extract the base rpc flavor from the wire word */
141 wflvr = WIRE_FLVR_RPC(msg->lm_secflvr);
143 /* expect no user desc in reply */
144 if (PLAIN_WFLVR_HAS_USER(wflvr)) {
145 CERROR("Unexpected udesc flag in reply\n");
149 /* whether we sent with bulk or not, we expect the same in reply */
150 if (!equi(req->rq_pack_bulk == 1, PLAIN_WFLVR_HAS_BULK(wflvr))) {
151 CERROR("%s bulk checksum in reply\n",
152 req->rq_pack_bulk ? "Missing" : "Unexpected");
156 if (req->rq_pack_bulk &&
157 bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
158 CERROR("Mal-formed bulk checksum reply\n");
/* hand the embedded message up to the generic ptlrpc layer */
162 req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
163 req->rq_replen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
/*
 * Client side: compute and attach the bulk checksum for an outgoing bulk
 * transfer, storing it into the request's bulk segment.
 */
168 int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
169 struct ptlrpc_request *req,
170 struct ptlrpc_bulk_desc *desc)
172 LASSERT(req->rq_pack_bulk);
173 LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
175 return bulk_csum_cli_request(desc, req->rq_bulk_read,
176 req->rq_flvr.sf_bulk_csum,
178 PLAIN_PACK_BULK_OFF);
/*
 * Client side: verify the bulk checksum in the reply against what was
 * sent in the request, for a completed bulk transfer.
 */
182 int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
183 struct ptlrpc_request *req,
184 struct ptlrpc_bulk_desc *desc)
186 LASSERT(req->rq_pack_bulk);
187 LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
188 LASSERT(req->rq_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
190 return bulk_csum_cli_reply(desc, req->rq_bulk_read,
191 req->rq_reqbuf, PLAIN_PACK_BULK_OFF,
192 req->rq_repbuf, PLAIN_PACK_BULK_OFF);
195 /****************************************
197 ****************************************/
/*
 * Install (or reuse) the single client context of a plain sec.
 * A candidate context is allocated outside the lock; under pls_lock we
 * either find an existing ctx (take a ref, discard the candidate) or
 * initialize and publish the candidate.  Returns the ctx with a reference
 * held for the caller (NULL handling elided in this view).
 */
200 struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
202 struct ptlrpc_cli_ctx *ctx, *ctx_new;
/* speculative allocation done before taking the write lock */
204 OBD_ALLOC_PTR(ctx_new);
206 write_lock(&plsec->pls_lock);
208 ctx = plsec->pls_ctx;
/* existing ctx wins: bump its refcount and drop the candidate */
210 atomic_inc(&ctx->cc_refcount);
213 OBD_FREE_PTR(ctx_new);
214 } else if (ctx_new) {
/* initialize the freshly-allocated ctx as cached and uptodate */
217 atomic_set(&ctx->cc_refcount, 1); /* for cache */
218 ctx->cc_sec = &plsec->pls_base;
219 ctx->cc_ops = &plain_ctx_ops;
221 ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
222 ctx->cc_vcred.vc_uid = 0;
223 spin_lock_init(&ctx->cc_lock);
224 INIT_LIST_HEAD(&ctx->cc_req_list);
225 INIT_LIST_HEAD(&ctx->cc_gc_chain);
/* publish: the sec now owns one cached ctx */
227 plsec->pls_ctx = ctx;
228 atomic_inc(&plsec->pls_base.ps_nctx);
229 atomic_inc(&plsec->pls_base.ps_refcount);
231 atomic_inc(&ctx->cc_refcount); /* for caller */
234 write_unlock(&plsec->pls_lock);
/*
 * Final teardown of a plain sec.  All references and contexts must be
 * gone (asserted below); drops the import reference taken at create time
 * (freeing of plsec itself is elided in this view).
 */
240 void plain_destroy_sec(struct ptlrpc_sec *sec)
242 struct plain_sec *plsec = sec2plsec(sec);
245 LASSERT(sec->ps_policy == &plain_policy);
246 LASSERT(sec->ps_import);
247 LASSERT(atomic_read(&sec->ps_refcount) == 0);
248 LASSERT(atomic_read(&sec->ps_nctx) == 0);
249 LASSERT(plsec->pls_ctx == NULL);
251 class_import_put(sec->ps_import);
/* Kill a plain sec (body elided in this view; presumably a no-op or a
 * thin wrapper, as plain has no external credentials -- TODO confirm). */
258 void plain_kill_sec(struct ptlrpc_sec *sec)
/*
 * Create a plain sec for an import.  Rejects bulk encryption (plain only
 * supports checksums), allocates and initializes the plain_sec, and for a
 * reverse sec installs the client context immediately.  Error/return
 * paths are partially elided in this view.
 */
264 struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
265 struct ptlrpc_svc_ctx *svc_ctx,
266 struct sptlrpc_flavor *sf)
268 struct plain_sec *plsec;
269 struct ptlrpc_sec *sec;
270 struct ptlrpc_cli_ctx *ctx;
273 LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
/* plain can checksum bulk data but never encrypts it */
275 if (sf->sf_bulk_priv != BULK_PRIV_ALG_NULL) {
276 CERROR("plain policy don't support bulk encryption: %u\n",
281 OBD_ALLOC_PTR(plsec);
286 * initialize plain_sec
288 plsec->pls_lock = RW_LOCK_UNLOCKED;
289 plsec->pls_ctx = NULL;
291 sec = &plsec->pls_base;
292 sec->ps_policy = &plain_policy;
293 atomic_set(&sec->ps_refcount, 0);
294 atomic_set(&sec->ps_nctx, 0);
295 sec->ps_id = sptlrpc_get_next_secid();
/* hold a reference on the import for the sec's lifetime;
 * released in plain_destroy_sec() */
296 sec->ps_import = class_import_get(imp);
298 sec->ps_lock = SPIN_LOCK_UNLOCKED;
299 INIT_LIST_HEAD(&sec->ps_gc_list);
300 sec->ps_gc_interval = 0;
303 /* install ctx immediately if this is a reverse sec */
305 ctx = plain_sec_install_ctx(plsec);
/* install failed: tear down the partially-built sec */
307 plain_destroy_sec(sec);
/* drop the caller reference taken by plain_sec_install_ctx() */
310 sptlrpc_cli_ctx_put(ctx, 1);
/*
 * Look up the (single, shared) client context of a plain sec; vcred,
 * create and remove_dead are ignored here since plain has one ctx for
 * all users.  Takes a reference under the read lock; if no ctx is cached
 * yet, fall through to install one.
 */
317 struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
318 struct vfs_cred *vcred,
319 int create, int remove_dead)
321 struct plain_sec *plsec = sec2plsec(sec);
322 struct ptlrpc_cli_ctx *ctx;
325 read_lock(&plsec->pls_lock);
326 ctx = plsec->pls_ctx;
328 atomic_inc(&ctx->cc_refcount);
329 read_unlock(&plsec->pls_lock);
/* first use: create and cache the shared ctx */
331 if (unlikely(ctx == NULL))
332 ctx = plain_sec_install_ctx(plsec);
/*
 * Release a context whose last reference is gone: drop the sec's ctx
 * count and the sec reference the ctx held (freeing of the ctx itself is
 * elided in this view).  'sync' is unused by plain.
 */
338 void plain_release_ctx(struct ptlrpc_sec *sec,
339 struct ptlrpc_cli_ctx *ctx, int sync)
341 LASSERT(atomic_read(&sec->ps_refcount) > 0);
342 LASSERT(atomic_read(&sec->ps_nctx) > 0);
343 LASSERT(atomic_read(&ctx->cc_refcount) == 0);
344 LASSERT(ctx->cc_sec == sec);
348 atomic_dec(&sec->ps_nctx);
349 sptlrpc_sec_put(sec);
/*
 * Flush the cached context.  Since plain keeps a single shared ctx, a
 * per-uid flush is meaningless -- only a flush of 'all' detaches the ctx
 * (swap it out under the write lock, then drop the cache reference).
 */
353 int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
354 uid_t uid, int grace, int force)
356 struct plain_sec *plsec = sec2plsec(sec);
357 struct ptlrpc_cli_ctx *ctx;
360 /* do nothing unless caller want to flush for 'all' */
364 write_lock(&plsec->pls_lock);
365 ctx = plsec->pls_ctx;
366 plsec->pls_ctx = NULL;
367 write_unlock(&plsec->pls_lock);
/* drop the cache's reference outside the lock */
370 sptlrpc_cli_ctx_put(ctx, 1);
/*
 * Allocate and lay out the request buffer: fixed 3-segment format with
 * the embedded message plus optional user-descriptor and bulk-checksum
 * segments.  Buffers may come from a preallocated pool (rq_pool) or be
 * freshly allocated rounded up to a power of two.
 */
375 int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
376 struct ptlrpc_request *req,
379 int buflens[PLAIN_PACK_SEGMENTS] = { 0, };
383 buflens[PLAIN_PACK_MSG_OFF] = msgsize;
385 if (req->rq_pack_udesc)
386 buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();
388 if (req->rq_pack_bulk) {
389 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
391 buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
392 req->rq_flvr.sf_bulk_csum, 1,
396 alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
/* no buffer yet: allocate one (pooled requests arrive pre-buffered) */
398 if (!req->rq_reqbuf) {
399 LASSERT(!req->rq_pool);
401 alloc_len = size_roundup_power2(alloc_len);
402 OBD_ALLOC(req->rq_reqbuf, alloc_len);
406 req->rq_reqbuf_len = alloc_len;
/* pooled buffer: must already be large enough, just clear it */
408 LASSERT(req->rq_pool);
409 LASSERT(req->rq_reqbuf_len >= alloc_len);
410 memset(req->rq_reqbuf, 0, alloc_len);
413 lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
414 req->rq_reqmsg = lustre_msg_buf_v2(req->rq_reqbuf, 0, 0);
/* pack the current process's user descriptor into its segment now */
416 if (req->rq_pack_udesc)
417 sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);
/*
 * Free the request buffer and clear the request's pointers into it
 * (pool-owned buffers are presumably skipped by an elided condition --
 * TODO confirm against the full source).
 */
423 void plain_free_reqbuf(struct ptlrpc_sec *sec,
424 struct ptlrpc_request *req)
428 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
429 req->rq_reqbuf = NULL;
430 req->rq_reqbuf_len = 0;
433 req->rq_reqmsg = NULL;
/*
 * Allocate the client-side reply buffer: same fixed 3-segment layout as
 * the request (no user descriptor is ever expected in a reply, so only
 * the message and optional bulk-checksum segments are sized).
 */
438 int plain_alloc_repbuf(struct ptlrpc_sec *sec,
439 struct ptlrpc_request *req,
442 int buflens[PLAIN_PACK_SEGMENTS] = { 0, };
446 buflens[PLAIN_PACK_MSG_OFF] = msgsize;
448 if (req->rq_pack_bulk) {
449 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
451 buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
452 req->rq_flvr.sf_bulk_csum, 0,
456 alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
457 alloc_len = size_roundup_power2(alloc_len);
459 OBD_ALLOC(req->rq_repbuf, alloc_len);
463 req->rq_repbuf_len = alloc_len;
/* Free the reply buffer and clear the request's pointers into it. */
468 void plain_free_repbuf(struct ptlrpc_sec *sec,
469 struct ptlrpc_request *req)
472 OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
473 req->rq_repbuf = NULL;
474 req->rq_repbuf_len = 0;
476 req->rq_repmsg = NULL;
/*
 * Grow one segment of the embedded request message to 'newsize'.
 * Computes the new embedded-message and wrapper-buffer sizes by briefly
 * patching the length tables (restored immediately after), reallocates
 * the wrapper if it is now too small, and finally performs the in-place
 * enlargement on both the wrapper and the embedded message.
 */
481 int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
482 struct ptlrpc_request *req,
483 int segment, int newsize)
485 struct lustre_msg *newbuf;
487 int newmsg_size, newbuf_size;
490 LASSERT(req->rq_reqbuf);
491 LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
492 LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
495 /* compute new embedded msg size. */
496 oldsize = req->rq_reqmsg->lm_buflens[segment];
497 req->rq_reqmsg->lm_buflens[segment] = newsize;
498 newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
499 req->rq_reqmsg->lm_buflens);
500 req->rq_reqmsg->lm_buflens[segment] = oldsize;
502 /* compute new wrapper msg size. */
503 oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
504 req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
505 newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
506 req->rq_reqbuf->lm_buflens);
507 req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;
509 /* request from pool should always have enough buffer */
510 LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
512 if (req->rq_reqbuf_len < newbuf_size) {
513 newbuf_size = size_roundup_power2(newbuf_size);
515 OBD_ALLOC(newbuf, newbuf_size);
/* copy old contents, swap buffers, and re-derive rq_reqmsg which
 * pointed into the old allocation */
519 memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
521 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
522 req->rq_reqbuf = newbuf;
523 req->rq_reqbuf_len = newbuf_size;
524 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
525 PLAIN_PACK_MSG_OFF, 0);
528 _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
530 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
532 req->rq_reqlen = newmsg_size;
536 /****************************************
538 ****************************************/
/* The single, statically-allocated server-side context shared by every
 * plain request; starts with one reference held by this definition. */
540 static struct ptlrpc_svc_ctx plain_svc_ctx = {
541 .sc_refcount = ATOMIC_INIT(1),
542 .sc_policy = &plain_policy,
/*
 * Server side: accept an incoming plain request.  Validates the segment
 * count and flavor, decodes the sender's sec part, unpacks the optional
 * user descriptor and bulk checksum, then attaches the shared static
 * service context to the request.
 */
546 int plain_accept(struct ptlrpc_request *req)
548 struct lustre_msg *msg = req->rq_reqbuf;
551 LASSERT(RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == SPTLRPC_POLICY_PLAIN);
553 if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
554 CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
558 if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_PLAIN) {
559 CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
/* recover the sender's sec part encoded into lm_secflvr */
563 req->rq_sp_from = plain_decode_sec_part(msg);
565 if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
566 if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF)) {
567 CERROR("Mal-formed user descriptor\n");
571 req->rq_pack_udesc = 1;
572 req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
575 if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr)) {
576 if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
577 CERROR("Mal-formed bulk checksum request\n");
581 req->rq_pack_bulk = 1;
/* expose the embedded message to the request-handling layer */
584 req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
585 req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
587 req->rq_svc_ctx = &plain_svc_ctx;
588 atomic_inc(&req->rq_svc_ctx->sc_refcount);
/*
 * Server side: allocate the reply state.  The reply buffer is embedded
 * directly after the reply-state struct in a single allocation; a bulk
 * segment is sized from the checksum algorithm found in the request's
 * bulk descriptor.  A preallocated reply state (req->rq_reply_state) is
 * reused when present.
 */
594 int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
596 struct ptlrpc_reply_state *rs;
597 struct ptlrpc_bulk_sec_desc *bsd;
598 int buflens[PLAIN_PACK_SEGMENTS] = { 0, };
599 int rs_size = sizeof(*rs);
602 LASSERT(msgsize % 8 == 0);
604 buflens[PLAIN_PACK_MSG_OFF] = msgsize;
606 if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write)) {
607 bsd = lustre_msg_buf(req->rq_reqbuf,
608 PLAIN_PACK_BULK_OFF, sizeof(*bsd));
/* reply's bulk segment mirrors the request's checksum algorithm */
611 buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
612 bsd->bsd_csum_alg, 0,
615 rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
/* reuse a preallocated reply state if the server provided one */
617 rs = req->rq_reply_state;
621 LASSERT(rs->rs_size >= rs_size);
623 OBD_ALLOC(rs, rs_size);
627 rs->rs_size = rs_size;
630 rs->rs_svc_ctx = req->rq_svc_ctx;
631 atomic_inc(&req->rq_svc_ctx->sc_refcount);
/* reply buffer lives immediately after the reply-state struct */
632 rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
633 rs->rs_repbuf_len = rs_size - sizeof(*rs);
635 lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
636 rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);
638 req->rq_reply_state = rs;
/*
 * Server side: release a reply state.  Drops the service-context
 * reference taken in plain_alloc_rs(); preallocated reply states are
 * owned elsewhere and are not freed here.
 */
643 void plain_free_rs(struct ptlrpc_reply_state *rs)
647 LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
648 atomic_dec(&rs->rs_svc_ctx->sc_refcount);
650 if (!rs->rs_prealloc)
651 OBD_FREE(rs, rs->rs_size);
/*
 * Server side: finalize the reply.  Shrinks the message segment if the
 * actual reply is smaller than allocated, stamps the wire flavor (with
 * the bulk flag if a checksum is attached), and records the on-wire
 * reply length.
 */
656 int plain_authorize(struct ptlrpc_request *req)
658 struct ptlrpc_reply_state *rs = req->rq_reply_state;
659 struct lustre_msg_v2 *msg = rs->rs_repbuf;
666 if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
667 len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
670 len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
672 msg->lm_secflvr = req->rq_flvr.sf_rpc;
673 if (req->rq_pack_bulk)
674 msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
676 rs->rs_repdata_len = len;
/*
 * Server side: verify the bulk checksum of an incoming bulk transfer,
 * reading the client's descriptor from the request and writing the
 * server's into the reply.
 */
681 int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
682 struct ptlrpc_bulk_desc *desc)
684 struct ptlrpc_reply_state *rs = req->rq_reply_state;
687 LASSERT(req->rq_pack_bulk);
688 LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
689 LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
691 return bulk_csum_svc(desc, req->rq_bulk_read,
692 lustre_msg_buf(req->rq_reqbuf,
693 PLAIN_PACK_BULK_OFF, 0),
694 lustre_msg_buflen(req->rq_reqbuf,
695 PLAIN_PACK_BULK_OFF),
696 lustre_msg_buf(rs->rs_repbuf,
697 PLAIN_PACK_BULK_OFF, 0),
698 lustre_msg_buflen(rs->rs_repbuf,
699 PLAIN_PACK_BULK_OFF));
/*
 * Server side: compute the bulk checksum for an outgoing bulk transfer.
 * Same bulk_csum_svc() call shape as plain_svc_unwrap_bulk(); the
 * direction is distinguished by req->rq_bulk_read.
 */
703 int plain_svc_wrap_bulk(struct ptlrpc_request *req,
704 struct ptlrpc_bulk_desc *desc)
706 struct ptlrpc_reply_state *rs = req->rq_reply_state;
709 LASSERT(req->rq_pack_bulk);
710 LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
711 LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
713 return bulk_csum_svc(desc, req->rq_bulk_read,
714 lustre_msg_buf(req->rq_reqbuf,
715 PLAIN_PACK_BULK_OFF, 0),
716 lustre_msg_buflen(req->rq_reqbuf,
717 PLAIN_PACK_BULK_OFF),
718 lustre_msg_buf(rs->rs_repbuf,
719 PLAIN_PACK_BULK_OFF, 0),
720 lustre_msg_buflen(rs->rs_repbuf,
721 PLAIN_PACK_BULK_OFF));
/* Client context operations for the plain policy. */
724 static struct ptlrpc_ctx_ops plain_ctx_ops = {
725 .refresh = plain_ctx_refresh,
726 .validate = plain_ctx_validate,
727 .sign = plain_ctx_sign,
728 .verify = plain_ctx_verify,
729 .wrap_bulk = plain_cli_wrap_bulk,
730 .unwrap_bulk = plain_cli_unwrap_bulk,
/* Client-side sec operations for the plain policy. */
733 static struct ptlrpc_sec_cops plain_sec_cops = {
734 .create_sec = plain_create_sec,
735 .destroy_sec = plain_destroy_sec,
736 .kill_sec = plain_kill_sec,
737 .lookup_ctx = plain_lookup_ctx,
738 .release_ctx = plain_release_ctx,
739 .flush_ctx_cache = plain_flush_ctx_cache,
740 .alloc_reqbuf = plain_alloc_reqbuf,
741 .alloc_repbuf = plain_alloc_repbuf,
742 .free_reqbuf = plain_free_reqbuf,
743 .free_repbuf = plain_free_repbuf,
744 .enlarge_reqbuf = plain_enlarge_reqbuf,
/* Server-side sec operations for the plain policy. */
747 static struct ptlrpc_sec_sops plain_sec_sops = {
748 .accept = plain_accept,
749 .alloc_rs = plain_alloc_rs,
750 .authorize = plain_authorize,
751 .free_rs = plain_free_rs,
752 .unwrap_bulk = plain_svc_unwrap_bulk,
753 .wrap_bulk = plain_svc_wrap_bulk,
/* Policy descriptor tying the client and server op tables together;
 * registered with sptlrpc in sptlrpc_plain_init(). */
756 static struct ptlrpc_sec_policy plain_policy = {
757 .sp_owner = THIS_MODULE,
759 .sp_policy = SPTLRPC_POLICY_PLAIN,
760 .sp_cops = &plain_sec_cops,
761 .sp_sops = &plain_sec_sops,
/* Module init: register the plain policy with the sptlrpc framework. */
764 int sptlrpc_plain_init(void)
768 rc = sptlrpc_register_policy(&plain_policy);
770 CERROR("failed to register: %d\n", rc);
/* Module teardown: unregister the plain policy; failure is only logged. */
775 void sptlrpc_plain_fini(void)
779 rc = sptlrpc_unregister_policy(&plain_policy);
781 CERROR("cannot unregister: %d\n", rc);