1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2006-2007 Cluster File Systems, Inc.
5 * Author: Eric Mei <ericm@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 # define EXPORT_SYMTAB
26 #define DEBUG_SUBSYSTEM S_SEC
29 #include <liblustre.h>
32 #include <obd_support.h>
33 #include <obd_class.h>
34 #include <lustre_net.h>
35 #include <lustre_sec.h>
38 struct ptlrpc_sec pls_base;
40 struct ptlrpc_cli_ctx *pls_ctx;
43 static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
45 return container_of(sec, struct plain_sec, pls_base);
48 static struct ptlrpc_sec_policy plain_policy;
49 static struct ptlrpc_ctx_ops plain_ctx_ops;
50 static struct ptlrpc_svc_ctx plain_svc_ctx;
53 * flavor flags (maximum 8 flags)
55 #define PLAIN_WFLVR_FLAGS_OFFSET (12)
56 #define PLAIN_WFLVR_FLAG_BULK (1 << (0 + PLAIN_WFLVR_FLAGS_OFFSET))
57 #define PLAIN_WFLVR_FLAG_USER (1 << (1 + PLAIN_WFLVR_FLAGS_OFFSET))
59 #define PLAIN_WFLVR_HAS_BULK(wflvr) \
60 (((wflvr) & PLAIN_WFLVR_FLAG_BULK) != 0)
61 #define PLAIN_WFLVR_HAS_USER(wflvr) \
62 (((wflvr) & PLAIN_WFLVR_FLAG_USER) != 0)
64 #define PLAIN_WFLVR_TO_RPC(wflvr) \
65 ((wflvr) & ((1 << PLAIN_WFLVR_FLAGS_OFFSET) - 1))
68 * similar to null sec, temporarily use the third byte of lm_secflvr to identify
69 * the source sec part.
72 void plain_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
74 msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 16;
78 enum lustre_sec_part plain_decode_sec_part(struct lustre_msg *msg)
80 return (msg->lm_secflvr >> 16) & 0xFF;
84 * for simplicity, plain policy rpc use fixed layout.
86 #define PLAIN_PACK_SEGMENTS (3)
88 #define PLAIN_PACK_MSG_OFF (0)
89 #define PLAIN_PACK_USER_OFF (1)
90 #define PLAIN_PACK_BULK_OFF (2)
/****************************************
 * cli_ctx apis                         *
 ****************************************/
/*
 * .refresh hook.  Plain contexts are always uptodate, so the generic code
 * must never ask us to refresh one.
 */
static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LASSERT(0);
        return 0;
}
105 int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
111 int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
113 struct lustre_msg_v2 *msg = req->rq_reqbuf;
116 msg->lm_secflvr = req->rq_flvr.sf_rpc;
117 if (req->rq_pack_bulk)
118 msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
119 if (req->rq_pack_udesc)
120 msg->lm_secflvr |= PLAIN_WFLVR_FLAG_USER;
122 plain_encode_sec_part(msg, ctx->cc_sec->ps_part);
124 req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
130 int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
132 struct lustre_msg *msg = req->rq_repbuf;
135 if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
136 CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
140 /* expect no user desc in reply */
141 if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
142 CERROR("Unexpected udesc flag in reply\n");
146 /* whether we sent with bulk or not, we expect the same in reply */
147 if (!equi(req->rq_pack_bulk == 1,
148 PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr))) {
149 CERROR("%s bulk checksum in reply\n",
150 req->rq_pack_bulk ? "Missing" : "Unexpected");
154 if (req->rq_pack_bulk &&
155 bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
156 CERROR("Mal-formed bulk checksum reply\n");
160 req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
161 req->rq_replen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
166 int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
167 struct ptlrpc_request *req,
168 struct ptlrpc_bulk_desc *desc)
170 LASSERT(req->rq_pack_bulk);
171 LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
173 return bulk_csum_cli_request(desc, req->rq_bulk_read,
174 req->rq_flvr.sf_bulk_hash,
176 PLAIN_PACK_BULK_OFF);
180 int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
181 struct ptlrpc_request *req,
182 struct ptlrpc_bulk_desc *desc)
184 LASSERT(req->rq_pack_bulk);
185 LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
186 LASSERT(req->rq_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
188 return bulk_csum_cli_reply(desc, req->rq_bulk_read,
189 req->rq_reqbuf, PLAIN_PACK_BULK_OFF,
190 req->rq_repbuf, PLAIN_PACK_BULK_OFF);
/****************************************
 * sec apis                             *
 ****************************************/
198 struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
200 struct ptlrpc_cli_ctx *ctx, *ctx_new;
202 OBD_ALLOC_PTR(ctx_new);
204 write_lock(&plsec->pls_lock);
206 ctx = plsec->pls_ctx;
208 atomic_inc(&ctx->cc_refcount);
211 OBD_FREE_PTR(ctx_new);
212 } else if (ctx_new) {
215 atomic_set(&ctx->cc_refcount, 1); /* for cache */
216 ctx->cc_sec = &plsec->pls_base;
217 ctx->cc_ops = &plain_ctx_ops;
219 ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
220 ctx->cc_vcred.vc_uid = 0;
221 spin_lock_init(&ctx->cc_lock);
222 INIT_LIST_HEAD(&ctx->cc_req_list);
223 INIT_LIST_HEAD(&ctx->cc_gc_chain);
225 plsec->pls_ctx = ctx;
226 atomic_inc(&plsec->pls_base.ps_nctx);
227 atomic_inc(&plsec->pls_base.ps_refcount);
229 atomic_inc(&ctx->cc_refcount); /* for caller */
232 write_unlock(&plsec->pls_lock);
238 void plain_destroy_sec(struct ptlrpc_sec *sec)
240 struct plain_sec *plsec = sec2plsec(sec);
243 LASSERT(sec->ps_policy == &plain_policy);
244 LASSERT(sec->ps_import);
245 LASSERT(atomic_read(&sec->ps_refcount) == 0);
246 LASSERT(atomic_read(&sec->ps_nctx) == 0);
247 LASSERT(plsec->pls_ctx == NULL);
249 class_import_put(sec->ps_import);
256 void plain_kill_sec(struct ptlrpc_sec *sec)
262 struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
263 struct ptlrpc_svc_ctx *svc_ctx,
264 struct sptlrpc_flavor *sf)
266 struct plain_sec *plsec;
267 struct ptlrpc_sec *sec;
268 struct ptlrpc_cli_ctx *ctx;
271 LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
273 if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL) {
274 CERROR("plain policy don't support bulk cipher: %u\n",
279 OBD_ALLOC_PTR(plsec);
284 * initialize plain_sec
286 plsec->pls_lock = RW_LOCK_UNLOCKED;
287 plsec->pls_ctx = NULL;
289 sec = &plsec->pls_base;
290 sec->ps_policy = &plain_policy;
291 atomic_set(&sec->ps_refcount, 0);
292 atomic_set(&sec->ps_nctx, 0);
293 sec->ps_id = sptlrpc_get_next_secid();
294 sec->ps_import = class_import_get(imp);
296 sec->ps_lock = SPIN_LOCK_UNLOCKED;
297 INIT_LIST_HEAD(&sec->ps_gc_list);
298 sec->ps_gc_interval = 0;
301 /* install ctx immediately if this is a reverse sec */
303 ctx = plain_sec_install_ctx(plsec);
305 plain_destroy_sec(sec);
308 sptlrpc_cli_ctx_put(ctx, 1);
315 struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
316 struct vfs_cred *vcred,
317 int create, int remove_dead)
319 struct plain_sec *plsec = sec2plsec(sec);
320 struct ptlrpc_cli_ctx *ctx;
323 read_lock(&plsec->pls_lock);
324 ctx = plsec->pls_ctx;
326 atomic_inc(&ctx->cc_refcount);
327 read_unlock(&plsec->pls_lock);
329 if (unlikely(ctx == NULL))
330 ctx = plain_sec_install_ctx(plsec);
336 void plain_release_ctx(struct ptlrpc_sec *sec,
337 struct ptlrpc_cli_ctx *ctx, int sync)
339 LASSERT(atomic_read(&sec->ps_refcount) > 0);
340 LASSERT(atomic_read(&sec->ps_nctx) > 0);
341 LASSERT(atomic_read(&ctx->cc_refcount) == 0);
342 LASSERT(ctx->cc_sec == sec);
346 atomic_dec(&sec->ps_nctx);
347 sptlrpc_sec_put(sec);
351 int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
352 uid_t uid, int grace, int force)
354 struct plain_sec *plsec = sec2plsec(sec);
355 struct ptlrpc_cli_ctx *ctx;
358 /* do nothing unless caller want to flush for 'all' */
362 write_lock(&plsec->pls_lock);
363 ctx = plsec->pls_ctx;
364 plsec->pls_ctx = NULL;
365 write_unlock(&plsec->pls_lock);
368 sptlrpc_cli_ctx_put(ctx, 1);
373 int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
374 struct ptlrpc_request *req,
377 int buflens[PLAIN_PACK_SEGMENTS] = { 0, };
381 buflens[PLAIN_PACK_MSG_OFF] = msgsize;
383 if (req->rq_pack_udesc)
384 buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();
386 if (req->rq_pack_bulk) {
387 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
389 buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
390 req->rq_flvr.sf_bulk_hash, 1,
394 alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
396 if (!req->rq_reqbuf) {
397 LASSERT(!req->rq_pool);
399 alloc_len = size_roundup_power2(alloc_len);
400 OBD_ALLOC(req->rq_reqbuf, alloc_len);
404 req->rq_reqbuf_len = alloc_len;
406 LASSERT(req->rq_pool);
407 LASSERT(req->rq_reqbuf_len >= alloc_len);
408 memset(req->rq_reqbuf, 0, alloc_len);
411 lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
412 req->rq_reqmsg = lustre_msg_buf_v2(req->rq_reqbuf, 0, 0);
414 if (req->rq_pack_udesc)
415 sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);
421 void plain_free_reqbuf(struct ptlrpc_sec *sec,
422 struct ptlrpc_request *req)
426 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
427 req->rq_reqbuf = NULL;
428 req->rq_reqbuf_len = 0;
431 req->rq_reqmsg = NULL;
436 int plain_alloc_repbuf(struct ptlrpc_sec *sec,
437 struct ptlrpc_request *req,
440 int buflens[PLAIN_PACK_SEGMENTS] = { 0, };
444 buflens[PLAIN_PACK_MSG_OFF] = msgsize;
446 if (req->rq_pack_bulk) {
447 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
449 buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
450 req->rq_flvr.sf_bulk_hash, 0,
454 alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
455 alloc_len = size_roundup_power2(alloc_len);
457 OBD_ALLOC(req->rq_repbuf, alloc_len);
461 req->rq_repbuf_len = alloc_len;
466 void plain_free_repbuf(struct ptlrpc_sec *sec,
467 struct ptlrpc_request *req)
470 OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
471 req->rq_repbuf = NULL;
472 req->rq_repbuf_len = 0;
474 req->rq_repmsg = NULL;
479 int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
480 struct ptlrpc_request *req,
481 int segment, int newsize)
483 struct lustre_msg *newbuf;
485 int newmsg_size, newbuf_size;
488 LASSERT(req->rq_reqbuf);
489 LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
490 LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
493 /* compute new embedded msg size. */
494 oldsize = req->rq_reqmsg->lm_buflens[segment];
495 req->rq_reqmsg->lm_buflens[segment] = newsize;
496 newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
497 req->rq_reqmsg->lm_buflens);
498 req->rq_reqmsg->lm_buflens[segment] = oldsize;
500 /* compute new wrapper msg size. */
501 oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
502 req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
503 newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
504 req->rq_reqbuf->lm_buflens);
505 req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;
507 /* request from pool should always have enough buffer */
508 LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
510 if (req->rq_reqbuf_len < newbuf_size) {
511 newbuf_size = size_roundup_power2(newbuf_size);
513 OBD_ALLOC(newbuf, newbuf_size);
517 memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
519 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
520 req->rq_reqbuf = newbuf;
521 req->rq_reqbuf_len = newbuf_size;
522 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
523 PLAIN_PACK_MSG_OFF, 0);
526 _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
528 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
530 req->rq_reqlen = newmsg_size;
/****************************************
 * service apis                         *
 ****************************************/
538 static struct ptlrpc_svc_ctx plain_svc_ctx = {
539 .sc_refcount = ATOMIC_INIT(1),
540 .sc_policy = &plain_policy,
544 int plain_accept(struct ptlrpc_request *req)
546 struct lustre_msg *msg = req->rq_reqbuf;
549 LASSERT(RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == SPTLRPC_POLICY_PLAIN);
551 if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
552 CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
556 if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_PLAIN) {
557 CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
561 req->rq_sp_from = plain_decode_sec_part(msg);
563 if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
564 if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF)) {
565 CERROR("Mal-formed user descriptor\n");
569 req->rq_pack_udesc = 1;
570 req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
573 if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr)) {
574 if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
575 CERROR("Mal-formed bulk checksum request\n");
579 req->rq_pack_bulk = 1;
582 req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
583 req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
585 req->rq_svc_ctx = &plain_svc_ctx;
586 atomic_inc(&req->rq_svc_ctx->sc_refcount);
592 int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
594 struct ptlrpc_reply_state *rs;
595 struct ptlrpc_bulk_sec_desc *bsd;
596 int buflens[PLAIN_PACK_SEGMENTS] = { 0, };
597 int rs_size = sizeof(*rs);
600 LASSERT(msgsize % 8 == 0);
602 buflens[PLAIN_PACK_MSG_OFF] = msgsize;
604 if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write)) {
605 bsd = lustre_msg_buf(req->rq_reqbuf,
606 PLAIN_PACK_BULK_OFF, sizeof(*bsd));
609 buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
610 bsd->bsd_hash_alg, 0,
613 rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
615 rs = req->rq_reply_state;
619 LASSERT(rs->rs_size >= rs_size);
621 OBD_ALLOC(rs, rs_size);
625 rs->rs_size = rs_size;
628 rs->rs_svc_ctx = req->rq_svc_ctx;
629 atomic_inc(&req->rq_svc_ctx->sc_refcount);
630 rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
631 rs->rs_repbuf_len = rs_size - sizeof(*rs);
633 lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
634 rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);
636 req->rq_reply_state = rs;
641 void plain_free_rs(struct ptlrpc_reply_state *rs)
645 LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
646 atomic_dec(&rs->rs_svc_ctx->sc_refcount);
648 if (!rs->rs_prealloc)
649 OBD_FREE(rs, rs->rs_size);
654 int plain_authorize(struct ptlrpc_request *req)
656 struct ptlrpc_reply_state *rs = req->rq_reply_state;
657 struct lustre_msg_v2 *msg = rs->rs_repbuf;
664 if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
665 len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
668 len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
670 msg->lm_secflvr = req->rq_flvr.sf_rpc;
671 if (req->rq_pack_bulk)
672 msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
674 rs->rs_repdata_len = len;
679 int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
680 struct ptlrpc_bulk_desc *desc)
682 struct ptlrpc_reply_state *rs = req->rq_reply_state;
685 LASSERT(req->rq_pack_bulk);
686 LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
687 LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
689 return bulk_csum_svc(desc, req->rq_bulk_read,
690 lustre_msg_buf(req->rq_reqbuf,
691 PLAIN_PACK_BULK_OFF, 0),
692 lustre_msg_buflen(req->rq_reqbuf,
693 PLAIN_PACK_BULK_OFF),
694 lustre_msg_buf(rs->rs_repbuf,
695 PLAIN_PACK_BULK_OFF, 0),
696 lustre_msg_buflen(rs->rs_repbuf,
697 PLAIN_PACK_BULK_OFF));
701 int plain_svc_wrap_bulk(struct ptlrpc_request *req,
702 struct ptlrpc_bulk_desc *desc)
704 struct ptlrpc_reply_state *rs = req->rq_reply_state;
707 LASSERT(req->rq_pack_bulk);
708 LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
709 LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
711 return bulk_csum_svc(desc, req->rq_bulk_read,
712 lustre_msg_buf(req->rq_reqbuf,
713 PLAIN_PACK_BULK_OFF, 0),
714 lustre_msg_buflen(req->rq_reqbuf,
715 PLAIN_PACK_BULK_OFF),
716 lustre_msg_buf(rs->rs_repbuf,
717 PLAIN_PACK_BULK_OFF, 0),
718 lustre_msg_buflen(rs->rs_repbuf,
719 PLAIN_PACK_BULK_OFF));
722 static struct ptlrpc_ctx_ops plain_ctx_ops = {
723 .refresh = plain_ctx_refresh,
724 .validate = plain_ctx_validate,
725 .sign = plain_ctx_sign,
726 .verify = plain_ctx_verify,
727 .wrap_bulk = plain_cli_wrap_bulk,
728 .unwrap_bulk = plain_cli_unwrap_bulk,
731 static struct ptlrpc_sec_cops plain_sec_cops = {
732 .create_sec = plain_create_sec,
733 .destroy_sec = plain_destroy_sec,
734 .kill_sec = plain_kill_sec,
735 .lookup_ctx = plain_lookup_ctx,
736 .release_ctx = plain_release_ctx,
737 .flush_ctx_cache = plain_flush_ctx_cache,
738 .alloc_reqbuf = plain_alloc_reqbuf,
739 .alloc_repbuf = plain_alloc_repbuf,
740 .free_reqbuf = plain_free_reqbuf,
741 .free_repbuf = plain_free_repbuf,
742 .enlarge_reqbuf = plain_enlarge_reqbuf,
745 static struct ptlrpc_sec_sops plain_sec_sops = {
746 .accept = plain_accept,
747 .alloc_rs = plain_alloc_rs,
748 .authorize = plain_authorize,
749 .free_rs = plain_free_rs,
750 .unwrap_bulk = plain_svc_unwrap_bulk,
751 .wrap_bulk = plain_svc_wrap_bulk,
754 static struct ptlrpc_sec_policy plain_policy = {
755 .sp_owner = THIS_MODULE,
757 .sp_policy = SPTLRPC_POLICY_PLAIN,
758 .sp_cops = &plain_sec_cops,
759 .sp_sops = &plain_sec_sops,
762 int sptlrpc_plain_init(void)
766 rc = sptlrpc_register_policy(&plain_policy);
768 CERROR("failed to register: %d\n", rc);
773 void sptlrpc_plain_fini(void)
777 rc = sptlrpc_unregister_policy(&plain_policy);
779 CERROR("cannot unregister: %d\n", rc);