/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC

#ifndef __KERNEL__
#include <liblustre.h>
#endif

#include <obd_support.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>

struct plain_sec {
        struct ptlrpc_sec       pls_base;
        rwlock_t                pls_lock;
        struct ptlrpc_cli_ctx  *pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
        return container_of(sec, struct plain_sec, pls_base);
}

static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops    plain_ctx_ops;
static struct ptlrpc_svc_ctx    plain_svc_ctx;

static unsigned int plain_at_offset;

/*
 * flavor flags (maximum 8 flags)
 */
#define PLAIN_WFLVR_FLAGS_OFFSET        (12)

#define PLAIN_WFLVR_FLAG_BULK           (1 << (0 + PLAIN_WFLVR_FLAGS_OFFSET))
#define PLAIN_WFLVR_FLAG_USER           (1 << (1 + PLAIN_WFLVR_FLAGS_OFFSET))

#define PLAIN_WFLVR_HAS_BULK(wflvr)     \
        (((wflvr) & PLAIN_WFLVR_FLAG_BULK) != 0)
#define PLAIN_WFLVR_HAS_USER(wflvr)     \
        (((wflvr) & PLAIN_WFLVR_FLAG_USER) != 0)

#define PLAIN_WFLVR_TO_RPC(wflvr)       \
        ((wflvr) & ((1 << PLAIN_WFLVR_FLAGS_OFFSET) - 1))

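/*
 * Illustration (added by the editor, not from the original source): the wire
 * flavor carries the base rpc flavor in bits 0-11 and the per-request flags
 * from bit 12 up, so a plain request carrying a bulk checksum looks like:
 *
 *      wflvr = SPTLRPC_FLVR_PLAIN | PLAIN_WFLVR_FLAG_BULK;
 *      PLAIN_WFLVR_HAS_BULK(wflvr)  -> 1
 *      PLAIN_WFLVR_TO_RPC(wflvr)    -> SPTLRPC_FLVR_PLAIN
 */
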
/*
 * similar to null sec, temporarily use the third byte of lm_secflvr to
 * identify the source sec part.
 */
static inline
void plain_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
{
        msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 16;
}

static inline
enum lustre_sec_part plain_decode_sec_part(struct lustre_msg *msg)
{
        return (msg->lm_secflvr >> 16) & 0xFF;
}

/*
 * for simplicity, plain policy rpcs use a fixed layout.
 */
#define PLAIN_PACK_SEGMENTS             (3)

#define PLAIN_PACK_MSG_OFF              (0)
#define PLAIN_PACK_USER_OFF             (1)
#define PLAIN_PACK_BULK_OFF             (2)

/****************************************
 * cli_ctx apis                         *
 ****************************************/

static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LBUG();
        return 0;
}

static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
        return 0;
}

static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg_v2 *msg = req->rq_reqbuf;
        ENTRY;

        msg->lm_secflvr = req->rq_flvr.sf_rpc;
        if (req->rq_pack_bulk)
                msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
        if (req->rq_pack_udesc)
                msg->lm_secflvr |= PLAIN_WFLVR_FLAG_USER;

        plain_encode_sec_part(msg, ctx->cc_sec->ps_part);

        req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
                                                 msg->lm_buflens);
        RETURN(0);
}

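/*
 * Added note: plain replies are not signed; the crc32 checksum below is
 * computed and verified only for early replies (plain_authorize() fills in
 * lm_cksum only on the early-reply path).  Bulk data is covered separately
 * by the bulk checksum descriptor.
 */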
static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_repdata;
        __u32              cksum;
        ENTRY;

        if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
                RETURN(-EPROTO);
        }

        /* expect no user desc in reply */
        if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
                CERROR("Unexpected udesc flag in reply\n");
                RETURN(-EPROTO);
        }

        if (unlikely(req->rq_early)) {
                cksum = crc32_le(!(__u32) 0,
                                 lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                                 lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF));
                if (cksum != msg->lm_cksum) {
                        CWARN("early reply checksum mismatch: %08x != %08x\n",
                              cpu_to_le32(cksum), msg->lm_cksum);
                        RETURN(-EINVAL);
                }
        }

        /* whether we sent with bulk or not, we expect the same
         * in reply, except for early reply */
        if (!req->rq_early &&
            !equi(req->rq_pack_bulk == 1,
                  PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr))) {
                CERROR("%s bulk checksum in reply\n",
                       req->rq_pack_bulk ? "Missing" : "Unexpected");
                RETURN(-EPROTO);
        }

        if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr) &&
            bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
                CERROR("Mal-formed bulk checksum reply\n");
                RETURN(-EINVAL);
        }

        req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
        RETURN(0);
}

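/*
 * Added note: for bulk i/o the plain policy still carries a bulk checksum
 * descriptor in segment PLAIN_PACK_BULK_OFF; the two helpers below simply
 * delegate to the generic bulk_csum_cli_request()/bulk_csum_cli_reply()
 * routines.
 */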
static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                        struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_cli_request(desc, req->rq_bulk_read,
                                     req->rq_flvr.sf_bulk_hash,
                                     req->rq_reqbuf,
                                     PLAIN_PACK_BULK_OFF);
}

static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
        LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_cli_reply(desc, req->rq_bulk_read,
                                   req->rq_reqbuf, PLAIN_PACK_BULK_OFF,
                                   req->rq_repdata, PLAIN_PACK_BULK_OFF);
}

/****************************************
 * sec apis                             *
 ****************************************/

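/*
 * Added note: a plain sec caches at most one cli ctx, shared by all users.
 * plain_sec_install_ctx() allocates a candidate outside the lock, then under
 * pls_lock either discards it (someone else installed a ctx first) or
 * initializes and publishes it.
 */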
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
        struct ptlrpc_cli_ctx  *ctx, *ctx_new;
        ENTRY;

        OBD_ALLOC_PTR(ctx_new);

        write_lock(&plsec->pls_lock);

        ctx = plsec->pls_ctx;
        if (ctx) {
                atomic_inc(&ctx->cc_refcount);

                if (ctx_new)
                        OBD_FREE_PTR(ctx_new);
        } else if (ctx_new) {
                ctx = ctx_new;

                atomic_set(&ctx->cc_refcount, 1); /* for cache */
                ctx->cc_sec = &plsec->pls_base;
                ctx->cc_ops = &plain_ctx_ops;
                ctx->cc_expire = 0;
                ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
                ctx->cc_vcred.vc_uid = 0;
                spin_lock_init(&ctx->cc_lock);
                CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
                CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);

                plsec->pls_ctx = ctx;
                atomic_inc(&plsec->pls_base.ps_nctx);
                atomic_inc(&plsec->pls_base.ps_refcount);

                atomic_inc(&ctx->cc_refcount); /* for caller */
        }

        write_unlock(&plsec->pls_lock);

        RETURN(ctx);
}

static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
        struct plain_sec *plsec = sec2plsec(sec);
        ENTRY;

        LASSERT(sec->ps_policy == &plain_policy);
        LASSERT(sec->ps_import);
        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_nctx) == 0);
        LASSERT(plsec->pls_ctx == NULL);

        class_import_put(sec->ps_import);

        OBD_FREE_PTR(plsec);
        EXIT;
}

static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
        sec->ps_dying = 1;
}

static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
                                    struct ptlrpc_svc_ctx *svc_ctx,
                                    struct sptlrpc_flavor *sf)
{
        struct plain_sec       *plsec;
        struct ptlrpc_sec      *sec;
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

        if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL) {
                CERROR("plain policy doesn't support bulk cipher: %u\n",
                       sf->sf_bulk_ciph);
                RETURN(NULL);
        }

        OBD_ALLOC_PTR(plsec);
        if (plsec == NULL)
                RETURN(NULL);

        /*
         * initialize plain_sec
         */
        rwlock_init(&plsec->pls_lock);
        plsec->pls_ctx = NULL;

        sec = &plsec->pls_base;
        sec->ps_policy = &plain_policy;
        atomic_set(&sec->ps_refcount, 0);
        atomic_set(&sec->ps_nctx, 0);
        sec->ps_id = sptlrpc_get_next_secid();
        sec->ps_import = class_import_get(imp);
        sec->ps_flvr = *sf;
        spin_lock_init(&sec->ps_lock);
        CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
        sec->ps_gc_interval = 0;
        sec->ps_gc_next = 0;

        /* install ctx immediately if this is a reverse sec */
        if (svc_ctx) {
                ctx = plain_sec_install_ctx(plsec);
                if (ctx == NULL) {
                        plain_destroy_sec(sec);
                        RETURN(NULL);
                }
                sptlrpc_cli_ctx_put(ctx, 1);
        }

        RETURN(sec);
}

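/*
 * Added note: lookup ignores the credentials entirely -- vcred, create and
 * remove_dead are unused; every caller gets the single cached ctx, which is
 * installed on first use.
 */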
static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
                                        struct vfs_cred *vcred,
                                        int create, int remove_dead)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        read_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        if (ctx)
                atomic_inc(&ctx->cc_refcount);
        read_unlock(&plsec->pls_lock);

        if (unlikely(ctx == NULL))
                ctx = plain_sec_install_ctx(plsec);

        RETURN(ctx);
}

static
void plain_release_ctx(struct ptlrpc_sec *sec,
                       struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(ctx->cc_sec == sec);

        OBD_FREE_PTR(ctx);

        atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}

static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
                          uid_t uid, int grace, int force)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;
        ENTRY;

        /* do nothing unless caller wants to flush for 'all' */
        if (uid != -1)
                RETURN(0);

        write_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        plsec->pls_ctx = NULL;
        write_unlock(&plsec->pls_lock);

        if (ctx)
                sptlrpc_cli_ctx_put(ctx, 1);
        RETURN(0);
}

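/*
 * Added note: request and reply buffers always use the fixed three-segment
 * layout (msg, user desc, bulk desc); segments that are not needed simply
 * get length 0.
 */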
static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;
        ENTRY;

        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_udesc)
                buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);

                buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
                                                req->rq_flvr.sf_bulk_hash, 1,
                                                req->rq_bulk_read);
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        if (!req->rq_reqbuf) {
                LASSERT(!req->rq_pool);

                alloc_len = size_roundup_power2(alloc_len);
                OBD_ALLOC(req->rq_reqbuf, alloc_len);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);

                req->rq_reqbuf_len = alloc_len;
        } else {
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= alloc_len);
                memset(req->rq_reqbuf, 0, alloc_len);
        }

        lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        req->rq_reqmsg = lustre_msg_buf_v2(req->rq_reqbuf, 0, 0);

        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

        RETURN(0);
}

static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        ENTRY;
        if (!req->rq_pool) {
                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }

        req->rq_reqmsg = NULL;
        EXIT;
}

static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;
        ENTRY;

        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
                                                req->rq_flvr.sf_bulk_hash, 0,
                                                req->rq_bulk_read);
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        /* add space for early reply */
        alloc_len += plain_at_offset;

        alloc_len = size_roundup_power2(alloc_len);

        OBD_ALLOC(req->rq_repbuf, alloc_len);
        if (!req->rq_repbuf)
                RETURN(-ENOMEM);

        req->rq_repbuf_len = alloc_len;
        RETURN(0);
}

static
void plain_free_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        ENTRY;
        OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
        req->rq_repbuf = NULL;
        req->rq_repbuf_len = 0;

        req->rq_repmsg = NULL;
        EXIT;
}

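/*
 * Added note: to grow one segment, the new sizes are computed by temporarily
 * poking the candidate length into lm_buflens, asking lustre_msg_size_v2()
 * for the total, then restoring the old value; only after that is the buffer
 * reallocated if needed and the message enlarged in place.
 */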
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
                         struct ptlrpc_request *req,
                         int segment, int newsize)
{
        struct lustre_msg      *newbuf;
        int                     oldsize;
        int                     newmsg_size, newbuf_size;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
                req->rq_reqmsg);

        /* compute new embedded msg size. */
        oldsize = req->rq_reqmsg->lm_buflens[segment];
        req->rq_reqmsg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
                                         req->rq_reqmsg->lm_buflens);
        req->rq_reqmsg->lm_buflens[segment] = oldsize;

        /* compute new wrapper msg size. */
        oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
        newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
                                         req->rq_reqbuf->lm_buflens);
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

        /* request from pool should always have enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                OBD_ALLOC(newbuf, newbuf_size);
                if (newbuf == NULL)
                        RETURN(-ENOMEM);

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
                                                PLAIN_PACK_MSG_OFF, 0);
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
                                     newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        RETURN(0);
}

/****************************************
 * service apis                         *
 ****************************************/

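/*
 * Added note: all plain requests share this one statically allocated svc
 * ctx; its refcount starts at 1 so the last plain_free_rs() never drops it
 * to zero.
 */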
static struct ptlrpc_svc_ctx plain_svc_ctx = {
        .sc_refcount    = ATOMIC_INIT(1),
        .sc_policy      = &plain_policy,
};

static
int plain_accept(struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_reqbuf;
        ENTRY;

        LASSERT(RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == SPTLRPC_POLICY_PLAIN);

        if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_PLAIN) {
                CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
                RETURN(SECSVC_DROP);
        }

        req->rq_sp_from = plain_decode_sec_part(msg);

        if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
                if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(SECSVC_DROP);
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
        }

        if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr)) {
                if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
                        CERROR("Mal-formed bulk checksum request\n");
                        RETURN(SECSVC_DROP);
                }

                req->rq_pack_bulk = 1;
        }

        req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

        req->rq_svc_ctx = &plain_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);

        RETURN(SECSVC_OK);
}

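/*
 * Added note: the reply state and its reply buffer are carved out of a
 * single allocation (the buffer starts at rs + 1), unless a pre-allocated
 * reply state is already attached to the request.
 */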
static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_reply_state   *rs;
        struct ptlrpc_bulk_sec_desc *bsd;
        __u32                        buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int                          rs_size = sizeof(*rs);
        ENTRY;

        LASSERT(msgsize % 8 == 0);

        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write)) {
                bsd = lustre_msg_buf(req->rq_reqbuf,
                                     PLAIN_PACK_BULK_OFF, sizeof(*bsd));
                LASSERT(bsd);

                buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
                                                bsd->bsd_hash_alg, 0,
                                                req->rq_bulk_read);
        }
        rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC(rs, rs_size);
                if (rs == NULL)
                        RETURN(-ENOMEM);

                rs->rs_size = rs_size;
        }

        rs->rs_svc_ctx = req->rq_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);
        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = rs_size - sizeof(*rs);

        lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

        req->rq_reply_state = rs;
        RETURN(0);
}

static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
        ENTRY;

        LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
        atomic_dec(&rs->rs_svc_ctx->sc_refcount);

        if (!rs->rs_prealloc)
                OBD_FREE(rs, rs->rs_size);
        EXIT;
}

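/*
 * Added note: on the normal (packed-final) reply path only the reply offset
 * is set; the crc32 checksum that plain_ctx_verify() checks on the client is
 * computed here solely for early replies.
 */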
static
int plain_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct lustre_msg_v2      *msg = rs->rs_repbuf;
        int                        len;
        ENTRY;

        LASSERT(rs);
        LASSERT(msg);

        if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
                len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
                                        req->rq_replen, 1);
        else
                len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        msg->lm_secflvr = req->rq_flvr.sf_rpc;
        if (req->rq_pack_bulk)
                msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;

        rs->rs_repdata_len = len;

        if (likely(req->rq_packed_final)) {
                if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
                        req->rq_reply_off = plain_at_offset;
                else
                        req->rq_reply_off = 0;
        } else {
                msg->lm_cksum = crc32_le(!(__u32) 0,
                                lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                                lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF));
                req->rq_reply_off = 0;
        }

        RETURN(0);
}

static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        ENTRY;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
        LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_svc(desc, req->rq_bulk_read,
                             lustre_msg_buf(req->rq_reqbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(req->rq_reqbuf,
                                               PLAIN_PACK_BULK_OFF),
                             lustre_msg_buf(rs->rs_repbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(rs->rs_repbuf,
                                               PLAIN_PACK_BULK_OFF));
}

static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        ENTRY;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
        LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        return bulk_csum_svc(desc, req->rq_bulk_read,
                             lustre_msg_buf(req->rq_reqbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(req->rq_reqbuf,
                                               PLAIN_PACK_BULK_OFF),
                             lustre_msg_buf(rs->rs_repbuf,
                                            PLAIN_PACK_BULK_OFF, 0),
                             lustre_msg_buflen(rs->rs_repbuf,
                                               PLAIN_PACK_BULK_OFF));
}

static struct ptlrpc_ctx_ops plain_ctx_ops = {
        .refresh                = plain_ctx_refresh,
        .validate               = plain_ctx_validate,
        .sign                   = plain_ctx_sign,
        .verify                 = plain_ctx_verify,
        .wrap_bulk              = plain_cli_wrap_bulk,
        .unwrap_bulk            = plain_cli_unwrap_bulk,
};

static struct ptlrpc_sec_cops plain_sec_cops = {
        .create_sec             = plain_create_sec,
        .destroy_sec            = plain_destroy_sec,
        .kill_sec               = plain_kill_sec,
        .lookup_ctx             = plain_lookup_ctx,
        .release_ctx            = plain_release_ctx,
        .flush_ctx_cache        = plain_flush_ctx_cache,
        .alloc_reqbuf           = plain_alloc_reqbuf,
        .alloc_repbuf           = plain_alloc_repbuf,
        .free_reqbuf            = plain_free_reqbuf,
        .free_repbuf            = plain_free_repbuf,
        .enlarge_reqbuf         = plain_enlarge_reqbuf,
};

static struct ptlrpc_sec_sops plain_sec_sops = {
        .accept                 = plain_accept,
        .alloc_rs               = plain_alloc_rs,
        .authorize              = plain_authorize,
        .free_rs                = plain_free_rs,
        .unwrap_bulk            = plain_svc_unwrap_bulk,
        .wrap_bulk              = plain_svc_wrap_bulk,
};

static struct ptlrpc_sec_policy plain_policy = {
        .sp_owner               = THIS_MODULE,
        .sp_name                = "plain",
        .sp_policy              = SPTLRPC_POLICY_PLAIN,
        .sp_cops                = &plain_sec_cops,
        .sp_sops                = &plain_sec_sops,
};

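/*
 * Added note: plain_at_offset is the byte offset of the embedded reply
 * message when room for an early reply is reserved in front of it; it is
 * computed once here from the early-reply message size, and used by both
 * plain_alloc_repbuf() and plain_authorize().
 */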
int sptlrpc_plain_init(void)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int rc;

        buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
        plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rc = sptlrpc_register_policy(&plain_policy);
        if (rc)
                CERROR("failed to register: %d\n", rc);

        return rc;
}

void sptlrpc_plain_fini(void)
{
        int rc;

        rc = sptlrpc_unregister_policy(&plain_policy);
        if (rc)
                CERROR("cannot unregister: %d\n", rc);
}