/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC

#include <obd_support.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"
struct plain_sec {
	struct ptlrpc_sec	 pls_base;
	rwlock_t		 pls_lock;
	struct ptlrpc_cli_ctx	*pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
	return container_of(sec, struct plain_sec, pls_base);
}
static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops	plain_ctx_ops;
static struct ptlrpc_svc_ctx	plain_svc_ctx;

static unsigned int plain_at_offset;
/*
 * For simplicity, plain policy RPCs use a fixed four-segment layout.
 */
#define PLAIN_PACK_SEGMENTS		(4)

#define PLAIN_PACK_HDR_OFF		(0)
#define PLAIN_PACK_MSG_OFF		(1)
#define PLAIN_PACK_USER_OFF		(2)
#define PLAIN_PACK_BULK_OFF		(3)

#define PLAIN_FL_USER			(0x01)
#define PLAIN_FL_BULK			(0x02)
struct plain_header {
	__u8 ph_ver;		/* 0 */
	__u8 ph_flags;
	__u8 ph_sp;		/* source */
	__u8 ph_bulk_hash_alg;	/* complete flavor desc */
	__u8 ph_pad[4];
};

struct plain_bulk_token {
	__u8 pbt_hash[8];
};

#define PLAIN_BSD_SIZE \
	(sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))
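
/*
 * On-wire layout of a plain request/reply (PLAIN_PACK_SEGMENTS buffers):
 *
 *   [0] HDR:  struct plain_header
 *   [1] MSG:  the embedded lustre_msg
 *   [2] USER: user descriptor (only if PLAIN_FL_USER is set)
 *   [3] BULK: bulk sec desc + plain_bulk_token (only if PLAIN_FL_BULK is set)
 */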
/****************************************
 * bulk checksum helpers		*
 ****************************************/
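
/*
 * Swab and sanity-check the bulk security descriptor carried in the
 * PLAIN_PACK_BULK_OFF segment; only the null and integrity bulk
 * services are valid for the plain policy.
 */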
static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
	struct ptlrpc_bulk_sec_desc *bsd;

	if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
		return -EPROTO;

	bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
	if (bsd == NULL) {
		CERROR("bulk sec desc has short size %d\n",
		       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
		return -EPROTO;
	}

	if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
	    bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
		CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
		return -EPROTO;
	}

	return 0;
}
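
/*
 * Checksum the bulk pages into @token, or do nothing when the flavor
 * requests no hashing (BULK_HASH_ALG_NULL).
 */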
static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
				    __u8 hash_alg,
				    struct plain_bulk_token *token)
{
	if (hash_alg == BULK_HASH_ALG_NULL)
		return 0;

	memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
	return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
					 sizeof(token->pbt_hash));
}
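
/*
 * Recompute the checksum of the bulk pages and compare it with the
 * received token @tokenr; returns -EACCES on mismatch.
 */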
static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
				  __u8 hash_alg,
				  struct plain_bulk_token *tokenr)
{
	struct plain_bulk_token tokenv;
	int rc;

	if (hash_alg == BULK_HASH_ALG_NULL)
		return 0;

	memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
	rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
				       sizeof(tokenv.pbt_hash));
	if (rc)
		return rc;

	if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
		return -EACCES;
	return 0;
}
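
/*
 * Fault-injection helper: flip one bit in the first non-empty page of
 * the bulk descriptor so that checksum verification on the peer fails.
 */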
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
	char *ptr;
	unsigned int off, i;

	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

	for (i = 0; i < desc->bd_iov_count; i++) {
		if (BD_GET_KIOV(desc, i).kiov_len == 0)
			continue;

		ptr = kmap(BD_GET_KIOV(desc, i).kiov_page);
		off = BD_GET_KIOV(desc, i).kiov_offset & ~PAGE_MASK;
		ptr[off] ^= 0x1;
		kunmap(BD_GET_KIOV(desc, i).kiov_page);
		return;
	}
}
/****************************************
 * cli_ctx apis				*
 ****************************************/
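
/*
 * A plain sec owns a single shared client context that never expires,
 * so plain_ctx_refresh() must never be reached and validation always
 * succeeds.
 */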
static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
	/* should never reach here */
	LBUG();
	return 0;
}

static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
	return 0;
}
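
/*
 * "Sign" an outgoing request: the plain policy does no cryptographic
 * signing, it merely fills in the plain_header segment (source part,
 * bulk hash algorithm, user-desc/bulk flags).
 */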
static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct plain_header *phdr;

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_sp = ctx->cc_sec->ps_part;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_udesc)
		phdr->ph_flags |= PLAIN_FL_USER;
	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
						 msg->lm_buflens);
	return 0;
}
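
/*
 * Verify an incoming reply: check the segment layout and plain header,
 * verify the CRC32 checksum of an early reply, and unpack the bulk
 * security descriptor if one is expected.
 */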
static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_repdata;
	struct plain_header *phdr;
	__u32 cksum;
	int swabbed;

	if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
		CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
		return -EPROTO;
	}

	swabbed = ptlrpc_rep_need_swab(req);

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
	if (phdr == NULL) {
		CERROR("missing plain header\n");
		return -EPROTO;
	}

	if (phdr->ph_ver != 0) {
		CERROR("Invalid header version\n");
		return -EPROTO;
	}

	/* expect no user desc in reply */
	if (phdr->ph_flags & PLAIN_FL_USER) {
		CERROR("Unexpected udesc flag in reply\n");
		return -EPROTO;
	}

	if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
		CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
		       req->rq_flvr.u_bulk.hash.hash_alg);
		return -EPROTO;
	}

	if (unlikely(req->rq_early)) {
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
				lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
				lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
				NULL, 0, (unsigned char *)&cksum, &hsize);
		if (cksum != msg->lm_cksum) {
			CDEBUG(D_SEC,
			       "early reply checksum mismatch: %08x != %08x\n",
			       cpu_to_le32(cksum), msg->lm_cksum);
			return -EINVAL;
		}
	} else {
		/* whether we sent with bulk or not, we expect the same
		 * in reply, except for early reply */
		if (!req->rq_early &&
		    !equi(req->rq_pack_bulk == 1,
			  phdr->ph_flags & PLAIN_FL_BULK)) {
			CERROR("%s bulk checksum in reply\n",
			       req->rq_pack_bulk ? "Missing" : "Unexpected");
			return -EPROTO;
		}

		if (phdr->ph_flags & PLAIN_FL_BULK) {
			if (plain_unpack_bsd(msg, swabbed))
				return -EPROTO;
		}
	}

	req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
	req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
	return 0;
}
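
/*
 * Client side: fill in the request's bulk security descriptor; for
 * bulk writes with an integrity flavor, also checksum the outgoing
 * pages.
 */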
static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
			struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsd;
	struct plain_bulk_token *token;
	int rc;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	token = (struct plain_bulk_token *) bsd->bsd_data;

	bsd->bsd_version = 0;
	bsd->bsd_flags = 0;
	bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

	if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	if (req->rq_bulk_read)
		return 0;

	rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				      token);
	if (rc) {
		CERROR("bulk write: failed to compute checksum: %d\n", rc);
	} else {
		/*
		 * for sending we only compute the wrong checksum instead
		 * of corrupting the data so it is still correct on a redo
		 */
		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
		    req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
			token->pbt_hash[0] ^= 0x1;
	}

	return rc;
}
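
/*
 * Client side: process the reply's bulk security descriptor. For a
 * write only the server's BSD_FL_ERR flag matters; for a read, clip
 * the kiov to the bytes actually transferred and verify the checksum.
 */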
static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
			  struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsdv;
	struct plain_bulk_token *tokenv;
	int rc;
	int i, nob;

	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
	LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
	tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

	if (req->rq_bulk_write) {
		if (bsdv->bsd_flags & BSD_FL_ERR)
			return -EIO;
		return 0;
	}

	/* fix the actual data size */
	for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
		if (BD_GET_KIOV(desc, i).kiov_len +
		    nob > desc->bd_nob_transferred) {
			BD_GET_KIOV(desc, i).kiov_len =
				desc->bd_nob_transferred - nob;
		}
		nob += BD_GET_KIOV(desc, i).kiov_len;
	}

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenv);
	if (rc)
		CERROR("bulk read: client verify failed: %d\n", rc);

	return rc;
}
/****************************************
 * sec apis				*
 ****************************************/
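
/*
 * Install the single shared client context of @plsec, or grab a
 * reference on an already installed one. Returns the context with a
 * reference held for the caller, or NULL on allocation failure.
 */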
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
	struct ptlrpc_cli_ctx *ctx, *ctx_new;

	OBD_ALLOC_PTR(ctx_new);

	write_lock(&plsec->pls_lock);

	ctx = plsec->pls_ctx;
	if (ctx) {
		atomic_inc(&ctx->cc_refcount);

		if (ctx_new)
			OBD_FREE_PTR(ctx_new);
	} else if (ctx_new) {
		ctx = ctx_new;

		atomic_set(&ctx->cc_refcount, 1);	/* for cache */
		ctx->cc_sec = &plsec->pls_base;
		ctx->cc_ops = &plain_ctx_ops;
		ctx->cc_expire = 0;
		ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
		ctx->cc_vcred.vc_uid = 0;
		spin_lock_init(&ctx->cc_lock);
		INIT_LIST_HEAD(&ctx->cc_req_list);
		INIT_LIST_HEAD(&ctx->cc_gc_chain);

		plsec->pls_ctx = ctx;
		atomic_inc(&plsec->pls_base.ps_nctx);
		atomic_inc(&plsec->pls_base.ps_refcount);

		atomic_inc(&ctx->cc_refcount);	/* for caller */
	}

	write_unlock(&plsec->pls_lock);

	return ctx;
}
static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
	struct plain_sec *plsec = sec2plsec(sec);

	LASSERT(sec->ps_policy == &plain_policy);
	LASSERT(sec->ps_import);
	LASSERT(atomic_read(&sec->ps_refcount) == 0);
	LASSERT(atomic_read(&sec->ps_nctx) == 0);
	LASSERT(plsec->pls_ctx == NULL);

	class_import_put(sec->ps_import);

	OBD_FREE_PTR(plsec);
}
static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
	sec->ps_dying = 1;
}
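
/*
 * Create a plain security instance for import @imp. For a reverse
 * security (@svc_ctx != NULL) the shared context is installed right
 * away.
 */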
static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
				    struct ptlrpc_svc_ctx *svc_ctx,
				    struct sptlrpc_flavor *sf)
{
	struct plain_sec *plsec;
	struct ptlrpc_sec *sec;
	struct ptlrpc_cli_ctx *ctx;

	LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

	OBD_ALLOC_PTR(plsec);
	if (plsec == NULL)
		return NULL;

	/*
	 * initialize plain_sec
	 */
	rwlock_init(&plsec->pls_lock);
	plsec->pls_ctx = NULL;

	sec = &plsec->pls_base;
	sec->ps_policy = &plain_policy;
	atomic_set(&sec->ps_refcount, 0);
	atomic_set(&sec->ps_nctx, 0);
	sec->ps_id = sptlrpc_get_next_secid();
	sec->ps_import = class_import_get(imp);
	sec->ps_flvr = *sf;
	spin_lock_init(&sec->ps_lock);
	INIT_LIST_HEAD(&sec->ps_gc_list);
	sec->ps_gc_interval = 0;
	sec->ps_gc_next = 0;

	/* install ctx immediately if this is a reverse sec */
	if (svc_ctx) {
		ctx = plain_sec_install_ctx(plsec);
		if (ctx == NULL) {
			plain_destroy_sec(sec);
			return NULL;
		}
		sptlrpc_cli_ctx_put(ctx, 1);
	}

	return sec;
}
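
/*
 * Context lookup ignores the credentials: every caller shares the one
 * cached context, which is created on first use.
 */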
static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
					struct vfs_cred *vcred,
					int create, int remove_dead)
{
	struct plain_sec *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx *ctx;

	read_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	if (ctx)
		atomic_inc(&ctx->cc_refcount);
	read_unlock(&plsec->pls_lock);

	if (unlikely(ctx == NULL))
		ctx = plain_sec_install_ctx(plsec);

	return ctx;
}
static
void plain_release_ctx(struct ptlrpc_sec *sec,
		       struct ptlrpc_cli_ctx *ctx, int sync)
{
	LASSERT(atomic_read(&sec->ps_refcount) > 0);
	LASSERT(atomic_read(&sec->ps_nctx) > 0);
	LASSERT(atomic_read(&ctx->cc_refcount) == 0);
	LASSERT(ctx->cc_sec == sec);

	OBD_FREE_PTR(ctx);

	atomic_dec(&sec->ps_nctx);
	sptlrpc_sec_put(sec);
}
static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
			  uid_t uid, int grace, int force)
{
	struct plain_sec *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx *ctx;

	/* do nothing unless caller wants to flush for 'all' */
	if (uid != -1)
		return 0;

	write_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	plsec->pls_ctx = NULL;
	write_unlock(&plsec->pls_lock);

	if (ctx)
		sptlrpc_cli_ctx_put(ctx, 1);
	return 0;
}
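
/*
 * Allocate (or reuse from the request pool) the wrapper request buffer
 * with the fixed four-segment plain layout around a @msgsize message.
 */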
static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int alloc_len;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_udesc)
		buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	if (!req->rq_reqbuf) {
		LASSERT(!req->rq_pool);

		alloc_len = size_roundup_power2(alloc_len);
		OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
		if (!req->rq_reqbuf)
			return -ENOMEM;

		req->rq_reqbuf_len = alloc_len;
	} else {
		LASSERT(req->rq_pool);
		LASSERT(req->rq_reqbuf_len >= alloc_len);
		memset(req->rq_reqbuf, 0, alloc_len);
	}

	lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);

	if (req->rq_pack_udesc)
		sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

	return 0;
}
static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req)
{
	if (!req->rq_pool) {
		OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
		req->rq_reqbuf = NULL;
		req->rq_reqbuf_len = 0;
	}
}
static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int alloc_len;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	/* add space for early reply */
	alloc_len += plain_at_offset;

	alloc_len = size_roundup_power2(alloc_len);

	OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
	if (!req->rq_repbuf)
		return -ENOMEM;

	req->rq_repbuf_len = alloc_len;
	return 0;
}
static
void plain_free_repbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req)
{
	OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
	req->rq_repbuf = NULL;
	req->rq_repbuf_len = 0;
}
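
/*
 * Grow segment @segment of the embedded request message to @newsize,
 * reallocating the wrapper buffer when it is too small.
 */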
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
			 struct ptlrpc_request *req,
			 int segment, int newsize)
{
	struct lustre_msg *newbuf;
	int oldsize;
	int newmsg_size, newbuf_size;

	LASSERT(req->rq_reqbuf);
	LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
	LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
		req->rq_reqmsg);

	/* compute new embedded msg size. */
	oldsize = req->rq_reqmsg->lm_buflens[segment];
	req->rq_reqmsg->lm_buflens[segment] = newsize;
	newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
					 req->rq_reqmsg->lm_buflens);
	req->rq_reqmsg->lm_buflens[segment] = oldsize;

	/* compute new wrapper msg size. */
	oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
	newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
					 req->rq_reqbuf->lm_buflens);
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

	/* request from pool should always have enough buffer */
	LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

	if (req->rq_reqbuf_len < newbuf_size) {
		newbuf_size = size_roundup_power2(newbuf_size);

		OBD_ALLOC_LARGE(newbuf, newbuf_size);
		if (newbuf == NULL)
			return -ENOMEM;

		/* Must lock this, so that otherwise unprotected change of
		 * rq_reqmsg is not racing with parallel processing of
		 * imp_replay_list traversing threads. See LU-3333.
		 * This is a bandaid at best, we really need to deal with
		 * this in request enlarging code before unpacking that's
		 * already there.
		 */
		if (req->rq_import)
			spin_lock(&req->rq_import->imp_lock);

		memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

		OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
		req->rq_reqbuf = newbuf;
		req->rq_reqbuf_len = newbuf_size;
		req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
						PLAIN_PACK_MSG_OFF, 0);

		if (req->rq_import)
			spin_unlock(&req->rq_import->imp_lock);
	}

	_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
				     newmsg_size);
	_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

	req->rq_reqlen = newmsg_size;
	return 0;
}
/****************************************
 * service apis				*
 ****************************************/
static struct ptlrpc_svc_ctx plain_svc_ctx = {
	.sc_refcount	= ATOMIC_INIT(1),
	.sc_policy	= &plain_policy,
};
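
/*
 * Server-side entry point: validate the flavor and plain header of an
 * incoming request, unpack the optional user and bulk descriptors, and
 * attach the shared service context.
 */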
static
int plain_accept(struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct plain_header *phdr;
	int swabbed;

	LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
		SPTLRPC_POLICY_PLAIN);

	if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
	    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
	    SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
	    SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
		CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
		return SECSVC_DROP;
	}

	if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
		CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
		return SECSVC_DROP;
	}

	swabbed = ptlrpc_req_need_swab(req);

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
	if (phdr == NULL) {
		CERROR("missing plain header\n");
		return -EPROTO;
	}

	if (phdr->ph_ver != 0) {
		CERROR("Invalid header version\n");
		return -EPROTO;
	}

	if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
		CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
		return -EPROTO;
	}

	req->rq_sp_from = phdr->ph_sp;
	req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;

	if (phdr->ph_flags & PLAIN_FL_USER) {
		if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
					     swabbed)) {
			CERROR("Mal-formed user descriptor\n");
			return SECSVC_DROP;
		}

		req->rq_pack_udesc = 1;
		req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
	}

	if (phdr->ph_flags & PLAIN_FL_BULK) {
		if (plain_unpack_bsd(msg, swabbed))
			return SECSVC_DROP;

		req->rq_pack_bulk = 1;
	}

	req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
	req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

	req->rq_svc_ctx = &plain_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);

	return SECSVC_OK;
}
static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
	struct ptlrpc_reply_state *rs;
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int rs_size = sizeof(*rs);

	LASSERT(msgsize % 8 == 0);

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;

	rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	rs = req->rq_reply_state;

	if (rs) {
		/* pre-allocated */
		LASSERT(rs->rs_size >= rs_size);
	} else {
		OBD_ALLOC_LARGE(rs, rs_size);
		if (rs == NULL)
			return -ENOMEM;

		rs->rs_size = rs_size;
	}

	rs->rs_svc_ctx = req->rq_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);
	rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
	rs->rs_repbuf_len = rs_size - sizeof(*rs);

	lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

	req->rq_reply_state = rs;
	return 0;
}
static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
	LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
	atomic_dec(&rs->rs_svc_ctx->sc_refcount);

	if (!rs->rs_prealloc)
		OBD_FREE_LARGE(rs, rs->rs_size);
}
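
/*
 * Server side: shrink the reply to its final size, fill in the plain
 * header, and checksum the reply message when it may be sent back as
 * an early reply.
 */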
static
int plain_authorize(struct ptlrpc_request *req)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct lustre_msg_v2 *msg = rs->rs_repbuf;
	struct plain_header *phdr;
	int len;

	LASSERT(rs);
	LASSERT(msg);

	if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
		len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
					req->rq_replen, 1);
	else
		len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	rs->rs_repdata_len = len;

	if (likely(req->rq_packed_final)) {
		if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
			req->rq_reply_off = plain_at_offset;
		else
			req->rq_reply_off = 0;
	} else {
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
				lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
				lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
				NULL, 0, (unsigned char *)&msg->lm_cksum,
				&hsize);
		req->rq_reply_off = 0;
	}

	return 0;
}
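
/*
 * Server side of a bulk write: verify the client's bulk checksum after
 * the data has arrived, reporting failure via BSD_FL_ERR in the reply.
 */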
static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	struct plain_bulk_token *tokenr;
	int rc;

	LASSERT(req->rq_bulk_write);
	LASSERT(req->rq_pack_bulk);

	bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
	bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);

	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenr);
	if (rc) {
		bsdv->bsd_flags |= BSD_FL_ERR;
		CERROR("bulk write: server verify failed: %d\n", rc);
	}

	return rc;
}
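
/*
 * Server side of a bulk read: checksum the outgoing pages into the
 * reply's bulk token before the data is sent back.
 */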
static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	struct plain_bulk_token *tokenv;
	int rc;

	LASSERT(req->rq_bulk_read);
	LASSERT(req->rq_pack_bulk);

	bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
	tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				      tokenv);
	if (rc) {
		CERROR("bulk read: server failed to compute checksum: %d\n",
		       rc);
	} else {
		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
			corrupt_bulk_data(desc);
	}

	return rc;
}
static struct ptlrpc_ctx_ops plain_ctx_ops = {
	.refresh	= plain_ctx_refresh,
	.validate	= plain_ctx_validate,
	.sign		= plain_ctx_sign,
	.verify		= plain_ctx_verify,
	.wrap_bulk	= plain_cli_wrap_bulk,
	.unwrap_bulk	= plain_cli_unwrap_bulk,
};
static struct ptlrpc_sec_cops plain_sec_cops = {
	.create_sec		= plain_create_sec,
	.destroy_sec		= plain_destroy_sec,
	.kill_sec		= plain_kill_sec,
	.lookup_ctx		= plain_lookup_ctx,
	.release_ctx		= plain_release_ctx,
	.flush_ctx_cache	= plain_flush_ctx_cache,
	.alloc_reqbuf		= plain_alloc_reqbuf,
	.free_reqbuf		= plain_free_reqbuf,
	.alloc_repbuf		= plain_alloc_repbuf,
	.free_repbuf		= plain_free_repbuf,
	.enlarge_reqbuf		= plain_enlarge_reqbuf,
};
static struct ptlrpc_sec_sops plain_sec_sops = {
	.accept		= plain_accept,
	.alloc_rs	= plain_alloc_rs,
	.authorize	= plain_authorize,
	.free_rs	= plain_free_rs,
	.unwrap_bulk	= plain_svc_unwrap_bulk,
	.wrap_bulk	= plain_svc_wrap_bulk,
};
static struct ptlrpc_sec_policy plain_policy = {
	.sp_owner	= THIS_MODULE,
	.sp_name	= "plain",
	.sp_policy	= SPTLRPC_POLICY_PLAIN,
	.sp_cops	= &plain_sec_cops,
	.sp_sops	= &plain_sec_sops,
};
int sptlrpc_plain_init(void)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int rc;

	buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
	plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	rc = sptlrpc_register_policy(&plain_policy);
	if (rc)
		CERROR("failed to register: %d\n", rc);

	return rc;
}
void sptlrpc_plain_fini(void)
{
	int rc;

	rc = sptlrpc_unregister_policy(&plain_policy);
	if (rc)
		CERROR("cannot unregister: %d\n", rc);
}