/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC

#ifndef __KERNEL__
#include <liblustre.h>
#endif

#include <obd_support.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>

struct plain_sec {
        struct ptlrpc_sec       pls_base;
        cfs_rwlock_t            pls_lock;
        struct ptlrpc_cli_ctx  *pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
        return container_of(sec, struct plain_sec, pls_base);
}

static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops    plain_ctx_ops;
static struct ptlrpc_svc_ctx    plain_svc_ctx;

static unsigned int plain_at_offset;

/*
 * For simplicity, plain policy RPCs use a fixed segment layout.
 */
#define PLAIN_PACK_SEGMENTS             (4)

#define PLAIN_PACK_HDR_OFF              (0)
#define PLAIN_PACK_MSG_OFF              (1)
#define PLAIN_PACK_USER_OFF             (2)
#define PLAIN_PACK_BULK_OFF             (3)

#define PLAIN_FL_USER                   (0x01)
#define PLAIN_FL_BULK                   (0x02)

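/*
 * Segment 0 carries the plain_header, segment 1 the embedded lustre_msg,
 * segment 2 an optional user descriptor (PLAIN_FL_USER) and segment 3 an
 * optional bulk security descriptor (PLAIN_FL_BULK).
 */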
struct plain_header {
        __u8 ph_ver;            /* 0 */
        __u8 ph_flags;
        __u8 ph_sp;             /* source */
        __u8 ph_bulk_hash_alg;  /* complete flavor desc */
};

struct plain_bulk_token {
        __u8 pbt_hash[8];
};

#define PLAIN_BSD_SIZE \
        (sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))

/****************************************
 * bulk checksum helpers                *
 ****************************************/

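/*
 * Sanity-check the bulk security descriptor carried at PLAIN_PACK_BULK_OFF:
 * it must unpack cleanly and request either the null or the integrity bulk
 * service, the only two bulk services the plain policy supports.
 */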
static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
        struct ptlrpc_bulk_sec_desc *bsd;

        if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
                return -EPROTO;

        bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
        if (bsd == NULL) {
                CERROR("bulk sec desc has short size %d\n",
                       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
                return -EPROTO;
        }

        if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
            bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
                CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
                return -EPROTO;
        }

        return 0;
}

static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
                                    __u8 hash_alg,
                                    struct plain_bulk_token *token)
{
        if (hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
        return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
                                         sizeof(token->pbt_hash));
}

static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
                                  __u8 hash_alg,
                                  struct plain_bulk_token *tokenr)
{
        struct plain_bulk_token tokenv;
        int                     rc;

        if (hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
        rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
                                       sizeof(tokenv.pbt_hash));
        if (rc)
                return rc;

        if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
                return -EACCES;
        return 0;
}

#ifdef __KERNEL__
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
        char           *ptr;
        unsigned int    off, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                if (desc->bd_iov[i].kiov_len == 0)
                        continue;

                ptr = cfs_kmap(desc->bd_iov[i].kiov_page);
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr[off] ^= 0x1;
                cfs_kunmap(desc->bd_iov[i].kiov_page);
                return;
        }
}
#else
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
        unsigned int    i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                if (desc->bd_iov[i].iov_len == 0)
                        continue;

                ((char *)desc->bd_iov[i].iov_base)[i] ^= 0x1;
                return;
        }
}
#endif /* __KERNEL__ */

/****************************************
 * cli_ctx apis                         *
 ****************************************/

static int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LBUG();
        return 0;
}

static int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
        return 0;
}

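/*
 * "Signing" under the plain policy does no cryptography: it only records the
 * rpc flavor and fills in the plain_header (peer part, bulk hash algorithm,
 * user/bulk flags) in front of the embedded request message.
 */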
static int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg   *msg = req->rq_reqbuf;
        struct plain_header *phdr;

        msg->lm_secflvr = req->rq_flvr.sf_rpc;

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
        phdr->ph_ver = 0;
        phdr->ph_flags = 0;
        phdr->ph_sp = ctx->cc_sec->ps_part;
        phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

        if (req->rq_pack_udesc)
                phdr->ph_flags |= PLAIN_FL_USER;
        if (req->rq_pack_bulk)
                phdr->ph_flags |= PLAIN_FL_BULK;

        req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
                                                 msg->lm_buflens);
        return 0;
}

static int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg   *msg = req->rq_repdata;
        struct plain_header *phdr;
        __u32                cksum;
        int                  swabbed;

        if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
                return -EPROTO;
        }

        swabbed = ptlrpc_rep_need_swab(req);

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
        if (phdr == NULL) {
                CERROR("missing plain header\n");
                return -EPROTO;
        }

        if (phdr->ph_ver != 0) {
                CERROR("Invalid header version\n");
                return -EPROTO;
        }

        /* expect no user desc in reply */
        if (phdr->ph_flags & PLAIN_FL_USER) {
                CERROR("Unexpected udesc flag in reply\n");
                return -EPROTO;
        }

        if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
                CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
                       req->rq_flvr.u_bulk.hash.hash_alg);
                return -EPROTO;
        }

        if (unlikely(req->rq_early)) {
                cksum = crc32_le(!(__u32)0,
                                 lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                                 lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF));
                if (cksum != msg->lm_cksum) {
                        CDEBUG(D_SEC,
                               "early reply checksum mismatch: %08x != %08x\n",
                               cpu_to_le32(cksum), msg->lm_cksum);
                        return -EINVAL;
                }
        } else {
                /* whether we sent with bulk or not, we expect the same
                 * in reply, except for early reply */
                if (!req->rq_early &&
                    !equi(req->rq_pack_bulk == 1,
                          phdr->ph_flags & PLAIN_FL_BULK)) {
                        CERROR("%s bulk checksum in reply\n",
                               req->rq_pack_bulk ? "Missing" : "Unexpected");
                        return -EPROTO;
                }

                if (phdr->ph_flags & PLAIN_FL_BULK) {
                        if (plain_unpack_bsd(msg, swabbed))
                                return -EPROTO;
                }
        }

        req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
        return 0;
}

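/*
 * Client-side bulk wrap/unwrap: the checksum token lives in the bsd_data area
 * of the ptlrpc_bulk_sec_desc packed at PLAIN_PACK_BULK_OFF.
 */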
static int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                               struct ptlrpc_request *req,
                               struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_sec_desc *bsd;
        struct plain_bulk_token     *token;
        int                          rc;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        token = (struct plain_bulk_token *) bsd->bsd_data;

        bsd->bsd_version = 0;
        bsd->bsd_flags = 0;
        bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

        if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        if (req->rq_bulk_read)
                return 0;

        rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                      token);
        if (rc) {
                CERROR("bulk write: failed to compute checksum: %d\n", rc);
        } else {
                /*
                 * for sending we only compute the wrong checksum instead
                 * of corrupting the data so it is still correct on a redo
                 */
                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
                    req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
                        token->pbt_hash[0] ^= 0x1;
        }

        return rc;
}

static int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                                 struct ptlrpc_request *req,
                                 struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_sec_desc *bsdv;
        struct plain_bulk_token     *tokenv;
        int                          rc;
#ifdef __KERNEL__
        int                          i, nob;
#endif

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
        LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

        bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
        tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

        if (req->rq_bulk_write) {
                if (bsdv->bsd_flags & BSD_FL_ERR)
                        return -EIO;
                return 0;
        }

#ifdef __KERNEL__
        /* fix the actual data size */
        for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
                if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
                        desc->bd_iov[i].kiov_len =
                                desc->bd_nob_transferred - nob;
                }
                nob += desc->bd_iov[i].kiov_len;
        }
#endif

        rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                    tokenv);
        if (rc)
                CERROR("bulk read: client verify failed: %d\n", rc);

        return rc;
}

/****************************************
 * sec apis                             *
 ****************************************/

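/*
 * A plain sec caches a single shared client context in pls_ctx; it is created
 * lazily and handed out with an extra reference for the caller.
 */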
static struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
        struct ptlrpc_cli_ctx *ctx, *ctx_new;

        OBD_ALLOC_PTR(ctx_new);

        cfs_write_lock(&plsec->pls_lock);

        ctx = plsec->pls_ctx;
        if (ctx) {
                cfs_atomic_inc(&ctx->cc_refcount);

                if (ctx_new)
                        OBD_FREE_PTR(ctx_new);
        } else if (ctx_new) {
                ctx = ctx_new;

                cfs_atomic_set(&ctx->cc_refcount, 1); /* for cache */
                ctx->cc_sec = &plsec->pls_base;
                ctx->cc_ops = &plain_ctx_ops;
                ctx->cc_expire = 0;
                ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
                ctx->cc_vcred.vc_uid = 0;
                cfs_spin_lock_init(&ctx->cc_lock);
                CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
                CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);

                plsec->pls_ctx = ctx;
                cfs_atomic_inc(&plsec->pls_base.ps_nctx);
                cfs_atomic_inc(&plsec->pls_base.ps_refcount);

                cfs_atomic_inc(&ctx->cc_refcount); /* for caller */
        }

        cfs_write_unlock(&plsec->pls_lock);

        return ctx;
}

static void plain_destroy_sec(struct ptlrpc_sec *sec)
{
        struct plain_sec *plsec = sec2plsec(sec);

        LASSERT(sec->ps_policy == &plain_policy);
        LASSERT(sec->ps_import);
        LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
        LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
        LASSERT(plsec->pls_ctx == NULL);

        class_import_put(sec->ps_import);

        OBD_FREE_PTR(plsec);
}

static void plain_kill_sec(struct ptlrpc_sec *sec)
{
        sec->ps_dying = 1;
}

static struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
                                           struct ptlrpc_svc_ctx *svc_ctx,
                                           struct sptlrpc_flavor *sf)
{
        struct plain_sec       *plsec;
        struct ptlrpc_sec      *sec;
        struct ptlrpc_cli_ctx  *ctx;

        LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

        OBD_ALLOC_PTR(plsec);
        if (plsec == NULL)
                return NULL;

        /*
         * initialize plain_sec
         */
        cfs_rwlock_init(&plsec->pls_lock);
        plsec->pls_ctx = NULL;

        sec = &plsec->pls_base;
        sec->ps_policy = &plain_policy;
        cfs_atomic_set(&sec->ps_refcount, 0);
        cfs_atomic_set(&sec->ps_nctx, 0);
        sec->ps_id = sptlrpc_get_next_secid();
        sec->ps_import = class_import_get(imp);
        sec->ps_flvr = *sf;
        cfs_spin_lock_init(&sec->ps_lock);
        CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
        sec->ps_gc_interval = 0;
        sec->ps_gc_next = 0;

        /* install ctx immediately if this is a reverse sec */
        if (svc_ctx) {
                ctx = plain_sec_install_ctx(plsec);
                if (ctx == NULL) {
                        plain_destroy_sec(sec);
                        return NULL;
                }
                sptlrpc_cli_ctx_put(ctx, 1);
        }

        return sec;
}

static struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
                                               struct vfs_cred *vcred,
                                               int create, int remove_dead)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;

        cfs_read_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        if (ctx)
                cfs_atomic_inc(&ctx->cc_refcount);
        cfs_read_unlock(&plsec->pls_lock);

        if (unlikely(ctx == NULL))
                ctx = plain_sec_install_ctx(plsec);

        return ctx;
}

static void plain_release_ctx(struct ptlrpc_sec *sec,
                              struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
        LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
        LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(ctx->cc_sec == sec);

        OBD_FREE_PTR(ctx);

        cfs_atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}

static int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
                                 uid_t uid, int grace, int force)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;

        /* do nothing unless caller wants to flush for 'all' */
        if (uid != -1)
                return 0;

        cfs_write_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        plsec->pls_ctx = NULL;
        cfs_write_unlock(&plsec->pls_lock);

        if (ctx)
                sptlrpc_cli_ctx_put(ctx, 1);
        return 0;
}

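/*
 * Request buffer layout: a wrapper lustre_msg with PLAIN_PACK_SEGMENTS
 * buffers, with the real request message living in the PLAIN_PACK_MSG_OFF
 * segment.
 */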
static int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
                              struct ptlrpc_request *req,
                              int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int   alloc_len;

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_udesc)
                buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        if (!req->rq_reqbuf) {
                LASSERT(!req->rq_pool);

                alloc_len = size_roundup_power2(alloc_len);
                OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
                if (!req->rq_reqbuf)
                        return -ENOMEM;

                req->rq_reqbuf_len = alloc_len;
        } else {
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= alloc_len);
                memset(req->rq_reqbuf, 0, alloc_len);
        }

        lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);

        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

        return 0;
}

static void plain_free_reqbuf(struct ptlrpc_sec *sec,
                              struct ptlrpc_request *req)
{
        if (!req->rq_pool) {
                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }
}

static int plain_alloc_repbuf(struct ptlrpc_sec *sec,
                              struct ptlrpc_request *req,
                              int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int   alloc_len;

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        /* add space for early reply */
        alloc_len += plain_at_offset;

        alloc_len = size_roundup_power2(alloc_len);

        OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
        if (!req->rq_repbuf)
                return -ENOMEM;

        req->rq_repbuf_len = alloc_len;
        return 0;
}

static void plain_free_repbuf(struct ptlrpc_sec *sec,
                              struct ptlrpc_request *req)
{
        OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
        req->rq_repbuf = NULL;
        req->rq_repbuf_len = 0;
}

static int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
                                struct ptlrpc_request *req,
                                int segment, int newsize)
{
        struct lustre_msg      *newbuf;
        int                     oldsize;
        int                     newmsg_size, newbuf_size;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
                req->rq_reqmsg);

        /* compute new embedded msg size */
        oldsize = req->rq_reqmsg->lm_buflens[segment];
        req->rq_reqmsg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
                                         req->rq_reqmsg->lm_buflens);
        req->rq_reqmsg->lm_buflens[segment] = oldsize;

        /* compute new wrapper msg size */
        oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
        newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
                                         req->rq_reqbuf->lm_buflens);
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

        /* request from pool should always have enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                OBD_ALLOC_LARGE(newbuf, newbuf_size);
                if (newbuf == NULL)
                        return -ENOMEM;

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
                                                PLAIN_PACK_MSG_OFF, 0);
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
                                     newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        return 0;
}

/****************************************
 * service apis                         *
 ****************************************/

static struct ptlrpc_svc_ctx plain_svc_ctx = {
        .sc_refcount    = CFS_ATOMIC_INIT(1),
        .sc_policy      = &plain_policy,
};

static int plain_accept(struct ptlrpc_request *req)
{
        struct lustre_msg   *msg = req->rq_reqbuf;
        struct plain_header *phdr;
        int                  swabbed;

        LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
                SPTLRPC_POLICY_PLAIN);

        if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
            SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
            SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
            SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
                CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
                return SECSVC_DROP;
        }

        if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
                return SECSVC_DROP;
        }

        swabbed = ptlrpc_req_need_swab(req);

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
        if (phdr == NULL) {
                CERROR("missing plain header\n");
                return SECSVC_DROP;
        }

        if (phdr->ph_ver != 0) {
                CERROR("Invalid header version\n");
                return SECSVC_DROP;
        }

        if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
                CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
                return SECSVC_DROP;
        }

        req->rq_sp_from = phdr->ph_sp;
        req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;

        if (phdr->ph_flags & PLAIN_FL_USER) {
                if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
                                             swabbed)) {
                        CERROR("Mal-formed user descriptor\n");
                        return SECSVC_DROP;
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
        }

        if (phdr->ph_flags & PLAIN_FL_BULK) {
                if (plain_unpack_bsd(msg, swabbed))
                        return SECSVC_DROP;

                req->rq_pack_bulk = 1;
        }

        req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

        req->rq_svc_ctx = &plain_svc_ctx;
        cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);

        return SECSVC_OK;
}

static int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_reply_state *rs;
        __u32                      buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int                        rs_size = sizeof(*rs);

        LASSERT(msgsize % 8 == 0);

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;

        rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC_LARGE(rs, rs_size);
                if (rs == NULL)
                        return -ENOMEM;

                rs->rs_size = rs_size;
        }

        rs->rs_svc_ctx = req->rq_svc_ctx;
        cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = rs_size - sizeof(*rs);

        lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

        req->rq_reply_state = rs;
        return 0;
}

static void plain_free_rs(struct ptlrpc_reply_state *rs)
{
        LASSERT(cfs_atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
        cfs_atomic_dec(&rs->rs_svc_ctx->sc_refcount);

        if (!rs->rs_prealloc)
                OBD_FREE_LARGE(rs, rs->rs_size);
}

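/*
 * Finalize the reply: shrink the embedded message if the reply is smaller
 * than allocated, fill in the reply plain_header, and checksum the message
 * when it is not the final reply so the client can verify an early reply.
 */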
static int plain_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct lustre_msg_v2      *msg = rs->rs_repbuf;
        struct plain_header       *phdr;
        int                        len;

        LASSERT(rs);
        LASSERT(msg);

        if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
                len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
                                        req->rq_replen, 1);
        else
                len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        msg->lm_secflvr = req->rq_flvr.sf_rpc;

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
        phdr->ph_ver = 0;
        phdr->ph_flags = 0;
        phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

        if (req->rq_pack_bulk)
                phdr->ph_flags |= PLAIN_FL_BULK;

        rs->rs_repdata_len = len;

        if (likely(req->rq_packed_final)) {
                if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
                        req->rq_reply_off = plain_at_offset;
                else
                        req->rq_reply_off = 0;
        } else {
                msg->lm_cksum = crc32_le(!(__u32)0,
                                lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                                lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF));
                req->rq_reply_off = 0;
        }

        return 0;
}

static int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
                                 struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state   *rs = req->rq_reply_state;
        struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
        struct plain_bulk_token     *tokenr;
        int                          rc;

        LASSERT(req->rq_bulk_write);
        LASSERT(req->rq_pack_bulk);

        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);

        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                    tokenr);
        if (rc) {
                bsdv->bsd_flags |= BSD_FL_ERR;
                CERROR("bulk write: server verify failed: %d\n", rc);
        }

        return rc;
}

static int plain_svc_wrap_bulk(struct ptlrpc_request *req,
                               struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state   *rs = req->rq_reply_state;
        struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
        struct plain_bulk_token     *tokenv;
        int                          rc;

        LASSERT(req->rq_bulk_read);
        LASSERT(req->rq_pack_bulk);

        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
        tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                      tokenv);
        if (rc) {
                CERROR("bulk read: server failed to compute checksum: %d\n",
                       rc);
        } else {
                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        corrupt_bulk_data(desc);
        }

        return rc;
}

static struct ptlrpc_ctx_ops plain_ctx_ops = {
        .refresh                = plain_ctx_refresh,
        .validate               = plain_ctx_validate,
        .sign                   = plain_ctx_sign,
        .verify                 = plain_ctx_verify,
        .wrap_bulk              = plain_cli_wrap_bulk,
        .unwrap_bulk            = plain_cli_unwrap_bulk,
};

static struct ptlrpc_sec_cops plain_sec_cops = {
        .create_sec             = plain_create_sec,
        .destroy_sec            = plain_destroy_sec,
        .kill_sec               = plain_kill_sec,
        .lookup_ctx             = plain_lookup_ctx,
        .release_ctx            = plain_release_ctx,
        .flush_ctx_cache        = plain_flush_ctx_cache,
        .alloc_reqbuf           = plain_alloc_reqbuf,
        .free_reqbuf            = plain_free_reqbuf,
        .alloc_repbuf           = plain_alloc_repbuf,
        .free_repbuf            = plain_free_repbuf,
        .enlarge_reqbuf         = plain_enlarge_reqbuf,
};

static struct ptlrpc_sec_sops plain_sec_sops = {
        .accept                 = plain_accept,
        .alloc_rs               = plain_alloc_rs,
        .authorize              = plain_authorize,
        .free_rs                = plain_free_rs,
        .unwrap_bulk            = plain_svc_unwrap_bulk,
        .wrap_bulk              = plain_svc_wrap_bulk,
};

static struct ptlrpc_sec_policy plain_policy = {
        .sp_owner               = THIS_MODULE,
        .sp_name                = "plain",
        .sp_policy              = SPTLRPC_POLICY_PLAIN,
        .sp_cops                = &plain_sec_cops,
        .sp_sops                = &plain_sec_sops,
};

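/*
 * plain_at_offset is the size of an early reply packed with the plain layout;
 * plain_alloc_repbuf() reserves that much extra space and plain_authorize()
 * offsets the final reply by it when the client supports adaptive timeouts.
 */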
int sptlrpc_plain_init(void)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int   rc;

        buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
        plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rc = sptlrpc_register_policy(&plain_policy);
        if (rc)
                CERROR("failed to register: %d\n", rc);

        return rc;
}

void sptlrpc_plain_fini(void)
{
        int rc;

        rc = sptlrpc_unregister_policy(&plain_policy);
        if (rc)
                CERROR("cannot unregister: %d\n", rc);
}