-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: Eric Mei <ericm@clusterfs.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_SEC
-#ifndef __KERNEL__
-#include <liblustre.h>
-#endif
#include <obd_support.h>
#include <obd_cksum.h>
#include <lustre_net.h>
#include <lustre_sec.h>
+#include "ptlrpc_internal.h"
+
struct plain_sec {
struct ptlrpc_sec pls_base;
- rwlock_t pls_lock;
+ rwlock_t pls_lock;
struct ptlrpc_cli_ctx *pls_ctx;
};
static unsigned int plain_at_offset;
/*
- * flavor flags (maximum 8 flags)
+ * for simplicity, plain policy RPCs use a fixed layout.
*/
-#define PLAIN_WFLVR_FLAGS_OFFSET (12)
-#define PLAIN_WFLVR_FLAG_BULK (1 << (0 + PLAIN_WFLVR_FLAGS_OFFSET))
-#define PLAIN_WFLVR_FLAG_USER (1 << (1 + PLAIN_WFLVR_FLAGS_OFFSET))
+#define PLAIN_PACK_SEGMENTS (4)
+
+#define PLAIN_PACK_HDR_OFF (0)
+#define PLAIN_PACK_MSG_OFF (1)
+#define PLAIN_PACK_USER_OFF (2)
+#define PLAIN_PACK_BULK_OFF (3)
+
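+/* plain_header::ph_flags bits */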
+#define PLAIN_FL_USER (0x01)
+#define PLAIN_FL_BULK (0x02)
+
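+/* security header packed at segment PLAIN_PACK_HDR_OFF of every
+ * plain request and reply */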
+struct plain_header {
+ __u8 ph_ver; /* wire format version, currently 0 */
+ __u8 ph_flags; /* PLAIN_FL_* bits */
+ __u8 ph_sp; /* source sec part */
+ __u8 ph_bulk_hash_alg; /* bulk hash alg, completes the flavor desc */
+ __u8 ph_pad[4];
+};
-#define PLAIN_WFLVR_HAS_BULK(wflvr) \
- (((wflvr) & PLAIN_WFLVR_FLAG_BULK) != 0)
-#define PLAIN_WFLVR_HAS_USER(wflvr) \
- (((wflvr) & PLAIN_WFLVR_FLAG_USER) != 0)
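+/* bulk checksum token, carried in ptlrpc_bulk_sec_desc::bsd_data */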
+struct plain_bulk_token {
+ __u8 pbt_hash[8];
+};
-#define PLAIN_WFLVR_TO_RPC(wflvr) \
- ((wflvr) & ((1 << PLAIN_WFLVR_FLAGS_OFFSET) - 1))
+#define PLAIN_BSD_SIZE \
+ (sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))
-/*
- * similar to null sec, temporarily use the third byte of lm_secflvr to identify
- * the source sec part.
- */
-static inline
-void plain_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
+/****************************************
+ * bulk checksum helpers *
+ ****************************************/
+
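+/* swab and sanity-check the bulk security descriptor at segment
+ * PLAIN_PACK_BULK_OFF; only the null and integrity bulk services are
+ * valid for the plain policy */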
+static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
- msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 16;
+ struct ptlrpc_bulk_sec_desc *bsd;
+
+ if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
+ return -EPROTO;
+
+ bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
+ if (bsd == NULL) {
+ CERROR("bulk sec desc has short size %d\n",
+ lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
+ return -EPROTO;
+ }
+
+ if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
+ bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
+ CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
+ return -EPROTO;
+ }
+
+ return 0;
}
-static inline
-enum lustre_sec_part plain_decode_sec_part(struct lustre_msg *msg)
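+/* compute the checksum of the bulk pages into token->pbt_hash;
+ * BULK_HASH_ALG_NULL means no checksum is carried */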
+static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
+ __u8 hash_alg,
+ struct plain_bulk_token *token)
{
- return (msg->lm_secflvr >> 16) & 0xFF;
+ if (hash_alg == BULK_HASH_ALG_NULL)
+ return 0;
+
+ memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
+ return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
+ sizeof(token->pbt_hash));
}
-/*
- * for simplicity, plain policy rpc use fixed layout.
- */
-#define PLAIN_PACK_SEGMENTS (3)
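+/* recompute the bulk checksum and compare it against the token
+ * received from the peer; returns -EACCES on mismatch */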
+static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
+ __u8 hash_alg,
+ struct plain_bulk_token *tokenr)
+{
+ struct plain_bulk_token tokenv;
+ int rc;
-#define PLAIN_PACK_MSG_OFF (0)
-#define PLAIN_PACK_USER_OFF (1)
-#define PLAIN_PACK_BULK_OFF (2)
+ if (hash_alg == BULK_HASH_ALG_NULL)
+ return 0;
+
+ memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
+ rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
+ sizeof(tokenv.pbt_hash));
+ if (rc)
+ return rc;
+
+ if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
+ return -EACCES;
+ return 0;
+}
+
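+/* fault-injection helper: flip one bit in the first non-empty page of
+ * the bulk so that checksum verification fails on the peer */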
+static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
+{
+ char *ptr;
+ unsigned int off, i;
+
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ if (BD_GET_KIOV(desc, i).kiov_len == 0)
+ continue;
+
+ ptr = kmap(BD_GET_KIOV(desc, i).kiov_page);
+ off = BD_GET_KIOV(desc, i).kiov_offset & ~PAGE_MASK;
+ ptr[off] ^= 0x1;
+ kunmap(BD_GET_KIOV(desc, i).kiov_page);
+ return;
+ }
+}
/****************************************
* cli_ctx apis *
static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
- struct lustre_msg_v2 *msg = req->rq_reqbuf;
+ struct lustre_msg *msg = req->rq_reqbuf;
+ struct plain_header *phdr;
ENTRY;
msg->lm_secflvr = req->rq_flvr.sf_rpc;
- if (req->rq_pack_bulk)
- msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
- if (req->rq_pack_udesc)
- msg->lm_secflvr |= PLAIN_WFLVR_FLAG_USER;
- plain_encode_sec_part(msg, ctx->cc_sec->ps_part);
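+ /* fill the plain header: wire version, source sec part, and the
+  * bulk hash algorithm which completes the flavor description */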
+ phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
+ phdr->ph_ver = 0;
+ phdr->ph_flags = 0;
+ phdr->ph_sp = ctx->cc_sec->ps_part;
+ phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;
+
+ if (req->rq_pack_udesc)
+ phdr->ph_flags |= PLAIN_FL_USER;
+ if (req->rq_pack_bulk)
+ phdr->ph_flags |= PLAIN_FL_BULK;
req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
msg->lm_buflens);
static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
- struct lustre_msg *msg = req->rq_repdata;
- int early = 0;
- __u32 cksum;
+ struct lustre_msg *msg = req->rq_repdata;
+ struct plain_header *phdr;
+ __u32 cksum;
+ int swabbed;
ENTRY;
if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
RETURN(-EPROTO);
}
- /* find out if it's an early reply */
- if ((char *) msg < req->rq_repbuf ||
- (char *) msg >= req->rq_repbuf + req->rq_repbuf_len)
- early = 1;
+ swabbed = ptlrpc_rep_need_swab(req);
+
+ phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
+ if (phdr == NULL) {
+ CERROR("missing plain header\n");
+ RETURN(-EPROTO);
+ }
+
+ if (phdr->ph_ver != 0) {
+ CERROR("Invalid header version\n");
+ RETURN(-EPROTO);
+ }
/* expect no user desc in reply */
- if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
+ if (phdr->ph_flags & PLAIN_FL_USER) {
CERROR("Unexpected udesc flag in reply\n");
RETURN(-EPROTO);
}
- if (unlikely(early)) {
- cksum = crc32_le(!(__u32) 0,
- lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
- lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF));
- if (cksum != msg->lm_cksum) {
- CWARN("early reply checksum mismatch: %08x != %08x\n",
- cpu_to_le32(cksum), msg->lm_cksum);
- RETURN(-EINVAL);
- }
+ if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
+ CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
+ req->rq_flvr.u_bulk.hash.hash_alg);
+ RETURN(-EPROTO);
+ }
+
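+ /* an early reply is generated before the request is fully
+  * processed, so it carries a CRC32 checksum of the message
+  * instead of a bulk descriptor */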
+ if (unlikely(req->rq_early)) {
+ unsigned int hsize = 4;
+
+ cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
+ lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
+ lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
+ NULL, 0, (unsigned char *)&cksum, &hsize);
+ if (cksum != msg->lm_cksum) {
+ CDEBUG(D_SEC,
+ "early reply checksum mismatch: %08x != %08x\n",
+ cpu_to_le32(cksum), msg->lm_cksum);
+ RETURN(-EINVAL);
+ }
} else {
/* whether or not we sent bulk data, we expect the same in the
* reply, except for early replies */
- if (!early &&
+ if (!req->rq_early &&
!equi(req->rq_pack_bulk == 1,
- PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr))) {
+ phdr->ph_flags & PLAIN_FL_BULK)) {
CERROR("%s bulk checksum in reply\n",
req->rq_pack_bulk ? "Missing" : "Unexpected");
RETURN(-EPROTO);
}
- if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr) &&
- bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
- CERROR("Mal-formed bulk checksum reply\n");
- RETURN(-EINVAL);
+ if (phdr->ph_flags & PLAIN_FL_BULK) {
+ if (plain_unpack_bsd(msg, swabbed))
+ RETURN(-EPROTO);
}
}
struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
+ struct ptlrpc_bulk_sec_desc *bsd;
+ struct plain_bulk_token *token;
+ int rc;
+
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
- return bulk_csum_cli_request(desc, req->rq_bulk_read,
- req->rq_flvr.sf_bulk_hash,
- req->rq_reqbuf,
- PLAIN_PACK_BULK_OFF);
+ bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
+ token = (struct plain_bulk_token *) bsd->bsd_data;
+
+ bsd->bsd_version = 0;
+ bsd->bsd_flags = 0;
+ bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
+
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
+ RETURN(0);
+
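+ /* for bulk reads the checksum is generated by the server and
+  * verified in the unwrap path; only bulk writes are checksummed
+  * here */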
+ if (req->rq_bulk_read)
+ RETURN(0);
+
+ rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
+ token);
+ if (rc) {
+ CERROR("bulk write: failed to compute checksum: %d\n", rc);
+ } else {
+ /*
+  * when sending, we only compute a wrong checksum instead of
+  * corrupting the data, so the data is still correct on a resend
+  */
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
+ req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
+ token->pbt_hash[0] ^= 0x1;
+ }
+
+ return rc;
}
static
struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
+ struct ptlrpc_bulk_sec_desc *bsdv;
+ struct plain_bulk_token *tokenv;
+ int rc;
+ int i, nob;
+
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);
- return bulk_csum_cli_reply(desc, req->rq_bulk_read,
- req->rq_reqbuf, PLAIN_PACK_BULK_OFF,
- req->rq_repdata, PLAIN_PACK_BULK_OFF);
+ bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
+ tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
+
+ if (req->rq_bulk_write) {
+ if (bsdv->bsd_flags & BSD_FL_ERR)
+ return -EIO;
+ return 0;
+ }
+
+ /* fix the actual data size: clip each kiov_len so the total does
+  * not exceed bd_nob_transferred */
+ for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
+ if (BD_GET_KIOV(desc, i).kiov_len +
+ nob > desc->bd_nob_transferred) {
+ BD_GET_KIOV(desc, i).kiov_len =
+ desc->bd_nob_transferred - nob;
+ }
+ nob += BD_GET_KIOV(desc, i).kiov_len;
+ }
+
+ rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
+ tokenv);
+ if (rc)
+ CERROR("bulk read: client verify failed: %d\n", rc);
+
+ return rc;
}
/****************************************
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
- struct ptlrpc_cli_ctx *ctx, *ctx_new;
+ struct ptlrpc_cli_ctx *ctx, *ctx_new;
- OBD_ALLOC_PTR(ctx_new);
+ OBD_ALLOC_PTR(ctx_new);
- write_lock(&plsec->pls_lock);
+ write_lock(&plsec->pls_lock);
- ctx = plsec->pls_ctx;
- if (ctx) {
- atomic_inc(&ctx->cc_refcount);
-
- if (ctx_new)
- OBD_FREE_PTR(ctx_new);
- } else if (ctx_new) {
- ctx = ctx_new;
-
- atomic_set(&ctx->cc_refcount, 1); /* for cache */
- ctx->cc_sec = &plsec->pls_base;
- ctx->cc_ops = &plain_ctx_ops;
- ctx->cc_expire = 0;
- ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
- ctx->cc_vcred.vc_uid = 0;
- spin_lock_init(&ctx->cc_lock);
- CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
- CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
-
- plsec->pls_ctx = ctx;
- atomic_inc(&plsec->pls_base.ps_nctx);
- atomic_inc(&plsec->pls_base.ps_refcount);
-
- atomic_inc(&ctx->cc_refcount); /* for caller */
- }
+ ctx = plsec->pls_ctx;
+ if (ctx) {
+ atomic_inc(&ctx->cc_refcount);
+
+ if (ctx_new)
+ OBD_FREE_PTR(ctx_new);
+ } else if (ctx_new) {
+ ctx = ctx_new;
+
+ atomic_set(&ctx->cc_refcount, 1); /* for cache */
+ ctx->cc_sec = &plsec->pls_base;
+ ctx->cc_ops = &plain_ctx_ops;
+ ctx->cc_expire = 0;
+ ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
+ ctx->cc_vcred.vc_uid = 0;
+ spin_lock_init(&ctx->cc_lock);
+ INIT_LIST_HEAD(&ctx->cc_req_list);
+ INIT_LIST_HEAD(&ctx->cc_gc_chain);
- write_unlock(&plsec->pls_lock);
+ plsec->pls_ctx = ctx;
+ atomic_inc(&plsec->pls_base.ps_nctx);
+ atomic_inc(&plsec->pls_base.ps_refcount);
- return ctx;
+ atomic_inc(&ctx->cc_refcount); /* for caller */
+ }
+
+ write_unlock(&plsec->pls_lock);
+
+ return ctx;
}
static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
- struct plain_sec *plsec = sec2plsec(sec);
- ENTRY;
+ struct plain_sec *plsec = sec2plsec(sec);
+ ENTRY;
- LASSERT(sec->ps_policy == &plain_policy);
- LASSERT(sec->ps_import);
- LASSERT(atomic_read(&sec->ps_refcount) == 0);
- LASSERT(atomic_read(&sec->ps_nctx) == 0);
- LASSERT(plsec->pls_ctx == NULL);
+ LASSERT(sec->ps_policy == &plain_policy);
+ LASSERT(sec->ps_import);
+ LASSERT(atomic_read(&sec->ps_refcount) == 0);
+ LASSERT(atomic_read(&sec->ps_nctx) == 0);
+ LASSERT(plsec->pls_ctx == NULL);
- class_import_put(sec->ps_import);
+ class_import_put(sec->ps_import);
- OBD_FREE_PTR(plsec);
- EXIT;
+ OBD_FREE_PTR(plsec);
+ EXIT;
}
static
struct ptlrpc_cli_ctx *ctx;
ENTRY;
- LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
-
- if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL) {
- CERROR("plain policy don't support bulk cipher: %u\n",
- sf->sf_bulk_ciph);
- RETURN(NULL);
- }
+ LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
OBD_ALLOC_PTR(plsec);
if (plsec == NULL)
/*
* initialize plain_sec
*/
- plsec->pls_lock = RW_LOCK_UNLOCKED;
- plsec->pls_ctx = NULL;
-
- sec = &plsec->pls_base;
- sec->ps_policy = &plain_policy;
- atomic_set(&sec->ps_refcount, 0);
- atomic_set(&sec->ps_nctx, 0);
- sec->ps_id = sptlrpc_get_next_secid();
- sec->ps_import = class_import_get(imp);
- sec->ps_flvr = *sf;
- sec->ps_lock = SPIN_LOCK_UNLOCKED;
- CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
+ rwlock_init(&plsec->pls_lock);
+ plsec->pls_ctx = NULL;
+
+ sec = &plsec->pls_base;
+ sec->ps_policy = &plain_policy;
+ atomic_set(&sec->ps_refcount, 0);
+ atomic_set(&sec->ps_nctx, 0);
+ sec->ps_id = sptlrpc_get_next_secid();
+ sec->ps_import = class_import_get(imp);
+ sec->ps_flvr = *sf;
+ spin_lock_init(&sec->ps_lock);
+ INIT_LIST_HEAD(&sec->ps_gc_list);
sec->ps_gc_interval = 0;
sec->ps_gc_next = 0;
struct vfs_cred *vcred,
int create, int remove_dead)
{
- struct plain_sec *plsec = sec2plsec(sec);
- struct ptlrpc_cli_ctx *ctx;
- ENTRY;
+ struct plain_sec *plsec = sec2plsec(sec);
+ struct ptlrpc_cli_ctx *ctx;
+ ENTRY;
- read_lock(&plsec->pls_lock);
- ctx = plsec->pls_ctx;
- if (ctx)
- atomic_inc(&ctx->cc_refcount);
- read_unlock(&plsec->pls_lock);
+ read_lock(&plsec->pls_lock);
+ ctx = plsec->pls_ctx;
+ if (ctx)
+ atomic_inc(&ctx->cc_refcount);
+ read_unlock(&plsec->pls_lock);
- if (unlikely(ctx == NULL))
- ctx = plain_sec_install_ctx(plsec);
+ if (unlikely(ctx == NULL))
+ ctx = plain_sec_install_ctx(plsec);
- RETURN(ctx);
+ RETURN(ctx);
}
static
void plain_release_ctx(struct ptlrpc_sec *sec,
struct ptlrpc_cli_ctx *ctx, int sync)
{
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
- LASSERT(atomic_read(&sec->ps_nctx) > 0);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
- LASSERT(ctx->cc_sec == sec);
+ LASSERT(atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(ctx->cc_sec == sec);
- OBD_FREE_PTR(ctx);
+ OBD_FREE_PTR(ctx);
- atomic_dec(&sec->ps_nctx);
- sptlrpc_sec_put(sec);
+ atomic_dec(&sec->ps_nctx);
+ sptlrpc_sec_put(sec);
}
static
if (uid != -1)
RETURN(0);
- write_lock(&plsec->pls_lock);
+ write_lock(&plsec->pls_lock);
ctx = plsec->pls_ctx;
plsec->pls_ctx = NULL;
- write_unlock(&plsec->pls_lock);
+ write_unlock(&plsec->pls_lock);
if (ctx)
sptlrpc_cli_ctx_put(ctx, 1);
int msgsize)
{
__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
- int alloc_len;
+ int alloc_len;
ENTRY;
+ buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
buflens[PLAIN_PACK_MSG_OFF] = msgsize;
if (req->rq_pack_udesc)
if (req->rq_pack_bulk) {
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
-
- buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 1,
- req->rq_bulk_read);
+ buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
}
alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
LASSERT(!req->rq_pool);
alloc_len = size_roundup_power2(alloc_len);
- OBD_ALLOC(req->rq_reqbuf, alloc_len);
+ OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
if (!req->rq_reqbuf)
RETURN(-ENOMEM);
}
lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
- req->rq_reqmsg = lustre_msg_buf_v2(req->rq_reqbuf, 0, 0);
+ req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);
if (req->rq_pack_udesc)
sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);
{
ENTRY;
if (!req->rq_pool) {
- OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
req->rq_reqbuf = NULL;
req->rq_reqbuf_len = 0;
}
-
- req->rq_reqmsg = NULL;
EXIT;
}
int alloc_len;
ENTRY;
+ buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
buflens[PLAIN_PACK_MSG_OFF] = msgsize;
if (req->rq_pack_bulk) {
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
- buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 0,
- req->rq_bulk_read);
+ buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
}
alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
alloc_len = size_roundup_power2(alloc_len);
- OBD_ALLOC(req->rq_repbuf, alloc_len);
+ OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
if (!req->rq_repbuf)
RETURN(-ENOMEM);
struct ptlrpc_request *req)
{
ENTRY;
- OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
+ OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
req->rq_repbuf = NULL;
req->rq_repbuf_len = 0;
-
- req->rq_repmsg = NULL;
EXIT;
}
if (req->rq_reqbuf_len < newbuf_size) {
newbuf_size = size_roundup_power2(newbuf_size);
- OBD_ALLOC(newbuf, newbuf_size);
+ OBD_ALLOC_LARGE(newbuf, newbuf_size);
if (newbuf == NULL)
RETURN(-ENOMEM);
+ /* Must take this lock so that the otherwise unprotected change of
+ * rq_reqmsg does not race with threads traversing imp_replay_list
+ * in parallel. See LU-3333.
+ * This is a band-aid at best; the real fix is to deal with this in
+ * the request-enlarging code, before the unpacking that has already
+ * happened by this point */
+ if (req->rq_import)
+ spin_lock(&req->rq_import->imp_lock);
+
memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
- OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
req->rq_reqbuf = newbuf;
req->rq_reqbuf_len = newbuf_size;
req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
PLAIN_PACK_MSG_OFF, 0);
+
+ if (req->rq_import)
+ spin_unlock(&req->rq_import->imp_lock);
}
_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
****************************************/
static struct ptlrpc_svc_ctx plain_svc_ctx = {
- .sc_refcount = ATOMIC_INIT(1),
+ .sc_refcount = ATOMIC_INIT(1),
.sc_policy = &plain_policy,
};
static
int plain_accept(struct ptlrpc_request *req)
{
- struct lustre_msg *msg = req->rq_reqbuf;
+ struct lustre_msg *msg = req->rq_reqbuf;
+ struct plain_header *phdr;
+ int swabbed;
ENTRY;
- LASSERT(RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == SPTLRPC_POLICY_PLAIN);
+ LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
+ SPTLRPC_POLICY_PLAIN);
+
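+ /* only the base flavor and bulk type must match SPTLRPC_FLVR_PLAIN;
+  * the bulk service part may legitimately vary per request */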
+ if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
+ SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
+ SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
+ SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
+ CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
+ RETURN(SECSVC_DROP);
+ }
if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
RETURN(SECSVC_DROP);
}
- if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_PLAIN) {
- CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
- RETURN(SECSVC_DROP);
+ swabbed = ptlrpc_req_need_swab(req);
+
+ phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
+ if (phdr == NULL) {
+ CERROR("missing plain header\n");
+ RETURN(-EPROTO);
}
- req->rq_sp_from = plain_decode_sec_part(msg);
+ if (phdr->ph_ver != 0) {
+ CERROR("Invalid header version\n");
+ RETURN(-EPROTO);
+ }
+
+ if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
+ CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
+ RETURN(-EPROTO);
+ }
- if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
- if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF)) {
+ req->rq_sp_from = phdr->ph_sp;
+ req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;
+
+ if (phdr->ph_flags & PLAIN_FL_USER) {
+ if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
+ swabbed)) {
CERROR("Mal-formed user descriptor\n");
RETURN(SECSVC_DROP);
}
req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
}
- if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr)) {
- if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
- CERROR("Mal-formed bulk checksum request\n");
+ if (phdr->ph_flags & PLAIN_FL_BULK) {
+ if (plain_unpack_bsd(msg, swabbed))
RETURN(SECSVC_DROP);
- }
req->rq_pack_bulk = 1;
}
- req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
- req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
+ req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
+ req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
- req->rq_svc_ctx = &plain_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
+ req->rq_svc_ctx = &plain_svc_ctx;
+ atomic_inc(&req->rq_svc_ctx->sc_refcount);
- RETURN(SECSVC_OK);
+ RETURN(SECSVC_OK);
}
static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
struct ptlrpc_reply_state *rs;
- struct ptlrpc_bulk_sec_desc *bsd;
__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
int rs_size = sizeof(*rs);
ENTRY;
LASSERT(msgsize % 8 == 0);
+ buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
buflens[PLAIN_PACK_MSG_OFF] = msgsize;
- if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write)) {
- bsd = lustre_msg_buf(req->rq_reqbuf,
- PLAIN_PACK_BULK_OFF, sizeof(*bsd));
- LASSERT(bsd);
+ if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
+ buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
- buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
- bsd->bsd_hash_alg, 0,
- req->rq_bulk_read);
- }
rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
rs = req->rq_reply_state;
/* pre-allocated */
LASSERT(rs->rs_size >= rs_size);
} else {
- OBD_ALLOC(rs, rs_size);
+ OBD_ALLOC_LARGE(rs, rs_size);
if (rs == NULL)
RETURN(-ENOMEM);
rs->rs_size = rs_size;
}
- rs->rs_svc_ctx = req->rq_svc_ctx;
- atomic_inc(&req->rq_svc_ctx->sc_refcount);
- rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
- rs->rs_repbuf_len = rs_size - sizeof(*rs);
+ rs->rs_svc_ctx = req->rq_svc_ctx;
+ atomic_inc(&req->rq_svc_ctx->sc_refcount);
+ rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
+ rs->rs_repbuf_len = rs_size - sizeof(*rs);
- lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
- rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);
+ lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
+ rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);
- req->rq_reply_state = rs;
- RETURN(0);
+ req->rq_reply_state = rs;
+ RETURN(0);
}
static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
- ENTRY;
+ ENTRY;
- LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
- atomic_dec(&rs->rs_svc_ctx->sc_refcount);
+ LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
+ atomic_dec(&rs->rs_svc_ctx->sc_refcount);
- if (!rs->rs_prealloc)
- OBD_FREE(rs, rs->rs_size);
- EXIT;
+ if (!rs->rs_prealloc)
+ OBD_FREE_LARGE(rs, rs->rs_size);
+ EXIT;
}
static
{
struct ptlrpc_reply_state *rs = req->rq_reply_state;
struct lustre_msg_v2 *msg = rs->rs_repbuf;
+ struct plain_header *phdr;
int len;
ENTRY;
len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
msg->lm_secflvr = req->rq_flvr.sf_rpc;
+
+ phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
+ phdr->ph_ver = 0;
+ phdr->ph_flags = 0;
+ phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;
+
if (req->rq_pack_bulk)
- msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
+ phdr->ph_flags |= PLAIN_FL_BULK;
rs->rs_repdata_len = len;
if (likely(req->rq_packed_final)) {
- req->rq_reply_off = plain_at_offset;
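+ /* only clients advertising MSGHDR_AT_SUPPORT expect the reply
+  * at the precomputed adaptive-timeout offset; older clients
+  * expect it at offset 0 */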
+ if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
+ req->rq_reply_off = plain_at_offset;
+ else
+ req->rq_reply_off = 0;
} else {
- msg->lm_cksum = crc32_le(!(__u32) 0,
- lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
- lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF));
- req->rq_reply_off = 0;
+ unsigned int hsize = 4;
+
+ cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
+ lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
+ lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
+ NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
+ req->rq_reply_off = 0;
}
RETURN(0);
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ struct plain_bulk_token *tokenr;
+ int rc;
- LASSERT(rs);
+ LASSERT(req->rq_bulk_write);
LASSERT(req->rq_pack_bulk);
- LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
- LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
-
- return bulk_csum_svc(desc, req->rq_bulk_read,
- lustre_msg_buf(req->rq_reqbuf,
- PLAIN_PACK_BULK_OFF, 0),
- lustre_msg_buflen(req->rq_reqbuf,
- PLAIN_PACK_BULK_OFF),
- lustre_msg_buf(rs->rs_repbuf,
- PLAIN_PACK_BULK_OFF, 0),
- lustre_msg_buflen(rs->rs_repbuf,
- PLAIN_PACK_BULK_OFF));
+
+ bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
+ tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
+ bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
+
+ bsdv->bsd_version = 0;
+ bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsdv->bsd_svc = bsdr->bsd_svc;
+ bsdv->bsd_flags = 0;
+
+ if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
+ return 0;
+
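+ /* bulk write: verify the checksum the client sent; on mismatch,
+  * flag the reply descriptor so the client fails the transfer */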
+ rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
+ tokenr);
+ if (rc) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("bulk write: server verify failed: %d\n", rc);
+ }
+
+ return rc;
}
static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ struct plain_bulk_token *tokenv;
+ int rc;
- LASSERT(rs);
+ LASSERT(req->rq_bulk_read);
LASSERT(req->rq_pack_bulk);
- LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
- LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
-
- return bulk_csum_svc(desc, req->rq_bulk_read,
- lustre_msg_buf(req->rq_reqbuf,
- PLAIN_PACK_BULK_OFF, 0),
- lustre_msg_buflen(req->rq_reqbuf,
- PLAIN_PACK_BULK_OFF),
- lustre_msg_buf(rs->rs_repbuf,
- PLAIN_PACK_BULK_OFF, 0),
- lustre_msg_buflen(rs->rs_repbuf,
- PLAIN_PACK_BULK_OFF));
+
+ bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
+ bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
+ tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
+
+ bsdv->bsd_version = 0;
+ bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsdv->bsd_svc = bsdr->bsd_svc;
+ bsdv->bsd_flags = 0;
+
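+ /* bulk read: compute the checksum the client will verify;
+  * OBD_FAIL_OSC_CHECKSUM_RECEIVE deliberately corrupts the data to
+  * exercise the client-side verification path */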
+ if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
+ return 0;
+
+ rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
+ tokenv);
+ if (rc) {
+ CERROR("bulk read: server failed to compute "
+ "checksum: %d\n", rc);
+ } else {
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
+ corrupt_bulk_data(desc);
+ }
+
+ return rc;
}
static struct ptlrpc_ctx_ops plain_ctx_ops = {
.release_ctx = plain_release_ctx,
.flush_ctx_cache = plain_flush_ctx_cache,
.alloc_reqbuf = plain_alloc_reqbuf,
- .alloc_repbuf = plain_alloc_repbuf,
.free_reqbuf = plain_free_reqbuf,
+ .alloc_repbuf = plain_alloc_repbuf,
.free_repbuf = plain_free_repbuf,
.enlarge_reqbuf = plain_enlarge_reqbuf,
};