X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fsec_null.c;h=1d02bc5be89cf8543879b7d1feca0e1cf11a3390;hb=545c662f634798d4ce97c25d6749aeba00d87be8;hp=52100aec18f69546da3b5ac746b08ec101ecbaab;hpb=30dda5e616718499f54d06a7e3723dea4c4a7e79;p=fs%2Flustre-release.git

diff --git a/lustre/ptlrpc/sec_null.c b/lustre/ptlrpc/sec_null.c
index 52100ae..1d02bc5 100644
--- a/lustre/ptlrpc/sec_null.c
+++ b/lustre/ptlrpc/sec_null.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -26,8 +24,10 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -38,9 +38,6 @@
  * Author: Eric Mei
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_SEC
 
 #ifndef __KERNEL__
@@ -107,7 +104,7 @@ int null_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
 
         if (req->rq_early) {
                 cksums = lustre_msg_get_cksum(req->rq_repdata);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 9, 0, 0)
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
                 if (lustre_msghdr_get_flags(req->rq_reqmsg) &
                     MSGHDR_CKSUM_INCOMPAT18)
                         cksumc = lustre_msg_calc_cksum(req->rq_repmsg, 0);
@@ -118,8 +115,9 @@ int null_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
                 cksumc = lustre_msg_calc_cksum(req->rq_repmsg);
 #endif
                 if (cksumc != cksums) {
-                        CWARN("early reply checksum mismatch: %08x != %08x\n",
-                              cksumc, cksums);
+			CDEBUG(D_SEC,
+			       "early reply checksum mismatch: %08x != %08x\n",
+			       cksumc, cksums);
                         return -EINVAL;
                 }
         }
@@ -152,8 +150,8 @@ struct ptlrpc_cli_ctx *null_lookup_ctx(struct ptlrpc_sec *sec,
                                        struct vfs_cred *vcred,
                                        int create, int remove_dead)
 {
-        cfs_atomic_inc(&null_cli_ctx.cc_refcount);
-        return &null_cli_ctx;
+	atomic_inc(&null_cli_ctx.cc_refcount);
+	return &null_cli_ctx;
 }
 
 static
@@ -173,7 +171,7 @@ int null_alloc_reqbuf(struct ptlrpc_sec *sec,
                 int alloc_size = size_roundup_power2(msgsize);
 
                 LASSERT(!req->rq_pool);
-                OBD_ALLOC(req->rq_reqbuf, alloc_size);
+                OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_size);
                 if (!req->rq_reqbuf)
                         return -ENOMEM;
 
@@ -200,12 +198,10 @@ void null_free_reqbuf(struct ptlrpc_sec *sec,
                          "req %p: reqlen %d should smaller than buflen %d\n",
                          req, req->rq_reqlen, req->rq_reqbuf_len);
 
-                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
-                req->rq_reqmsg = req->rq_reqbuf = NULL;
+		OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+		req->rq_reqbuf = NULL;
                 req->rq_reqbuf_len = 0;
         }
-
-        req->rq_reqmsg = NULL;
 }
 
 static
@@ -218,7 +214,7 @@ int null_alloc_repbuf(struct ptlrpc_sec *sec,
 
         msgsize = size_roundup_power2(msgsize);
 
-        OBD_ALLOC(req->rq_repbuf, msgsize);
+        OBD_ALLOC_LARGE(req->rq_repbuf, msgsize);
         if (!req->rq_repbuf)
                 return -ENOMEM;
 
@@ -232,11 +228,9 @@ void null_free_repbuf(struct ptlrpc_sec *sec,
 {
         LASSERT(req->rq_repbuf);
 
-        OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
+        OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
         req->rq_repbuf = NULL;
         req->rq_repbuf_len = 0;
-
-        req->rq_repmsg = NULL;
 }
 
 static
@@ -265,15 +259,26 @@ int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
         if (req->rq_reqbuf_len < newmsg_size) {
                 alloc_size = size_roundup_power2(newmsg_size);
 
-                OBD_ALLOC(newbuf, alloc_size);
+		OBD_ALLOC_LARGE(newbuf, alloc_size);
                 if (newbuf == NULL)
                         return -ENOMEM;
 
+		/* Must lock this, so that otherwise unprotected change of
+		 * rq_reqmsg is not racing with parallel processing of
+		 * imp_replay_list traversing threads. See LU-3333
+		 * This is a bandaid at best, we really need to deal with this
+		 * in request enlarging code before unpacking that's already
+		 * there */
+		if (req->rq_import)
+			spin_lock(&req->rq_import->imp_lock);
                 memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);
 
-                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
+		OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                 req->rq_reqbuf = req->rq_reqmsg = newbuf;
                 req->rq_reqbuf_len = alloc_size;
+
+		if (req->rq_import)
+			spin_unlock(&req->rq_import->imp_lock);
         }
 
         _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
@@ -283,30 +288,30 @@ int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
 }
 
 static struct ptlrpc_svc_ctx null_svc_ctx = {
-        .sc_refcount    = CFS_ATOMIC_INIT(1),
+	.sc_refcount    = ATOMIC_INIT(1),
         .sc_policy      = &null_policy,
 };
 
 static
 int null_accept(struct ptlrpc_request *req)
 {
-        LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
-                SPTLRPC_POLICY_NULL);
+	LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
+		SPTLRPC_POLICY_NULL);
 
-        if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL) {
-                CERROR("Invalid rpc flavor 0x%x\n", req->rq_flvr.sf_rpc);
-                return SECSVC_DROP;
-        }
+	if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL) {
+		CERROR("Invalid rpc flavor 0x%x\n", req->rq_flvr.sf_rpc);
+		return SECSVC_DROP;
+	}
 
-        req->rq_sp_from = null_decode_sec_part(req->rq_reqbuf);
+	req->rq_sp_from = null_decode_sec_part(req->rq_reqbuf);
 
-        req->rq_reqmsg = req->rq_reqbuf;
-        req->rq_reqlen = req->rq_reqdata_len;
+	req->rq_reqmsg = req->rq_reqbuf;
+	req->rq_reqlen = req->rq_reqdata_len;
 
-        req->rq_svc_ctx = &null_svc_ctx;
-        cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
+	req->rq_svc_ctx = &null_svc_ctx;
+	atomic_inc(&req->rq_svc_ctx->sc_refcount);
 
-        return SECSVC_OK;
+	return SECSVC_OK;
 }
 
 static
@@ -323,32 +328,32 @@ int null_alloc_rs(struct ptlrpc_request *req, int msgsize)
                 /* pre-allocated */
                 LASSERT(rs->rs_size >= rs_size);
         } else {
-                OBD_ALLOC(rs, rs_size);
+		OBD_ALLOC_LARGE(rs, rs_size);
                 if (rs == NULL)
                         return -ENOMEM;
 
                 rs->rs_size = rs_size;
         }
 
-        rs->rs_svc_ctx = req->rq_svc_ctx;
-        cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);
+	rs->rs_svc_ctx = req->rq_svc_ctx;
+	atomic_inc(&req->rq_svc_ctx->sc_refcount);
 
-        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
-        rs->rs_repbuf_len = rs_size - sizeof(*rs);
-        rs->rs_msg = rs->rs_repbuf;
+	rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
+	rs->rs_repbuf_len = rs_size - sizeof(*rs);
+	rs->rs_msg = rs->rs_repbuf;
 
-        req->rq_reply_state = rs;
-        return 0;
+	req->rq_reply_state = rs;
+	return 0;
 }
 
 static
 void null_free_rs(struct ptlrpc_reply_state *rs)
 {
-        LASSERT(cfs_atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
-        cfs_atomic_dec(&rs->rs_svc_ctx->sc_refcount);
+	LASSERT_ATOMIC_GT(&rs->rs_svc_ctx->sc_refcount, 1);
+	atomic_dec(&rs->rs_svc_ctx->sc_refcount);
 
-        if (!rs->rs_prealloc)
-                OBD_FREE(rs, rs->rs_size);
+	if (!rs->rs_prealloc)
+		OBD_FREE_LARGE(rs, rs->rs_size);
 }
 
 static
@@ -369,7 +374,7 @@ int null_authorize(struct ptlrpc_request *req)
         } else {
                 __u32 cksum;
 
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 9, 0, 0)
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
                 if (lustre_msghdr_get_flags(req->rq_reqmsg) &
                     MSGHDR_CKSUM_INCOMPAT18)
                         cksum = lustre_msg_calc_cksum(rs->rs_repbuf, 0);
@@ -421,33 +426,33 @@ static struct ptlrpc_sec_policy null_policy = {
 static
 void null_init_internal(void)
 {
-        static CFS_HLIST_HEAD(__list);
-
-        null_sec.ps_policy = &null_policy;
-        cfs_atomic_set(&null_sec.ps_refcount, 1);     /* always busy */
-        null_sec.ps_id = -1;
-        null_sec.ps_import = NULL;
-        null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
-        null_sec.ps_flvr.sf_flags = 0;
-        null_sec.ps_part = LUSTRE_SP_ANY;
-        null_sec.ps_dying = 0;
-        cfs_spin_lock_init(&null_sec.ps_lock);
-        cfs_atomic_set(&null_sec.ps_nctx, 1);         /* for "null_cli_ctx" */
-        CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
-        null_sec.ps_gc_interval = 0;
-        null_sec.ps_gc_next = 0;
-
-        cfs_hlist_add_head(&null_cli_ctx.cc_cache, &__list);
-        cfs_atomic_set(&null_cli_ctx.cc_refcount, 1); /* for hash */
-        null_cli_ctx.cc_sec = &null_sec;
-        null_cli_ctx.cc_ops = &null_ctx_ops;
-        null_cli_ctx.cc_expire = 0;
-        null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
-                                PTLRPC_CTX_UPTODATE;
-        null_cli_ctx.cc_vcred.vc_uid = 0;
-        cfs_spin_lock_init(&null_cli_ctx.cc_lock);
-        CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
-        CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
+	static CFS_HLIST_HEAD(__list);
+
+	null_sec.ps_policy = &null_policy;
+	atomic_set(&null_sec.ps_refcount, 1);	/* always busy */
+	null_sec.ps_id = -1;
+	null_sec.ps_import = NULL;
+	null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
+	null_sec.ps_flvr.sf_flags = 0;
+	null_sec.ps_part = LUSTRE_SP_ANY;
+	null_sec.ps_dying = 0;
+	spin_lock_init(&null_sec.ps_lock);
+	atomic_set(&null_sec.ps_nctx, 1);	/* for "null_cli_ctx" */
+	CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
+	null_sec.ps_gc_interval = 0;
+	null_sec.ps_gc_next = 0;
+
+	cfs_hlist_add_head(&null_cli_ctx.cc_cache, &__list);
+	atomic_set(&null_cli_ctx.cc_refcount, 1);	/* for hash */
+	null_cli_ctx.cc_sec = &null_sec;
+	null_cli_ctx.cc_ops = &null_ctx_ops;
+	null_cli_ctx.cc_expire = 0;
+	null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
+				PTLRPC_CTX_UPTODATE;
+	null_cli_ctx.cc_vcred.vc_uid = 0;
+	spin_lock_init(&null_cli_ctx.cc_lock);
+	CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
+	CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
 }
 
 int sptlrpc_null_init(void)
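
Note on the LU-3333 hunk in null_enlarge_reqbuf() above: the patch takes imp_lock around the rq_reqbuf/rq_reqmsg swap so that threads walking imp_replay_list never dereference a request buffer that is being reallocated out from under them. The standalone user-space sketch below is not part of the patch and uses made-up names (msg_slot, slot_enlarge, slot_read); it only illustrates the same pattern, assuming the buffer is only ever grown: allocate the new buffer first, then perform the copy, free, and pointer swap while holding the same lock the readers take.

/*
 * Illustrative analogue of the LU-3333 band-aid, not Lustre code.
 * Build with: cc -pthread example.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct msg_slot {
	pthread_mutex_t	lock;	/* stands in for imp_lock */
	char		*buf;	/* stands in for rq_reqbuf/rq_reqmsg */
	size_t		len;
};

/* Grow the buffer: allocate outside the lock, swap the visible pointer
 * only while holding it (assumes newlen >= current len, as in the patch). */
static int slot_enlarge(struct msg_slot *s, size_t newlen)
{
	char *newbuf = calloc(1, newlen);

	if (newbuf == NULL)
		return -1;

	pthread_mutex_lock(&s->lock);
	memcpy(newbuf, s->buf, s->len);
	free(s->buf);
	s->buf = newbuf;
	s->len = newlen;
	pthread_mutex_unlock(&s->lock);
	return 0;
}

/* A reader (the "replay list walker") holds the same lock for the whole
 * time it dereferences the shared buffer pointer. */
static size_t slot_read(struct msg_slot *s, char *out, size_t outlen)
{
	size_t n;

	pthread_mutex_lock(&s->lock);
	n = s->len < outlen ? s->len : outlen;
	memcpy(out, s->buf, n);
	pthread_mutex_unlock(&s->lock);
	return n;
}

int main(void)
{
	struct msg_slot s = { .lock = PTHREAD_MUTEX_INITIALIZER };
	char out[64];

	s.len = 8;
	s.buf = calloc(1, s.len);
	strcpy(s.buf, "hello");

	slot_enlarge(&s, 32);	/* pointer swap happens under the lock */
	printf("%zu bytes: %s\n", slot_read(&s, out, sizeof(out)), out);

	free(s.buf);
	return 0;
}

As the in-tree comment itself says, this locking is a band-aid: a cleaner fix would enlarge the request buffer before it becomes visible to other threads, so no lock would be needed around the swap at all.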