X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=sidebyside;f=lustre%2Fptlrpc%2Fsec_plain.c;h=dea70d160b54efd9ec6aa4d7c6d8c2fd668a7e64;hb=553d93361d2db4ff39bf19ac66dc2d79f6e3e324;hp=1b2f437061c8579a134893de83e4804dca362163;hpb=d00c924149b5dd042e6fee09ef724013df1b4e14;p=fs%2Flustre-release.git

diff --git a/lustre/ptlrpc/sec_plain.c b/lustre/ptlrpc/sec_plain.c
index 1b2f437..dea70d1 100644
--- a/lustre/ptlrpc/sec_plain.c
+++ b/lustre/ptlrpc/sec_plain.c
@@ -15,11 +15,7 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -159,14 +155,16 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
 	char		*ptr;
 	unsigned int	 off, i;
 
+	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
 	for (i = 0; i < desc->bd_iov_count; i++) {
-		if (desc->bd_iov[i].kiov_len == 0)
+		if (BD_GET_KIOV(desc, i).kiov_len == 0)
 			continue;
 
-		ptr = kmap(desc->bd_iov[i].kiov_page);
-		off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+		ptr = kmap(BD_GET_KIOV(desc, i).kiov_page);
+		off = BD_GET_KIOV(desc, i).kiov_offset & ~PAGE_MASK;
 		ptr[off] ^= 0x1;
-		kunmap(desc->bd_iov[i].kiov_page);
+		kunmap(BD_GET_KIOV(desc, i).kiov_page);
 		return;
 	}
 }
@@ -217,12 +215,12 @@ int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
 static
 int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
 {
-        struct lustre_msg   *msg = req->rq_repdata;
-        struct plain_header *phdr;
-        __u32                cksum;
-        int                  swabbed;
-        ENTRY;
+	struct lustre_msg *msg = req->rq_repdata;
+	struct plain_header *phdr;
+	__u32 cksum;
+	bool swabbed;
+	ENTRY;
 	if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
 		CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
 		RETURN(-EPROTO);
 	}
@@ -341,6 +339,7 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 	int rc;
 	int i, nob;
 
+	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 	LASSERT(req->rq_pack_bulk);
 	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
 	LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);
@@ -354,14 +353,15 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 		return 0;
 	}
 
-        /* fix the actual data size */
-        for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
-                if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
-                        desc->bd_iov[i].kiov_len =
-                                desc->bd_nob_transferred - nob;
-                }
-                nob += desc->bd_iov[i].kiov_len;
-        }
+	/* fix the actual data size */
+	for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
+		if (BD_GET_KIOV(desc, i).kiov_len +
+		    nob > desc->bd_nob_transferred) {
+			BD_GET_KIOV(desc, i).kiov_len =
+				desc->bd_nob_transferred - nob;
+		}
+		nob += BD_GET_KIOV(desc, i).kiov_len;
+	}
 
 	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
 				    tokenv);
@@ -566,15 +566,15 @@ int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
 
 	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
 
-        if (!req->rq_reqbuf) {
-                LASSERT(!req->rq_pool);
+	if (!req->rq_reqbuf) {
+		LASSERT(!req->rq_pool);
 
-                alloc_len = size_roundup_power2(alloc_len);
-                OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
-                if (!req->rq_reqbuf)
-                        RETURN(-ENOMEM);
+		alloc_len = size_roundup_power2(alloc_len);
+		OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
+		if (!req->rq_reqbuf)
+			RETURN(-ENOMEM);
 
-                req->rq_reqbuf_len = alloc_len;
+		req->rq_reqbuf_len = alloc_len;
 	} else {
 		LASSERT(req->rq_pool);
 		LASSERT(req->rq_reqbuf_len >= alloc_len);
@@ -594,13 +594,13 @@ static
 void plain_free_reqbuf(struct ptlrpc_sec *sec,
 		       struct ptlrpc_request *req)
 {
-        ENTRY;
-        if (!req->rq_pool) {
-                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
-                req->rq_reqbuf = NULL;
-                req->rq_reqbuf_len = 0;
-        }
-        EXIT;
+	ENTRY;
+	if (!req->rq_pool) {
+		OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+		req->rq_reqbuf = NULL;
+		req->rq_reqbuf_len = 0;
+	}
+	EXIT;
 }
 
 static
@@ -627,12 +627,12 @@ int plain_alloc_repbuf(struct ptlrpc_sec *sec,
 
 	alloc_len = size_roundup_power2(alloc_len);
 
-        OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
-        if (!req->rq_repbuf)
-                RETURN(-ENOMEM);
+	OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
+	if (!req->rq_repbuf)
+		RETURN(-ENOMEM);
 
-        req->rq_repbuf_len = alloc_len;
-        RETURN(0);
+	req->rq_repbuf_len = alloc_len;
+	RETURN(0);
 }
 
 static
@@ -678,12 +678,12 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
 	/* request from pool should always have enough buffer */
 	LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
 
-        if (req->rq_reqbuf_len < newbuf_size) {
-                newbuf_size = size_roundup_power2(newbuf_size);
+	if (req->rq_reqbuf_len < newbuf_size) {
+		newbuf_size = size_roundup_power2(newbuf_size);
 
-                OBD_ALLOC_LARGE(newbuf, newbuf_size);
-                if (newbuf == NULL)
-                        RETURN(-ENOMEM);
+		OBD_ALLOC_LARGE(newbuf, newbuf_size);
+		if (newbuf == NULL)
+			RETURN(-ENOMEM);
 
 		/* Must lock this, so that otherwise unprotected change of
 		 * rq_reqmsg is not racing with parallel processing of
@@ -694,24 +694,24 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
 		if (req->rq_import)
 			spin_lock(&req->rq_import->imp_lock);
 
-                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
+		memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
 
-                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
-                req->rq_reqbuf = newbuf;
-                req->rq_reqbuf_len = newbuf_size;
-                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
-                                                PLAIN_PACK_MSG_OFF, 0);
+		OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+		req->rq_reqbuf = newbuf;
+		req->rq_reqbuf_len = newbuf_size;
+		req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
+						PLAIN_PACK_MSG_OFF, 0);
 
 		if (req->rq_import)
 			spin_unlock(&req->rq_import->imp_lock);
-        }
+	}
 
-        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
-                                     newmsg_size);
-        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
+	_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
+				     newmsg_size);
+	_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
 
-        req->rq_reqlen = newmsg_size;
-        RETURN(0);
+	req->rq_reqlen = newmsg_size;
+	RETURN(0);
 }
 
 /****************************************
@@ -723,16 +723,15 @@ static struct ptlrpc_svc_ctx plain_svc_ctx = {
 	.sc_policy	= &plain_policy,
 };
 
-static
-int plain_accept(struct ptlrpc_request *req)
+static int plain_accept(struct ptlrpc_request *req)
 {
-        struct lustre_msg   *msg = req->rq_reqbuf;
-        struct plain_header *phdr;
-        int                  swabbed;
-        ENTRY;
+	struct lustre_msg *msg = req->rq_reqbuf;
+	struct plain_header *phdr;
+	bool swabbed;
 
-        LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
-                SPTLRPC_POLICY_PLAIN);
+	ENTRY;
+	LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
+		SPTLRPC_POLICY_PLAIN);
 
 	if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
 	    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
@@ -815,16 +814,16 @@ int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
 
 	rs = req->rq_reply_state;
 
-        if (rs) {
-                /* pre-allocated */
-                LASSERT(rs->rs_size >= rs_size);
-        } else {
-                OBD_ALLOC_LARGE(rs, rs_size);
-                if (rs == NULL)
-                        RETURN(-ENOMEM);
+	if (rs) {
+		/* pre-allocated */
+		LASSERT(rs->rs_size >= rs_size);
+	} else {
+		OBD_ALLOC_LARGE(rs, rs_size);
+		if (rs == NULL)
+			RETURN(-ENOMEM);
 
-                rs->rs_size = rs_size;
-        }
+		rs->rs_size = rs_size;
+	}
 
 	rs->rs_svc_ctx = req->rq_svc_ctx;
 	atomic_inc(&req->rq_svc_ctx->sc_refcount);
@@ -893,7 +892,7 @@ int plain_authorize(struct ptlrpc_request *req)
 			lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
 			lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
 			NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
-                req->rq_reply_off = 0;
+		req->rq_reply_off = 0;
 	}
 
 	RETURN(0);
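
Note on the accessor change: the substantive edit in this patch replaces direct indexing of desc->bd_iov[i] with the BD_GET_KIOV(desc, i) accessor, guarded by LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)), so that one ptlrpc_bulk_desc can describe either page-based (kiov) or memory-based (kvec) fragments. The sketch below is a minimal illustration of that tagged-union-plus-accessor pattern using simplified stand-in types; every toy_/TOY_ name is invented for this example and is not the real Lustre definition (those live in the Lustre headers and differ in detail).

	#include <assert.h>
	#include <stddef.h>

	enum toy_bulk_type { TOY_BULK_KIOV, TOY_BULK_KVEC };

	/* Page-based fragment, loosely analogous to a kiov entry. */
	struct toy_kiov {
		void		*kiov_page;
		unsigned int	 kiov_len;
		unsigned int	 kiov_offset;
	};

	/* Memory-based fragment, loosely analogous to struct kvec. */
	struct toy_kvec {
		void	*iov_base;
		size_t	 iov_len;
	};

	/* One descriptor carries either fragment kind; bd_type records
	 * which member of the union is live. */
	struct toy_bulk_desc {
		enum toy_bulk_type	 bd_type;
		int			 bd_iov_count;
		union {
			struct toy_kiov	*bd_kiov;
			struct toy_kvec	*bd_kvec;
		} bd_u;
	};

	/* Accessor macro hides the union, so callers never touch bd_u. */
	#define TOY_GET_KIOV(d, i)	((d)->bd_u.bd_kiov[(i)])
	#define toy_is_bulk_kiov(t)	((t) == TOY_BULK_KIOV)

	static unsigned int toy_total_len(struct toy_bulk_desc *desc)
	{
		unsigned int nob = 0;
		int i;

		/* Check the tag before using the kiov view, mirroring
		 * the LASSERT() the patch adds at each entry point. */
		assert(toy_is_bulk_kiov(desc->bd_type));
		for (i = 0; i < desc->bd_iov_count; i++)
			nob += TOY_GET_KIOV(desc, i).kiov_len;
		return nob;
	}

	int main(void)
	{
		struct toy_kiov v[2] = { { NULL, 4096, 0 }, { NULL, 512, 0 } };
		struct toy_bulk_desc d = { TOY_BULK_KIOV, 2, { .bd_kiov = v } };

		return toy_total_len(&d) == 4608 ? 0 : 1;
	}

The payoff is that call sites such as corrupt_bulk_data() compile unchanged whichever fragment representation is in use, at the cost of a cheap tag assertion on entry.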
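A second detail worth spelling out is the reworked fixup loop in plain_cli_unwrap_bulk(): a bulk transfer may move fewer bytes than the descriptor has capacity for, so each fragment length is clipped until the running total matches bd_nob_transferred, and fragments wholly past that point collapse to zero, before the bulk checksum is verified. Below is a standalone restatement of that loop over a plain array; "frag" and "clip_to_transferred" are invented names for illustration, not Lustre API.

	/* Trim per-fragment lengths so their sum equals 'transferred'. */
	struct frag {
		unsigned int len;	/* capacity in, actual bytes out */
	};

	static void clip_to_transferred(struct frag *f, int count,
					unsigned int transferred)
	{
		unsigned int nob = 0;
		int i;

		for (i = 0; i < count; i++) {
			/* A fragment straddling the transferred total keeps
			 * only the remainder; later fragments become zero. */
			if (f[i].len + nob > transferred)
				f[i].len = transferred - nob;
			nob += f[i].len;
		}
	}

For example, with fragment capacities {4096, 4096} and transferred = 5000, the loop leaves {4096, 904}, so the checksum runs over exactly the bytes that arrived.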