X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fptlrpc%2Fpers.c;h=e899d796ff700d9477e1d0ebf98869479bf08dac;hp=5c78e1250a48c6d4a6b4f498a7438a3fcdd50d34;hb=917655fc2938b90a9c246dd2d58408c42aa1658d;hpb=e3a7c58aebafce40323db54bf6056029e5af4a70

diff --git a/lustre/ptlrpc/pers.c b/lustre/ptlrpc/pers.c
index 5c78e12..e899d79 100644
--- a/lustre/ptlrpc/pers.c
+++ b/lustre/ptlrpc/pers.c
@@ -15,17 +15,15 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 /*
  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -33,11 +31,6 @@
  */
 
 #define DEBUG_SUBSYSTEM S_RPC
-#ifndef __KERNEL__
-#include 
-#include 
-#include 
-#endif
 
 #include 
 #include 
@@ -47,75 +40,35 @@
 
 #include "ptlrpc_internal.h"
 
-#ifdef __KERNEL__
-
-void ptlrpc_fill_bulk_md (lnet_md_t *md, struct ptlrpc_bulk_desc *desc)
-{
-	LASSERT (desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
-	LASSERT (!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | LNET_MD_PHYS)));
-
-	md->options |= LNET_MD_KIOV;
-	md->length = desc->bd_iov_count;
-	if (desc->bd_enc_iov)
-		md->start = desc->bd_enc_iov;
-	else
-		md->start = desc->bd_iov;
-}
-
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
-			  int pageoffset, int len)
-{
-	lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
-
-	kiov->kiov_page = page;
-	kiov->kiov_offset = pageoffset;
-	kiov->kiov_len = len;
-
-	desc->bd_iov_count++;
-}
-
-#else /* !__KERNEL__ */
-
-void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc)
-{
-	LASSERT (!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | LNET_MD_PHYS)));
-	if (desc->bd_iov_count == 1) {
-		md->start = desc->bd_iov[0].iov_base;
-		md->length = desc->bd_iov[0].iov_len;
-		return;
-	}
-
-	md->options |= LNET_MD_IOVEC;
-	md->start = &desc->bd_iov[0];
-	md->length = desc->bd_iov_count;
-}
-static int can_merge_iovs(lnet_md_iovec_t *existing, lnet_md_iovec_t *candidate)
+void ptlrpc_fill_bulk_md(struct lnet_md *md, struct ptlrpc_bulk_desc *desc,
+			 int mdidx)
 {
-	if (existing->iov_base + existing->iov_len == candidate->iov_base)
-		return 1;
-#if 0
-	/* Enable this section to provide earlier evidence of fragmented bulk */
-	CERROR("Can't merge iovs %p for %x, %p for %x\n",
-	       existing->iov_base, existing->iov_len,
-	       candidate->iov_base, candidate->iov_len);
-#endif
-	return 0;
+	BUILD_BUG_ON(PTLRPC_MAX_BRW_PAGES >= LI_POISON);
+
+	LASSERT(mdidx < desc->bd_md_max_brw);
+	LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
+	LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
+				 LNET_MD_PHYS)));
+
+	md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
+	md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
+
+	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
+		md->options |= LNET_MD_KIOV;
+		if (GET_ENC_KIOV(desc))
+			md->start = &BD_GET_ENC_KIOV(desc, mdidx *
+						     LNET_MAX_IOV);
+		else
+			md->start = &BD_GET_KIOV(desc, mdidx * LNET_MAX_IOV);
+	} else if (ptlrpc_is_bulk_desc_kvec(desc->bd_type)) {
+		md->options |= LNET_MD_IOVEC;
+		if (GET_ENC_KVEC(desc))
+			md->start = &BD_GET_ENC_KVEC(desc, mdidx *
+						     LNET_MAX_IOV);
+		else
+			md->start = &BD_GET_KVEC(desc, mdidx * LNET_MAX_IOV);
+	}
 }
 
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
-			  int pageoffset, int len)
-{
-	lnet_md_iovec_t *iov = &desc->bd_iov[desc->bd_iov_count];
-
-	iov->iov_base = page->addr + pageoffset;
-	iov->iov_len = len;
-
-	if (desc->bd_iov_count > 0 && can_merge_iovs(iov - 1, iov)) {
-		(iov - 1)->iov_len += len;
-	} else {
-		desc->bd_iov_count++;
-	}
-}
-#endif /* !__KERNEL__ */
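The patched ptlrpc_fill_bulk_md() above no longer maps a whole bulk descriptor into a single MD: each call fills MD number mdidx with at most LNET_MAX_IOV fragments, with the fragment count clamped by the max()/min_t() pair. The standalone sketch below (not part of the patch) reproduces only that fragmentation arithmetic in plain user-space C; the LNET_MAX_IOV value of 256 and the 600-page bulk size are assumed example figures for illustration, not values read from the Lustre headers.

#include <stdio.h>

#define LNET_MAX_IOV 256		/* assumed fragment limit per MD */

int main(void)
{
	int bd_iov_count = 600;		/* hypothetical bulk of 600 pages */
	int bd_md_max_brw = (bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
	int mdidx;

	for (mdidx = 0; mdidx < bd_md_max_brw; mdidx++) {
		/* same clamping as the patched ptlrpc_fill_bulk_md() */
		int length = bd_iov_count - mdidx * LNET_MAX_IOV;

		if (length < 0)
			length = 0;
		if (length > LNET_MAX_IOV)
			length = LNET_MAX_IOV;

		printf("MD %d: fragment offset %d, %d fragments\n",
		       mdidx, mdidx * LNET_MAX_IOV, length);
	}
	return 0;
}

With these example numbers the sketch prints three MDs of 256, 256 and 88 fragments, mirroring how bd_md_max_brw MDs cover the bd_iov_count pages of one bulk transfer.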