X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fpers.c;h=7b27b77d254729a254765b71bbfdf9518eefa574;hb=6765d78583a7b144810e63ce8aa1c05d27cb8189;hp=200147739421a05d77e1e2d67a68160ba8149e91;hpb=0f8b7f951e7f43dbf389a431afea2091388a805e;p=fs%2Flustre-release.git

diff --git a/lustre/ptlrpc/pers.c b/lustre/ptlrpc/pers.c
index 2001477..7b27b77 100644
--- a/lustre/ptlrpc/pers.c
+++ b/lustre/ptlrpc/pers.c
@@ -26,6 +26,8 @@
 /*
  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2014, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -33,11 +35,6 @@
  */
 
 #define DEBUG_SUBSYSTEM S_RPC
-#ifndef __KERNEL__
-#include 
-#include 
-#include 
-#endif
 
 #include 
 #include 
@@ -47,7 +44,6 @@
 
 #include "ptlrpc_internal.h"
 
-#ifdef __KERNEL__
 void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
 			 int mdidx)
 {
@@ -59,74 +55,24 @@ void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
 	LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
				 LNET_MD_PHYS)));
 
-	md->options |= LNET_MD_KIOV;
 	md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
 	md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
-	if (desc->bd_enc_iov)
-		md->start = &desc->bd_enc_iov[mdidx * LNET_MAX_IOV];
-	else
-		md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV];
-}
-
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
-			  int pageoffset, int len)
-{
-	lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
-
-	kiov->kiov_page = page;
-	kiov->kiov_offset = pageoffset;
-	kiov->kiov_len = len;
-	desc->bd_iov_count++;
-}
-
-#else /* !__KERNEL__ */
-
-void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
-			 int mdidx)
-{
-	LASSERT(mdidx < desc->bd_md_max_brw);
-	LASSERT(desc->bd_iov_count > mdidx * LNET_MAX_IOV);
-	LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
-				 LNET_MD_PHYS)));
-
-	if (desc->bd_iov_count == 1) {
-		md->start = desc->bd_iov[0].iov_base;
-		md->length = desc->bd_iov[0].iov_len;
-		return;
+	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
+		md->options |= LNET_MD_KIOV;
+		if (GET_ENC_KIOV(desc))
+			md->start = &BD_GET_ENC_KIOV(desc, mdidx *
+						     LNET_MAX_IOV);
+		else
+			md->start = &BD_GET_KIOV(desc, mdidx * LNET_MAX_IOV);
+	} else if (ptlrpc_is_bulk_desc_kvec(desc->bd_type)) {
+		md->options |= LNET_MD_IOVEC;
+		if (GET_ENC_KVEC(desc))
+			md->start = &BD_GET_ENC_KVEC(desc, mdidx *
+						     LNET_MAX_IOV);
+		else
+			md->start = &BD_GET_KVEC(desc, mdidx * LNET_MAX_IOV);
 	}
-
-	md->options |= LNET_MD_IOVEC;
-	md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV];
-	md->length = min(LNET_MAX_IOV, desc->bd_iov_count - mdidx *
-			 LNET_MAX_IOV);
-}
-
-static int can_merge_iovs(lnet_md_iovec_t *existing, lnet_md_iovec_t *candidate)
-{
-	if (existing->iov_base + existing->iov_len == candidate->iov_base)
-		return 1;
-#if 0
-	/* Enable this section to provide earlier evidence of fragmented bulk */
-	CERROR("Can't merge iovs %p for %x, %p for %x\n",
-	       existing->iov_base, existing->iov_len,
-	       candidate->iov_base, candidate->iov_len);
-#endif
-	return 0;
 }
 
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
-			  int pageoffset, int len)
-{
-	lnet_md_iovec_t *iov = &desc->bd_iov[desc->bd_iov_count];
-
-	iov->iov_base = page->addr + pageoffset;
-	iov->iov_len = len;
-
-	if (desc->bd_iov_count > 0 && can_merge_iovs(iov - 1, iov)) {
-		(iov - 1)->iov_len += len;
-	} else {
-		desc->bd_iov_count++;
-	}
-}
-#endif /* !__KERNEL__ */
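
The hunk above drops the userspace (!__KERNEL__) code entirely and rewrites the kernel ptlrpc_fill_bulk_md() so that, instead of assuming page-based (kiov) fragments, it dispatches on the bulk descriptor type: kiov descriptors get LNET_MD_KIOV and a kiov start pointer, kvec descriptors get LNET_MD_IOVEC and a kvec start pointer, and in both cases an encrypted shadow buffer, when present, is preferred as the MD start. The standalone C sketch below mirrors only that dispatch logic; the struct layouts, the fill_bulk_md/bulk_desc/mem_desc names, the PAGES_PER_MD constant and the MD_OPT_* flags are simplified stand-ins invented for illustration, not the real Lustre/LNet definitions.

/*
 * Illustrative sketch only -- simplified stand-ins for the real
 * lnet_md_t / ptlrpc_bulk_desc types used in the diff above.
 */
#include <stddef.h>

#define MD_OPT_KIOV	0x01	/* stand-in for LNET_MD_KIOV  */
#define MD_OPT_IOVEC	0x02	/* stand-in for LNET_MD_IOVEC */
#define PAGES_PER_MD	256	/* stand-in for LNET_MAX_IOV  */

enum bulk_frag_type { BULK_KIOV, BULK_KVEC };	/* page vs. virtual-address frags */

struct bulk_kiov { void *page; unsigned int off, len; };
struct bulk_kvec { void *base; size_t len; };

struct bulk_desc {
	enum bulk_frag_type type;		/* how the buffer is described   */
	int frag_count;				/* total fragments in the bulk   */
	struct bulk_kiov *kiov, *enc_kiov;	/* plain / encrypted page frags  */
	struct bulk_kvec *kvec, *enc_kvec;	/* plain / encrypted vec frags   */
};

struct mem_desc {				/* stand-in for lnet_md_t */
	unsigned int options;
	unsigned int length;
	void *start;
};

void fill_bulk_md(struct mem_desc *md, struct bulk_desc *d, int mdidx)
{
	int left = d->frag_count - mdidx * PAGES_PER_MD;

	/* each MD covers at most PAGES_PER_MD fragments of the bulk */
	if (left < 0)
		left = 0;
	md->length = left < PAGES_PER_MD ? (unsigned int)left : PAGES_PER_MD;

	if (d->type == BULK_KIOV) {
		md->options |= MD_OPT_KIOV;
		/* the encrypted copy, when allocated, is what goes on the wire */
		md->start = d->enc_kiov ? &d->enc_kiov[mdidx * PAGES_PER_MD]
					: &d->kiov[mdidx * PAGES_PER_MD];
	} else {
		md->options |= MD_OPT_IOVEC;
		md->start = d->enc_kvec ? &d->enc_kvec[mdidx * PAGES_PER_MD]
					: &d->kvec[mdidx * PAGES_PER_MD];
	}
}

The preference for the enc_* buffers mirrors the GET_ENC_KIOV()/GET_ENC_KVEC() checks in the diff: when an encrypted copy of the bulk buffer has been set up (e.g. by the ptlrpc security layer), that copy is what the memory descriptor must describe, so it wins over the plain buffer whenever it exists.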