1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2004 Cluster File Systems, Inc.
6 * This file is part of the Lustre file system, http://www.lustre.org
7 * Lustre is a trademark of Cluster File Systems, Inc.
9 * You may have signed or agreed to another license before downloading
10 * this software. If so, you are bound by the terms and conditions
11 * of that agreement, and the following does not apply to you. See the
12 * LICENSE file included with this distribution for more information.
14 * If you did not agree to a different license, then this copy of Lustre
15 * is open source software; you can redistribute it and/or modify it
16 * under the terms of version 2 of the GNU General Public License as
17 * published by the Free Software Foundation.
19 * In either case, Lustre is distributed in the hope that it will be
20 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
21 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 */
25 #define DEBUG_SUBSYSTEM S_RPC
29 #include <liblustre.h>
32 #include <obd_support.h>
33 #include <obd_class.h>
34 #include <lustre_lib.h>
35 #include <lustre_ha.h>
36 #include <lustre_import.h>
38 #include "ptlrpc_internal.h"
42 void ptlrpc_fill_bulk_md (lnet_md_t *md, struct ptlrpc_bulk_desc *desc)
44 LASSERT (desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
45 LASSERT (!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | LNET_MD_PHYS)));
47 md->options |= LNET_MD_KIOV;
48 md->start = &desc->bd_iov[0];
49 md->length = desc->bd_iov_count;
52 void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
53 int pageoffset, int len)
55 lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
57 kiov->kiov_page = page;
58 kiov->kiov_offset = pageoffset;
64 void ptl_rpc_wipe_bulk_pages(struct ptlrpc_bulk_desc *desc)
68 for (i = 0; i < desc->bd_iov_count ; i++) {
69 lnet_kiov_t *kiov = &desc->bd_iov[i];
70 memset(cfs_kmap(kiov->kiov_page)+kiov->kiov_offset, 0xab,
72 cfs_kunmap(kiov->kiov_page);
76 #else /* !__KERNEL__ */
78 void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc)
80 LASSERT (!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | LNET_MD_PHYS)));
81 if (desc->bd_iov_count == 1) {
82 md->start = desc->bd_iov[0].iov_base;
83 md->length = desc->bd_iov[0].iov_len;
87 md->options |= LNET_MD_IOVEC;
88 md->start = &desc->bd_iov[0];
89 md->length = desc->bd_iov_count;
92 static int can_merge_iovs(lnet_md_iovec_t *existing, lnet_md_iovec_t *candidate)
94 if (existing->iov_base + existing->iov_len == candidate->iov_base)
97 /* Enable this section to provide earlier evidence of fragmented bulk */
98 CERROR("Can't merge iovs %p for %x, %p for %x\n",
99 existing->iov_base, existing->iov_len,
100 candidate->iov_base, candidate->iov_len);
105 void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
106 int pageoffset, int len)
108 lnet_md_iovec_t *iov = &desc->bd_iov[desc->bd_iov_count];
110 iov->iov_base = page->addr + pageoffset;
113 if (desc->bd_iov_count > 0 && can_merge_iovs(iov - 1, iov)) {
114 (iov - 1)->iov_len += len;
116 desc->bd_iov_count++;
120 void ptl_rpc_wipe_bulk_pages(struct ptlrpc_bulk_desc *desc)
124 for(i = 0; i < desc->bd_iov_count; i++) {
125 lnet_md_iovec_t *iov = &desc->bd_iov[i];
127 memset(iov->iov_base, 0xab, iov->iov_len);
130 #endif /* !__KERNEL__ */