1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2004 Cluster File Systems, Inc.
6 * This file is part of the Lustre file system, http://www.lustre.org
7 * Lustre is a trademark of Cluster File Systems, Inc.
9 * You may have signed or agreed to another license before downloading
10 * this software. If so, you are bound by the terms and conditions
11 * of that agreement, and the following does not apply to you. See the
12 * LICENSE file included with this distribution for more information.
14 * If you did not agree to a different license, then this copy of Lustre
15 * is open source software; you can redistribute it and/or modify it
16 * under the terms of version 2 of the GNU General Public License as
17 * published by the Free Software Foundation.
19 * In either case, Lustre is distributed in the hope that it will be
20 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
21 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 */
25 #define DEBUG_SUBSYSTEM S_RPC
29 #include <liblustre.h>
32 #include <obd_support.h>
33 #include <obd_class.h>
34 #include <lustre_lib.h>
35 #include <lustre_ha.h>
36 #include <lustre_import.h>
38 #include "ptlrpc_internal.h"
42 void ptlrpc_fill_bulk_md (lnet_md_t *md, struct ptlrpc_bulk_desc *desc)
44 LASSERT (desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
45 LASSERT (!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | LNET_MD_PHYS)));
47 md->options |= LNET_MD_KIOV;
48 md->start = desc->bd_enc_iov ? desc->bd_enc_iov : &desc->bd_iov[0];
49 md->length = desc->bd_iov_count;
52 void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
53 int pageoffset, int len)
55 lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
57 kiov->kiov_page = page;
58 kiov->kiov_offset = pageoffset;
64 void ptl_rpc_wipe_bulk_pages(struct ptlrpc_bulk_desc *desc)
68 for (i = 0; i < desc->bd_iov_count ; i++) {
69 lnet_kiov_t *kiov = &desc->bd_iov[i];
70 memset(cfs_kmap(kiov->kiov_page)+kiov->kiov_offset, 0xab,
72 cfs_kunmap(kiov->kiov_page);
76 int ptlrpc_bulk_alloc_enc_pages(struct ptlrpc_bulk_desc *desc)
80 LASSERT(desc->bd_enc_iov == NULL);
82 if (desc->bd_iov_count == 0)
85 alloc_size = desc->bd_iov_count * sizeof(desc->bd_enc_iov[0]);
87 OBD_ALLOC(desc->bd_enc_iov, alloc_size);
88 if (desc->bd_enc_iov == NULL)
91 memcpy(desc->bd_enc_iov, desc->bd_iov, alloc_size);
93 for (i = 0; i < desc->bd_iov_count; i++) {
94 desc->bd_enc_iov[i].kiov_page =
95 cfs_alloc_page(CFS_ALLOC_IO | CFS_ALLOC_HIGH);
96 if (desc->bd_enc_iov[i].kiov_page == NULL) {
97 CERROR("Failed to alloc %d encryption pages\n",
103 if (i == desc->bd_iov_count)
107 for (i = i - 1; i >= 0; i--)
108 __free_page(desc->bd_enc_iov[i].kiov_page);
109 OBD_FREE(desc->bd_enc_iov, alloc_size);
110 desc->bd_enc_iov = NULL;
114 void ptlrpc_bulk_free_enc_pages(struct ptlrpc_bulk_desc *desc)
118 if (desc->bd_enc_iov == NULL)
121 for (i = 0; i < desc->bd_iov_count; i++) {
122 LASSERT(desc->bd_enc_iov[i].kiov_page);
123 __free_page(desc->bd_enc_iov[i].kiov_page);
126 OBD_FREE(desc->bd_enc_iov,
127 desc->bd_iov_count * sizeof(desc->bd_enc_iov[0]));
128 desc->bd_enc_iov = NULL;
131 #else /* !__KERNEL__ */
133 void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc)
135 LASSERT (!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | LNET_MD_PHYS)));
136 if (desc->bd_iov_count == 1) {
137 md->start = desc->bd_iov[0].iov_base;
138 md->length = desc->bd_iov[0].iov_len;
142 md->options |= LNET_MD_IOVEC;
143 md->start = &desc->bd_iov[0];
144 md->length = desc->bd_iov_count;
147 static int can_merge_iovs(lnet_md_iovec_t *existing, lnet_md_iovec_t *candidate)
149 if (existing->iov_base + existing->iov_len == candidate->iov_base)
152 /* Enable this section to provide earlier evidence of fragmented bulk */
153 CERROR("Can't merge iovs %p for %x, %p for %x\n",
154 existing->iov_base, existing->iov_len,
155 candidate->iov_base, candidate->iov_len);
160 void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
161 int pageoffset, int len)
163 lnet_md_iovec_t *iov = &desc->bd_iov[desc->bd_iov_count];
165 iov->iov_base = page->addr + pageoffset;
168 if (desc->bd_iov_count > 0 && can_merge_iovs(iov - 1, iov)) {
169 (iov - 1)->iov_len += len;
171 desc->bd_iov_count++;
175 void ptl_rpc_wipe_bulk_pages(struct ptlrpc_bulk_desc *desc)
179 for(i = 0; i < desc->bd_iov_count; i++) {
180 lnet_md_iovec_t *iov = &desc->bd_iov[i];
182 memset(iov->iov_base, 0xab, iov->iov_len);
186 int ptlrpc_bulk_alloc_enc_pages(struct ptlrpc_bulk_desc *desc)
190 void ptlrpc_bulk_free_enc_pages(struct ptlrpc_bulk_desc *desc)
193 #endif /* !__KERNEL__ */