#ifdef __KERNEL__

-void ptlrpc_fill_bulk_md (lnet_md_t *md, struct ptlrpc_bulk_desc *desc)
-{
- LASSERT (desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
- LASSERT (!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | LNET_MD_PHYS)));
-
- md->options |= LNET_MD_KIOV;
- md->start = &desc->bd_iov[0];
- md->length = desc->bd_iov_count;
-}
-
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
- int pageoffset, int len)
-{
- lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
-
- kiov->kiov_page = page;
- kiov->kiov_offset = pageoffset;
- kiov->kiov_len = len;
-
- desc->bd_iov_count++;
-}
-
-void ptl_rpc_wipe_bulk_pages(struct ptlrpc_bulk_desc *desc)
-{
- int i;
-
- for (i = 0; i < desc->bd_iov_count ; i++) {
- lnet_kiov_t *kiov = &desc->bd_iov[i];
- memset(cfs_kmap(kiov->kiov_page)+kiov->kiov_offset, 0xab,
- kiov->kiov_len);
- cfs_kunmap(kiov->kiov_page);
- }
-}

#else /* !__KERNEL__ */

-void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc)
-{
- LASSERT (!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | LNET_MD_PHYS)));
- if (desc->bd_iov_count == 1) {
- md->start = desc->bd_iov[0].iov_base;
- md->length = desc->bd_iov[0].iov_len;
- return;
- }
-
- md->options |= LNET_MD_IOVEC;
- md->start = &desc->bd_iov[0];
- md->length = desc->bd_iov_count;
-}
-
-static int can_merge_iovs(lnet_md_iovec_t *existing, lnet_md_iovec_t *candidate)
-{
- if (existing->iov_base + existing->iov_len == candidate->iov_base)
- return 1;
-#if 0
- /* Enable this section to provide earlier evidence of fragmented bulk */
- CERROR("Can't merge iovs %p for %x, %p for %x\n",
- existing->iov_base, existing->iov_len,
- candidate->iov_base, candidate->iov_len);
-#endif
- return 0;
-}