obd_size left_space = filter_grant_space_left(exp);
struct filter_obd *filter = &exp->exp_obd->u.filter;
- /*Only if left_space < fo_tot_clients * 32M,
+ /*Only if left_space < fo_tot_clients * 32M,
*then the grant space could be shrinked */
- if (left_space < filter->fo_tot_granted_clients *
- FILTER_GRANT_SHRINK_LIMIT) {
+ if (left_space < filter->fo_tot_granted_clients *
+ FILTER_GRANT_SHRINK_LIMIT) {
fed->fed_grant -= oa->o_grant;
filter->fo_tot_granted -= oa->o_grant;
CDEBUG(D_CACHE, "%s: cli %s/%p shrink "LPU64
* generate more memory pressure, but at the same time use __GFP_NOMEMALLOC
* in order not to exhaust emergency reserves.
*
- * See Bug 19529 and Bug 19917 for details.
+ * See Bug 19529 and Bug 19917 for details.
*/
static struct page *filter_get_page(struct obd_device *obd,
struct inode *inode,
struct page *page;
page = find_or_create_page(inode->i_mapping, offset >> CFS_PAGE_SHIFT,
- (localreq ? (GFP_NOFS | __GFP_HIGHMEM)
+ (localreq ? (GFP_NOFS | __GFP_HIGHMEM)
: (GFP_HIGHUSER | __GFP_NOMEMALLOC)));
if (unlikely(page == NULL))
lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_NO_PAGE, 1);
RETURN(rc);
if (exp->exp_connection &&
- exp->exp_connection->c_peer.nid == exp->exp_connection->c_self)
+ exp->exp_connection->c_peer.nid == exp->exp_connection->c_self)
localreq = 1;
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
* be able to proceed in filter_commitrw_write(). thus let's
* just wait for writeout completion, should be rare enough.
* -bzzz */
- if (obd->u.filter.fo_writethrough_cache)
- wait_on_page_writeback(lnb->page);
+ wait_on_page_writeback(lnb->page);
BUG_ON(PageWriteback(lnb->page));
/* If the filter writes a partial page, then has the file
if (lnb->page == NULL)
continue;
+ if (rc)
+ /* If the write has failed, the page cache may
* not be consistent with what is on disk, so
+ * force pages to be reread next time it is
+ * accessed */
+ ClearPageUptodate(lnb->page);
+
LASSERT(PageLocked(lnb->page));
unlock_page(lnb->page);
if (!ptlrpc_server_bulk_active(desc)) /* completed or */
return; /* never started */
- /* Do not send any meaningful data over the wire for evicted clients */
- if (desc->bd_export && desc->bd_export->exp_failed)
- ptl_rpc_wipe_bulk_pages(desc);
+ /* We used to poison the pages with 0xab here because we did not want to
+ * send any meaningful data over the wire for evicted clients (bug 9297).
+ * However, this is no longer safe now that we use the page cache on the
+ * OSS (bug 20560) */
/* The unlink ensures the callback happens ASAP and is the last
* one. If it fails, it must be because completion just happened,
desc->bd_iov_count++;
}
-void ptl_rpc_wipe_bulk_pages(struct ptlrpc_bulk_desc *desc)
-{
- int i;
-
- for (i = 0; i < desc->bd_iov_count ; i++) {
- lnet_kiov_t *kiov = &desc->bd_iov[i];
- memset(cfs_kmap(kiov->kiov_page)+kiov->kiov_offset, 0xab,
- kiov->kiov_len);
- cfs_kunmap(kiov->kiov_page);
- }
-}
-
#else /* !__KERNEL__ */
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc)
md->length = desc->bd_iov[0].iov_len;
return;
}
-
+
md->options |= LNET_MD_IOVEC;
md->start = &desc->bd_iov[0];
md->length = desc->bd_iov_count;
static int can_merge_iovs(lnet_md_iovec_t *existing, lnet_md_iovec_t *candidate)
{
- if (existing->iov_base + existing->iov_len == candidate->iov_base)
+ if (existing->iov_base + existing->iov_len == candidate->iov_base)
return 1;
#if 0
/* Enable this section to provide earlier evidence of fragmented bulk */
return 0;
}
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
+void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
int pageoffset, int len)
{
lnet_md_iovec_t *iov = &desc->bd_iov[desc->bd_iov_count];
}
}
-void ptl_rpc_wipe_bulk_pages(struct ptlrpc_bulk_desc *desc)
-{
- int i;
-
- for(i = 0; i < desc->bd_iov_count; i++) {
- lnet_md_iovec_t *iov = &desc->bd_iov[i];
-
- memset(iov->iov_base, 0xab, iov->iov_len);
- }
-}
#endif /* !__KERNEL__ */
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc);
void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
int pageoffset, int len);
-void ptl_rpc_wipe_bulk_pages(struct ptlrpc_bulk_desc *desc);
/* pack_generic.c */
struct ptlrpc_reply_state *lustre_get_emerg_rs(struct ptlrpc_service *svc);