/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
-/* must be called under ->lli_size_sem */
void ll_truncate(struct inode *inode)
{
- struct ll_inode_info *lli = ll_i2info(inode);
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu\n",inode->i_ino,
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %llu\n", inode->i_ino,
inode->i_generation, inode, i_size_read(inode));
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
- if (lli->lli_size_sem_owner == cfs_current()) {
- LASSERT_SEM_LOCKED(&lli->lli_size_sem);
- ll_inode_size_unlock(inode, 0);
- }
EXIT;
return;
cio = ccc_env_io(env);
io = cio->cui_cl.cis_io;
if (io == NULL && create) {
- struct vvp_io *vio;
loff_t pos;
/*
* Loop-back driver calls ->prepare_write() and ->sendfile()
* methods directly, bypassing file system ->write() operation,
* so cl_io has to be created here.
*/
-
io = ccc_env_thread_io(env);
- vio = vvp_env_io(env);
ll_io_init(io, file, 1);
/* No lock at all for this kind of IO - we can't do it because
static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
-/* WARNING: This algorithm is used to reduce the contention on
- * sbi->ll_lock. It should work well if the ra_max_pages is much
- * greater than the single file's read-ahead window.
+/**
+ * Get readahead pages from the filesystem readahead pool of the client for a
+ * thread.
*
- * TODO: There may exist a `global sync problem' in this implementation.
- * Considering the global ra window is 100M, and each file's ra window is 10M,
- * there are over 10 files trying to get its ra budget and reach
- * ll_ra_count_get at the exactly same time. All of them will get a zero ra
- * window, although the global window is 100M. -jay
- */
-static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, struct ra_io_arg *ria,
- unsigned long len)
+ * \param sbi superblock for filesystem readahead state ll_ra_info
+ * \param ria per-thread readahead state
+ * \param pages number of pages requested for readahead for the thread
+ *
+ * WARNING: This algorithm is used to reduce contention on sbi->ll_lock.
+ * It should work well if ra_max_pages is much greater than a single
+ * file's read-ahead window, and there are not too many threads contending
+ * for these readahead pages.
+ *
+ * TODO: There may be a 'global sync problem' if many threads are trying
+ * to get an ra budget that is larger than the remaining readahead pages
+ * and reach here at exactly the same time. They will compute \a ret to
+ * consume the remaining pages, but will fail at cfs_atomic_add_return()
+ * and get a zero ra window, although there is still ra space remaining. - Jay
+ */
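+/* Illustrative sketch of the race above (hypothetical numbers, not from
+ * the original code): suppose 512 readahead pages remain and two threads
+ * each request 512. Both can compute ret == 512 below, but the second
+ * cfs_atomic_add_return() pushes ra_cur_pages past ra_max_pages, so that
+ * thread gives its pages back and ends up with a zero readahead window
+ * even though pages were still available. */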
+static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
+ struct ra_io_arg *ria,
+ unsigned long pages)
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
- unsigned long ret;
+ long ret;
ENTRY;
- /**
- * If read-ahead pages left are less than 1M, do not do read-ahead,
+ /* If read-ahead pages left are less than 1M, do not do read-ahead,
* otherwise it will form small read RPCs (< 1M), which hurt server
- * performance a lot.
- */
- if (ra->ra_max_pages < atomic_read(&ra->ra_cur_pages))
+ * performance a lot. */
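+ /* For scale (assuming 4KB pages, so PTLRPC_MAX_BRW_PAGES == 256, i.e.
+ * a 1MB RPC): a reservation that cannot cover at least min(256, pages)
+ * pages is refused outright by the check below. */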
+ ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), pages);
+ if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages))
GOTO(out, ret = 0);
- ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), len);
- if ((int)ret < 0 || ret < min((unsigned long)PTLRPC_MAX_BRW_PAGES, len))
- GOTO(out, ret = 0);
-
- if (ria->ria_pages == 0)
- /* it needs 1M align again after trimed by ra_max_pages*/
- if (ret >= ((ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES))
- ret -= (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
+ /* If the non-strided (ria_pages == 0) readahead window
+ * (ria_start + ret) has grown across an RPC boundary, then trim
+ * the readahead size by the amount beyond the RPC so it ends on an
+ * RPC boundary. If the readahead window already ends on an RPC
+ * boundary (beyond_rpc == 0), or if trimming would consume the whole
+ * window because it lies entirely past the last boundary
+ * (beyond_rpc >= ret), the readahead size is unchanged.
+ * The (beyond_rpc != 0) check is skipped since the conditional
+ * branch is more expensive than subtracting zero from the result.
+ *
+ * Strided read is left unaligned to avoid small fragments beyond
+ * the RPC boundary from needing an extra read RPC. */
+ if (ria->ria_pages == 0) {
+ long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
+ if (/* beyond_rpc != 0 && */ beyond_rpc < ret)
+ ret -= beyond_rpc;
+ }
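+ /* Worked example of the trimming above (hypothetical numbers, assuming
+ * 4KB pages so PTLRPC_MAX_BRW_PAGES == 256): ria_start == 100 and
+ * ret == 300 give beyond_rpc == 400 % 256 == 144; since 144 < 300,
+ * ret is trimmed to 156 and the window ends at page 256, an RPC
+ * boundary. */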
if (cfs_atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
cfs_atomic_sub(ret, &ra->ra_cur_pages);
-#ifdef __GFP_NOWARN
- gfp_mask |= __GFP_NOWARN;
-#endif
- vmpage = grab_cache_page_nowait_gfp(mapping, index, gfp_mask);
+ vmpage = grab_cache_page_nowait(mapping, index);
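+ /* Note: grab_cache_page_nowait() allocates with the mapping's own gfp
+ * mask, so the caller-built gfp_mask (including __GFP_NOWARN) is no
+ * longer needed here. */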
if (vmpage != NULL) {
/* Check if vmpage was truncated or reclaimed */
if (vmpage->mapping == mapping) {
end = ras->ras_window_start + ras->ras_window_len - 1;
}
if (end != 0) {
- unsigned long tmp_end;
+ unsigned long rpc_boundary;
/*
* Align RA window to an optimal boundary.
*
* be aligned to the RAID stripe size in the future and that
* is more important than the RPC size.
*/
- tmp_end = ((end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1))) - 1;
- if (tmp_end > start)
- end = tmp_end;
+ /* Note: we only trim the window back to the RPC boundary, instead
+ * of extending it to the boundary, to avoid reading too many pages
+ * during random reads. */
+ rpc_boundary = ((end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1)));
+ if (rpc_boundary > 0)
+ rpc_boundary--;
+
+ if (rpc_boundary > start)
+ end = rpc_boundary;
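+ /* e.g. (hypothetical numbers, assuming 4KB pages so
+ * PTLRPC_MAX_BRW_PAGES == 256): end == 1000 gives
+ * rpc_boundary == (1001 & ~255) - 1 == 767; if start < 767 the
+ * window is trimmed so that end + 1 falls on an RPC boundary. */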
/* Truncate RA window to end of file */
end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
if (reserved < len)
ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
- CDEBUG(D_READA, "reserved page %lu \n", reserved);
+ CDEBUG(D_READA, "reserved page %lu ra_cur %d ra_max %lu\n", reserved,
+ cfs_atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
+ ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
ret = ll_read_ahead_pages(env, io, queue,
ria, &reserved, mapping, &ra_end);
return;
}
-int ll_writepage(struct page *vmpage, struct writeback_control *unused)
+int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
{
struct inode *inode = vmpage->mapping->host;
struct lu_env *env;
cl_2queue_init_page(queue, page);
result = cl_io_submit_rw(env, io, CRT_WRITE,
queue, CRP_NORMAL);
- cl_page_list_disown(env, io, &queue->c2_qin);
if (result != 0) {
/*
- * There is no need to clear PG_writeback, as
- * cl_io_submit_rw() calls completion callback
- * on failure.
- */
- /*
* Re-dirty page on error so it retries write,
* but not in case when IO has actually
* occurred and completed with an error.
*/
- if (!PageError(vmpage))
- set_page_dirty(vmpage);
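+ /* redirty_page_for_writepage() marks the page dirty again and lets
+ * the writeback control account for the skipped page; returning 0
+ * then tells the VM the page was handled, and the write is simply
+ * retried by a later writeback pass. */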
+ if (!PageError(vmpage)) {
+ redirty_page_for_writepage(wbc, vmpage);
+ result = 0;
+ }
}
+ cl_page_list_disown(env, io, &queue->c2_qin);
LASSERT(!cl_page_is_owned(page, io));
lu_ref_del(&page->cp_reference,
"writepage", cfs_current());