* Lustre Lite I/O page cache routines shared by different kernel revs
*/
-#include <linux/autoconf.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#define DEBUG_SUBSYSTEM S_LLITE
-//#include <lustre_mdc.h>
#include <lustre_lite.h>
#include <obd_cksum.h>
#include "llite_internal.h"
struct ll_inode_info *lli = ll_i2info(inode);
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu\n",inode->i_ino,
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %llu\n", inode->i_ino,
inode->i_generation, inode, i_size_read(inode));
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
* so cl_io has to be created here.
*/
- io = &ccc_env_info(env)->cti_io;
+ io = ccc_env_thread_io(env);
vio = vvp_env_io(env);
ll_io_init(io, file, 1);
struct lu_env *env;
struct cl_io *io;
struct cl_page *page;
- int result;
+ int result = 0;
ENTRY;
lcc = ll_cl_get();
io = lcc->lcc_io;
LASSERT(cl_page_is_owned(page, io));
- result = cl_io_commit_write(env, io, page, from, to);
+ LASSERT(from <= to);
+ if (from != to) /* handle short write case. */
+ result = cl_io_commit_write(env, io, page, from, to);
if (cl_page_is_owned(page, io))
cl_page_unassume(env, io, page);
+
/*
- * Release reference acquired by cl_io_prepare_write().
+ * Release reference acquired by ll_prepare_write().
*/
lu_ref_del(&page->cp_reference, "prepare_write", cfs_current());
cl_page_put(env, page);
* ll_ra_count_get at the exactly same time. All of them will get a zero ra
* window, although the global window is 100M. -jay
*/
-static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
+static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, struct ra_io_arg *ria,
+				     unsigned long len)
{
	struct ll_ra_info *ra = &sbi->ll_ra_info;
	unsigned long ret;
	 * otherwise it will form small read RPC(< 1M), which hurt server
	 * performance a lot.
	 */
+	/* Bail out early if the cluster-wide budget is already overshot;
+	 * this also guards the unsigned subtraction below against underflow.
+	 * Use the cfs_atomic_* portability wrapper like the rest of this
+	 * function, not the bare kernel atomic_read(). */
+	if (ra->ra_max_pages < cfs_atomic_read(&ra->ra_cur_pages))
+		GOTO(out, ret = 0);
+
	ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), len);
	if ((int)ret < 0 || ret < min((unsigned long)PTLRPC_MAX_BRW_PAGES, len))
		GOTO(out, ret = 0);
+	if (ria->ria_pages == 0)
+		/* it needs 1M align again after trimmed by ra_max_pages */
+		if (ret >= ((ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES))
+			ret -= (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
+
	if (cfs_atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
		cfs_atomic_sub(ret, &ra->ra_cur_pages);
		ret = 0;
	}
+
out:
	RETURN(ret);
}
if (len == 0)
RETURN(0);
- reserved = ll_ra_count_get(ll_i2sbi(inode), len);
-
+ reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
if (reserved < len)
ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
return;
}
-int ll_writepage(struct page *vmpage, struct writeback_control *unused)
+int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
{
struct inode *inode = vmpage->mapping->host;
struct lu_env *env;
if (IS_ERR(env))
RETURN(PTR_ERR(env));
- io = &ccc_env_info(env)->cti_io;
queue = &vvp_env_info(env)->vti_queue;
clob = ll_i2info(inode)->lli_clob;
LASSERT(clob != NULL);
+ io = ccc_env_thread_io(env);
io->ci_obj = clob;
result = cl_io_init(env, io, CIT_MISC, clob);
if (result == 0) {
cl_2queue_init_page(queue, page);
result = cl_io_submit_rw(env, io, CRT_WRITE,
queue, CRP_NORMAL);
- cl_page_list_disown(env, io, &queue->c2_qin);
if (result != 0) {
/*
- * There is no need to clear PG_writeback, as
- * cl_io_submit_rw() calls completion callback
- * on failure.
- */
- /*
* Re-dirty page on error so it retries write,
* but not in case when IO has actually
* occurred and completed with an error.
*/
- if (!PageError(vmpage))
- set_page_dirty(vmpage);
+ if (!PageError(vmpage)) {
+ redirty_page_for_writepage(wbc, vmpage);
+ result = 0;
+ }
}
+ cl_page_list_disown(env, io, &queue->c2_qin);
LASSERT(!cl_page_is_owned(page, io));
lu_ref_del(&page->cp_reference,
"writepage", cfs_current());
}
ll_cl_fini(lcc);
} else {
+ unlock_page(vmpage);
result = PTR_ERR(lcc);
}
RETURN(result);