* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Finalizes cl-data before exiting a typical address_space operation. Dual to
* ll_cl_init().
*/
-static void ll_cl_fini(struct ll_cl_context *lcc)
+void ll_cl_fini(struct ll_cl_context *lcc)
{
struct lu_env *env = lcc->lcc_env;
struct cl_io *io = lcc->lcc_io;
cl_page_put(env, page);
}
- if (io && lcc->lcc_created) {
- cl_io_end(env, io);
- cl_io_unlock(env, io);
- cl_io_iter_fini(env, io);
- cl_io_fini(env, io);
- }
cl_env_put(env, &lcc->lcc_refcheck);
}
* Initializes common cl-data at the typical address_space operation entry
* point.
*/
-static struct ll_cl_context *ll_cl_init(struct file *file,
- struct page *vmpage, int create)
+struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
{
struct ll_cl_context *lcc;
struct lu_env *env;
int refcheck;
int result = 0;
- clob = ll_i2info(vmpage->mapping->host)->lli_clob;
- LASSERT(clob != NULL);
+ clob = ll_i2info(file->f_dentry->d_inode)->lli_clob;
+ LASSERT(clob != NULL);
env = cl_env_get(&refcheck);
if (IS_ERR(env))
cio = ccc_env_io(env);
io = cio->cui_cl.cis_io;
- if (io == NULL && create) {
- struct inode *inode = vmpage->mapping->host;
- loff_t pos;
-
- if (mutex_trylock(&inode->i_mutex)) {
- mutex_unlock(&(inode)->i_mutex);
-
- /* this is too bad. Someone is trying to write the
- * page w/o holding inode mutex. This means we can
- * add dirty pages into cache during truncate */
- CERROR("Proc %s is dirting page w/o inode lock, this"
- "will break truncate.\n", cfs_current()->comm);
- libcfs_debug_dumpstack(NULL);
- LBUG();
- return ERR_PTR(-EIO);
- }
-
- /*
- * Loop-back driver calls ->prepare_write() and ->sendfile()
- * methods directly, bypassing file system ->write() operation,
- * so cl_io has to be created here.
- */
- io = ccc_env_thread_io(env);
- ll_io_init(io, file, 1);
-
- /* No lock at all for this kind of IO - we can't do it because
- * we have held page lock, it would cause deadlock.
- * XXX: This causes poor performance to loop device - One page
- * per RPC.
- * In order to get better performance, users should use
- * lloop driver instead.
- */
- io->ci_lockreq = CILR_NEVER;
-
- pos = (vmpage->index << CFS_PAGE_SHIFT);
-
- /* Create a temp IO to serve write. */
- result = cl_io_rw_init(env, io, CIT_WRITE, pos, CFS_PAGE_SIZE);
- if (result == 0) {
- cio->cui_fd = LUSTRE_FPRIVATE(file);
- cio->cui_iov = NULL;
- cio->cui_nrsegs = 0;
- result = cl_io_iter_init(env, io);
- if (result == 0) {
- result = cl_io_lock(env, io);
- if (result == 0)
- result = cl_io_start(env, io);
- }
- } else
- result = io->ci_result;
- lcc->lcc_created = 1;
- }
-
lcc->lcc_io = io;
- if (io == NULL)
- result = -EIO;
- if (result == 0) {
+ if (io == NULL)
+ result = -EIO;
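+ /* vmpage may now be NULL: in that case the cl_page lookup below is
+ * skipped and the context is returned with only env and io set up */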
+ if (result == 0 && vmpage != NULL) {
struct cl_page *page;
LASSERT(io != NULL);
lcc = ERR_PTR(result);
}
- CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
- vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
- env, io);
return lcc;
}
-static struct ll_cl_context *ll_cl_get(void)
-{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- LASSERT(!IS_ERR(env));
- lcc = &vvp_env_info(env)->vti_io_ctx;
- LASSERT(env == lcc->lcc_env);
- LASSERT(current == lcc->lcc_cookie);
- cl_env_put(env, &refcheck);
-
- /* env has got in ll_cl_init, so it is still usable. */
- return lcc;
-}
-
-/**
- * ->prepare_write() address space operation called by generic_file_write()
- * for every page during write.
- */
-int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
- unsigned to)
-{
- struct ll_cl_context *lcc;
- int result;
- ENTRY;
-
- lcc = ll_cl_init(file, vmpage, 1);
- if (!IS_ERR(lcc)) {
- struct lu_env *env = lcc->lcc_env;
- struct cl_io *io = lcc->lcc_io;
- struct cl_page *page = lcc->lcc_page;
-
- cl_page_assume(env, io, page);
- if (cl_io_is_append(io)) {
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- /**
- * In VFS file->page write loop, for appending, the
- * write offset might be reset according to the new
- * file size before holding i_mutex. So crw_pos should
- * be reset here. BUG:17711.
- */
- io->u.ci_wr.wr.crw_pos = i_size_read(inode);
- }
- result = cl_io_prepare_write(env, io, page, from, to);
- if (result == 0) {
- /*
- * Add a reference, so that page is not evicted from
- * the cache until ->commit_write() is called.
- */
- cl_page_get(page);
- lu_ref_add(&page->cp_reference, "prepare_write",
- cfs_current());
- } else {
- cl_page_unassume(env, io, page);
- ll_cl_fini(lcc);
- }
- /* returning 0 in prepare assumes commit must be called
- * afterwards */
- } else {
- result = PTR_ERR(lcc);
- }
- RETURN(result);
-}
-
-int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
- unsigned to)
-{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- struct cl_io *io;
- struct cl_page *page;
- int result = 0;
- ENTRY;
-
- lcc = ll_cl_get();
- env = lcc->lcc_env;
- page = lcc->lcc_page;
- io = lcc->lcc_io;
-
- LASSERT(cl_page_is_owned(page, io));
- LASSERT(from <= to);
- if (from != to) /* handle short write case. */
- result = cl_io_commit_write(env, io, page, from, to);
- if (cl_page_is_owned(page, io))
- cl_page_unassume(env, io, page);
-
- /*
- * Release reference acquired by ll_prepare_write().
- */
- lu_ref_del(&page->cp_reference, "prepare_write", cfs_current());
- cl_page_put(env, page);
- ll_cl_fini(lcc);
- RETURN(result);
-}
-
struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt)
{
__u64 opc;
* get a zero ra window, although there is still ra space remaining. - Jay */
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
- struct ra_io_arg *ria,
- unsigned long pages)
+ struct ra_io_arg *ria,
+ unsigned long pages, unsigned long min)
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
long ret;
/* If read-ahead pages left are less than 1M, do not do read-ahead,
* otherwise it will form small read RPCs (< 1M), which hurt server
* performance a lot. */
- ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), pages);
+ ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages);
if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages))
GOTO(out, ret = 0);
ret -= beyond_rpc;
}
- if (cfs_atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
- cfs_atomic_sub(ret, &ra->ra_cur_pages);
- ret = 0;
- }
+ if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
+ atomic_sub(ret, &ra->ra_cur_pages);
+ ret = 0;
+ }
out:
- RETURN(ret);
+ if (ret < min) {
+ /* override the ra limit: grant at least 'min' pages, just enough
+ * to cover the current read */
+ atomic_add(min - ret, &ra->ra_cur_pages);
+ ret = min;
+ }
+ RETURN(ret);
}
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
- struct ll_ra_info *ra = &sbi->ll_ra_info;
- cfs_atomic_sub(len, &ra->ra_cur_pages);
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ atomic_sub(len, &ra->ra_cur_pages);
}
static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
lprocfs_counter_incr(sbi->ll_ra_stats, which);
}
-void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
+void ll_ra_stats_inc(struct inode *inode, enum ra_stat which)
{
- struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
- ll_ra_stats_inc_sbi(sbi, which);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ ll_ra_stats_inc_sbi(sbi, which);
}
#define RAS_CDEBUG(ras) \
ras->ras_consecutive_requests++;
rar->lrr_reader = current;
- cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+ list_add(&rar->lrr_linkage, &ras->ras_read_beads);
spin_unlock(&ras->ras_lock);
}
ras = ll_ras_get(f);
spin_lock(&ras->ras_lock);
- cfs_list_del_init(&rar->lrr_linkage);
+ list_del_init(&rar->lrr_linkage);
spin_unlock(&ras->ras_lock);
}
{
struct ll_ra_read *scan;
- cfs_list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
+ list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
if (scan->lrr_reader == current)
return scan;
}
}
static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_page *page,
- struct page *vmpage)
+ struct cl_page_list *queue, struct cl_page *page,
+ struct cl_object *clob, pgoff_t *max_index)
{
- struct ccc_page *cp;
- int rc;
+ struct page *vmpage = page->cp_vmpage;
+ struct ccc_page *cp;
+ int rc;
- ENTRY;
+ ENTRY;
- rc = 0;
- cl_page_assume(env, io, page);
- lu_ref_add(&page->cp_reference, "ra", cfs_current());
- cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
+ rc = 0;
+ cl_page_assume(env, io, page);
+ lu_ref_add(&page->cp_reference, "ra", current);
+ cp = cl2ccc_page(cl_object_page_slice(clob, page));
if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
- rc = cl_page_is_under_lock(env, io, page);
- if (rc == -EBUSY) {
- cp->cpg_defer_uptodate = 1;
- cp->cpg_ra_used = 0;
- cl_page_list_add(queue, page);
- rc = 1;
- } else {
- cl_page_delete(env, page);
- rc = -ENOLCK;
- }
+ CDEBUG(D_READA, "page index %lu, max_index: %lu\n",
+ ccc_index(cp), *max_index);
+ /* Disable the optimization of prefetching up to the maximum
+ * readahead index, because there is a race with lock
+ * cancellation. This optimization will be revived later.
+ * if (*max_index == 0 || ccc_index(cp) > *max_index) */
+ rc = cl_page_is_under_lock(env, io, page, max_index);
+ if (rc == 0) {
+ cp->cpg_defer_uptodate = 1;
+ cp->cpg_ra_used = 0;
+ cl_page_list_add(queue, page);
+ rc = 1;
+ } else {
+ cl_page_discard(env, io, page);
+ rc = -ENOLCK;
+ }
} else {
/* skip completed pages */
cl_page_unassume(env, io, page);
}
- lu_ref_del(&page->cp_reference, "ra", cfs_current());
- cl_page_put(env, page);
- RETURN(rc);
+ lu_ref_del(&page->cp_reference, "ra", current);
+ cl_page_put(env, page);
+ RETURN(rc);
}
/**
* \retval -ve, 0: page wasn't added to \a queue for other reason.
*/
static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue,
- pgoff_t index, struct address_space *mapping)
+ struct cl_page_list *queue,
+ pgoff_t index, pgoff_t *max_index)
{
+ struct cl_object *clob = io->ci_obj;
+ struct inode *inode = ccc_object_inode(clob);
struct page *vmpage;
- struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
struct cl_page *page;
enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
unsigned int gfp_mask;
#ifdef __GFP_NOWARN
gfp_mask |= __GFP_NOWARN;
#endif
- vmpage = grab_cache_page_nowait(mapping, index);
- if (vmpage != NULL) {
- /* Check if vmpage was truncated or reclaimed */
- if (vmpage->mapping == mapping) {
- page = cl_page_find(env, clob, vmpage->index,
- vmpage, CPT_CACHEABLE);
- if (!IS_ERR(page)) {
- rc = cl_read_ahead_page(env, io, queue,
- page, vmpage);
+ vmpage = grab_cache_page_nowait(inode->i_mapping, index);
+ if (vmpage != NULL) {
+ /* Check if vmpage was truncated or reclaimed */
+ if (vmpage->mapping == inode->i_mapping) {
+ page = cl_page_find(env, clob, vmpage->index,
+ vmpage, CPT_CACHEABLE);
+ if (!IS_ERR(page)) {
+ rc = cl_read_ahead_page(env, io, queue,
+ page, clob, max_index);
if (rc == -ENOLCK) {
which = RA_STAT_FAILED_MATCH;
msg = "lock match failed";
which = RA_STAT_FAILED_GRAB_PAGE;
msg = "g_c_p_n failed";
}
- if (msg != NULL) {
- ll_ra_stats_inc(mapping, which);
- CDEBUG(D_READA, "%s\n", msg);
- }
- RETURN(rc);
+ if (msg != NULL) {
+ ll_ra_stats_inc(inode, which);
+ CDEBUG(D_READA, "%s\n", msg);
+ }
+ RETURN(rc);
}
#define RIA_DEBUG(ria) \
* sense to tune the i_blkbits value for the file based on the OSTs it is
* striped over, rather than having a constant value for all files here. */
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - CFS_PAGE_SHIFT)).
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
* Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
* by default, this should be adjusted to correspond with max_read_ahead_mb
* and max_read_ahead_per_file_mb, otherwise the readahead budget can be used
* up quickly, which will affect read performance significantly. See LU-2816 */
-#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> CFS_PAGE_SHIFT)
+#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
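+/* i.e. 256 pages per increase step with 4KB pages; the inode argument is
+ * currently unused by the macro */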
static inline int stride_io_mode(struct ll_readahead_state *ras)
{
struct cl_io *io, struct cl_page_list *queue,
struct ra_io_arg *ria,
unsigned long *reserved_pages,
- struct address_space *mapping,
unsigned long *ra_end)
{
- int rc, count = 0, stride_ria;
- unsigned long page_idx;
-
- LASSERT(ria != NULL);
- RIA_DEBUG(ria);
-
- stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
- for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
- *reserved_pages > 0; page_idx++) {
- if (ras_inside_ra_window(page_idx, ria)) {
- /* If the page is inside the read-ahead window*/
- rc = ll_read_ahead_page(env, io, queue,
- page_idx, mapping);
+ int rc, count = 0;
+ bool stride_ria;
+ pgoff_t page_idx;
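+ /* max_index is updated by cl_page_is_under_lock(); the optimization
+ * built on it is currently disabled (see cl_read_ahead_page()) */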
+ pgoff_t max_index = 0;
+
+ LASSERT(ria != NULL);
+ RIA_DEBUG(ria);
+
+ stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
+ for (page_idx = ria->ria_start;
+ page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) {
+ if (ras_inside_ra_window(page_idx, ria)) {
+ /* If the page is inside the read-ahead window*/
+ rc = ll_read_ahead_page(env, io, queue,
+ page_idx, &max_index);
if (rc == 1) {
(*reserved_pages)--;
- count ++;
+ count++;
} else if (rc == -ENOLCK)
break;
} else if (stride_ria) {
}
int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct ll_readahead_state *ras, struct address_space *mapping,
- struct cl_page_list *queue, int flags)
+ struct cl_page_list *queue, struct ll_readahead_state *ras,
+ bool hit)
{
- struct vvp_io *vio = vvp_env_io(env);
- struct vvp_thread_info *vti = vvp_env_info(env);
- struct cl_attr *attr = ccc_env_thread_attr(env);
- unsigned long start = 0, end = 0, reserved;
- unsigned long ra_end, len;
- struct inode *inode;
- struct ll_ra_read *bead;
- struct ra_io_arg *ria = &vti->vti_ria;
- struct ll_inode_info *lli;
- struct cl_object *clob;
- int ret = 0;
- __u64 kms;
- ENTRY;
+ struct vvp_io *vio = vvp_env_io(env);
+ struct vvp_thread_info *vti = vvp_env_info(env);
+ struct cl_attr *attr = ccc_env_thread_attr(env);
+ unsigned long start = 0, end = 0, reserved;
+ unsigned long ra_end, len, mlen = 0;
+ struct inode *inode;
+ struct ll_ra_read *bead;
+ struct ra_io_arg *ria = &vti->vti_ria;
+ struct cl_object *clob;
+ int ret = 0;
+ __u64 kms;
+ ENTRY;
- inode = mapping->host;
- lli = ll_i2info(inode);
- clob = lli->lli_clob;
+ clob = io->ci_obj;
+ inode = ccc_object_inode(clob);
- memset(ria, 0, sizeof *ria);
+ memset(ria, 0, sizeof *ria);
- cl_object_attr_lock(clob);
- ret = cl_object_attr_get(env, clob, attr);
- cl_object_attr_unlock(clob);
+ cl_object_attr_lock(clob);
+ ret = cl_object_attr_get(env, clob, attr);
+ cl_object_attr_unlock(clob);
- if (ret != 0)
- RETURN(ret);
- kms = attr->cat_kms;
- if (kms == 0) {
- ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
- RETURN(0);
- }
+ if (ret != 0)
+ RETURN(ret);
+ kms = attr->cat_kms;
+ if (kms == 0) {
+ ll_ra_stats_inc(inode, RA_STAT_ZERO_LEN);
+ RETURN(0);
+ }
spin_lock(&ras->ras_lock);
if (vio->cui_ra_window_set)
end = rpc_boundary;
/* Truncate RA window to end of file */
- end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
+ end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
ras->ras_next_readahead = max(end, end + 1);
RAS_CDEBUG(ras);
}
spin_unlock(&ras->ras_lock);
- if (end == 0) {
- ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
- RETURN(0);
- }
- len = ria_page_count(ria);
- if (len == 0)
- RETURN(0);
+ if (end == 0) {
+ ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
+ RETURN(0);
+ }
+ len = ria_page_count(ria);
+ if (len == 0) {
+ ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
+ RETURN(0);
+ }
- reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
- if (reserved < len)
- ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
+ CDEBUG(D_READA, DFID": ria: %lu/%lu, bead: %lu/%lu, hit: %d\n",
+ PFID(lu_object_fid(&clob->co_lu)),
+ ria->ria_start, ria->ria_end,
+ bead == NULL ? 0 : bead->lrr_start,
+ bead == NULL ? 0 : bead->lrr_count,
+ hit);
+
+ /* at a minimum, extend the readahead window to cover the current read */
+ if (!hit && bead != NULL &&
+ bead->lrr_start + bead->lrr_count > ria->ria_start) {
+ /* up to the end of the current read window. */
+ mlen = bead->lrr_start + bead->lrr_count - ria->ria_start;
+ /* trim to RPC boundary */
+ start = ria->ria_start & (PTLRPC_MAX_BRW_PAGES - 1);
+ mlen = min(mlen, PTLRPC_MAX_BRW_PAGES - start);
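+ /* e.g. assuming PTLRPC_MAX_BRW_PAGES == 256 (1MB RPCs with 4KB
+ * pages): for ria_start = 260 and a read window ending at page 600,
+ * start = 4 and mlen is capped at 252, so the guaranteed pages
+ * (260..511) do not cross an RPC boundary */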
+ }
+
+ reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len, mlen);
+ if (reserved < len)
+ ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT);
- CDEBUG(D_READA, "reserved page %lu ra_cur %d ra_max %lu\n", reserved,
- cfs_atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
- ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
+ CDEBUG(D_READA, "reserved pages: %lu/%lu/%lu, ra_cur %d, ra_max %lu\n",
+ reserved, len, mlen,
+ atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
+ ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
- ret = ll_read_ahead_pages(env, io, queue,
- ria, &reserved, mapping, &ra_end);
+ ret = ll_read_ahead_pages(env, io, queue, ria, &reserved, &ra_end);
- LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
- if (reserved != 0)
- ll_ra_count_put(ll_i2sbi(inode), reserved);
+ if (reserved != 0)
+ ll_ra_count_put(ll_i2sbi(inode), reserved);
- if (ra_end == end + 1 && ra_end == (kms >> CFS_PAGE_SHIFT))
- ll_ra_stats_inc(mapping, RA_STAT_EOF);
+ if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
+ ll_ra_stats_inc(inode, RA_STAT_EOF);
- /* if we didn't get to the end of the region we reserved from
- * the ras we need to go back and update the ras so that the
- * next read-ahead tries from where we left off. we only do so
- * if the region we failed to issue read-ahead on is still ahead
- * of the app and behind the next index to start read-ahead from */
- CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n",
- ra_end, end, ria->ria_end);
+ /* if we didn't get to the end of the region we reserved from
+ * the ras we need to go back and update the ras so that the
+ * next read-ahead tries from where we left off. we only do so
+ * if the region we failed to issue read-ahead on is still ahead
+ * of the app and behind the next index to start read-ahead from */
+ CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n",
+ ra_end, end, ria->ria_end);
if (ra_end != end + 1) {
+ ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
spin_lock(&ras->ras_lock);
if (ra_end < ras->ras_next_readahead &&
index_in_window(ra_end, ras->ras_window_start, 0,
spin_lock_init(&ras->ras_lock);
ras_reset(inode, ras, 0);
ras->ras_requests = 0;
- CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
+ INIT_LIST_HEAD(&ras->ras_read_beads);
}
/*
if (ras->ras_requests == 2 && !ras->ras_request_index) {
__u64 kms_pages;
- kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
- CFS_PAGE_SHIFT;
+ kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
ras->ras_last_readpage = index;
ras_set_start(inode, ras, index);
- if (stride_io_mode(ras))
+ if (stride_io_mode(ras)) {
/* Since stride readahead is sensitive to the read-ahead
* offset, use the original offset here instead of
* ras_window_start, which is RPC aligned */
ras->ras_next_readahead = max(index, ras->ras_next_readahead);
- else
- ras->ras_next_readahead = max(ras->ras_window_start,
- ras->ras_next_readahead);
+ } else {
+ if (ras->ras_next_readahead < ras->ras_window_start)
+ ras->ras_next_readahead = ras->ras_window_start;
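+ /* on a cache miss, restart readahead right after the missed
+ * page rather than from the window start */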
+ if (!hit)
+ ras->ras_next_readahead = index + 1;
+ }
RAS_CDEBUG(ras);
/* Trigger RA in the mmap case where ras_consecutive_requests
if (result == 0) {
page = cl_page_find(env, clob, vmpage->index,
vmpage, CPT_CACHEABLE);
- if (!IS_ERR(page)) {
- lu_ref_add(&page->cp_reference, "writepage",
- cfs_current());
- cl_page_assume(env, io, page);
+ if (!IS_ERR(page)) {
+ lu_ref_add(&page->cp_reference, "writepage",
+ current);
+ cl_page_assume(env, io, page);
result = cl_page_flush(env, io, page);
if (result != 0) {
/*
}
cl_page_disown(env, io, page);
unlocked = true;
- lu_ref_del(&page->cp_reference,
- "writepage", cfs_current());
- cl_page_put(env, page);
+ lu_ref_del(&page->cp_reference,
+ "writepage", current);
+ cl_page_put(env, page);
} else {
result = PTR_ERR(page);
}
* breaking kernel which assumes ->writepage should mark
* PageWriteback or clean the page. */
result = cl_sync_file_range(inode, offset,
- offset + CFS_PAGE_SIZE - 1,
- CL_FSYNC_LOCAL);
+ offset + PAGE_CACHE_SIZE - 1,
+ CL_FSYNC_LOCAL, 1);
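+ /* the trailing 1 is the ignore_layout argument added to
+ * cl_sync_file_range() (cf. ll_writepages() below) */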
if (result > 0) {
/* actually we may have written more than one page.
* decreasing this page because the caller will count
int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
loff_t start;
loff_t end;
enum cl_fsync_mode mode;
int range_whole = 0;
int result;
+ int ignore_layout = 0;
ENTRY;
if (wbc->range_cyclic) {
- start = mapping->writeback_index << CFS_PAGE_SHIFT;
+ start = mapping->writeback_index << PAGE_CACHE_SHIFT;
end = OBD_OBJECT_EOF;
} else {
start = wbc->range_start;
if (wbc->sync_mode == WB_SYNC_ALL)
mode = CL_FSYNC_LOCAL;
- result = cl_sync_file_range(inode, start, end, mode);
+ if (sbi->ll_umounting)
+ /* if the mountpoint is being umounted, all pages have to be
+ * evicted to avoid hitting LBUG when truncate_inode_pages()
+ * is called later on. */
+ ignore_layout = 1;
+
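+ /* a file with no cl_object attached has no pages to flush */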
+ if (cl_i2info(inode)->lli_clob == NULL)
+ RETURN(0);
+
+ result = cl_sync_file_range(inode, start, end, mode, ignore_layout);
if (result > 0) {
wbc->nr_to_write -= result;
result = 0;
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
if (end == OBD_OBJECT_EOF)
- end = i_size_read(inode);
- mapping->writeback_index = (end >> CFS_PAGE_SHIFT) + 1;
+ mapping->writeback_index = 0;
+ else
+ mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
}
RETURN(result);
}
int result;
ENTRY;
- lcc = ll_cl_init(file, vmpage, 0);
+ lcc = ll_cl_init(file, vmpage);
if (!IS_ERR(lcc)) {
struct lu_env *env = lcc->lcc_env;
struct cl_io *io = lcc->lcc_io;
result = cl_io_read_page(env, io, page);
} else {
/* Page from a non-object file. */
- LASSERT(!ll_i2info(vmpage->mapping->host)->lli_has_smd);
unlock_page(vmpage);
result = 0;
}
RETURN(result);
}
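+/**
+ * Submit \a page for synchronous I/O of type \a crt and wait for it to
+ * complete.  On read the page is disowned before returning; on write it is
+ * left locked, even on error (see the in-line comment below).
+ */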
+int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, enum cl_req_type crt)
+{
+ struct cl_2queue *queue;
+ int result;
+
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+
+ queue = &io->ci_queue;
+ cl_2queue_init_page(queue, page);
+
+ result = cl_io_submit_sync(env, io, crt, queue, 0);
+ LASSERT(cl_page_is_owned(page, io));
+
+ if (crt == CRT_READ)
+ /*
+ * in CRT_WRITE case page is left locked even in case of
+ * error.
+ */
+ cl_page_list_disown(env, io, &queue->c2_qin);
+ cl_2queue_fini(env, queue);
+
+ return result;
+}