/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Lustre Lite I/O page cache routines shared by different kernel revs
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Copyright (c) 2001-2003 Cluster File Systems, Inc.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * This file is part of Lustre, http://www.lustre.org.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/llite/rw.c
+ *
+ * Lustre Lite I/O page cache routines shared by different kernel revs
*/
-#include <linux/config.h>
+#include <linux/autoconf.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
-#include <asm/segment.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
+/* current_is_kswapd() */
+#include <linux/swap.h>
#define DEBUG_SUBSYSTEM S_LLITE
-#include <linux/lustre_mds.h>
-#include <linux/lustre_lite.h>
+//#include <lustre_mdc.h>
+#include <lustre_lite.h>
+#include <obd_cksum.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
-#ifndef list_for_each_prev_safe
-#define list_for_each_prev_safe(pos, n, head) \
- for (pos = (head)->prev, n = pos->prev; pos != (head); \
- pos = n, n = pos->prev )
-#endif
-
-/* SYNCHRONOUS I/O to object storage for an inode */
-static int ll_brw(int cmd, struct inode *inode, struct obdo *oa,
- struct page *page, int flags)
+/* This isn't where truncate starts. Roughly:
+ * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate. setattr_raw grabs
+ * the DLM lock on [size, EOF], i_mutex, ->lli_size_sem, and WRITE_I_ALLOC_SEM
+ * to avoid races.
+ *
+ * Must be called under ->lli_size_sem. */
+void ll_truncate(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- struct brw_page pg;
- int rc;
ENTRY;
- pg.pg = page;
- pg.off = ((obd_off)page->index) << PAGE_SHIFT;
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu\n", inode->i_ino,
+ inode->i_generation, inode, i_size_read(inode));
- if (cmd == OBD_BRW_WRITE && (pg.off + PAGE_SIZE > inode->i_size))
- pg.count = inode->i_size % PAGE_SIZE;
- else
- pg.count = PAGE_SIZE;
-
- CDEBUG(D_PAGE, "%s %d bytes ino %lu at "LPU64"/"LPX64"\n",
- cmd & OBD_BRW_WRITE ? "write" : "read", pg.count, inode->i_ino,
- pg.off, pg.off);
- if (pg.count == 0) {
- CERROR("ZERO COUNT: ino %lu: size %p:%Lu(%p:%Lu) idx %lu off "
- LPU64"\n",
- inode->i_ino, inode, inode->i_size, page->mapping->host,
- page->mapping->host->i_size, page->index, pg.off);
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
+ if (lli->lli_size_sem_owner == cfs_current()) {
+ LASSERT_SEM_LOCKED(&lli->lli_size_sem);
+ ll_inode_size_unlock(inode, 0);
}
- pg.flag = flags;
-
- if (cmd == OBD_BRW_WRITE)
- lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
- LPROC_LL_BRW_WRITE, pg.count);
- else
- lprocfs_counter_add(ll_i2sbi(inode)->ll_stats,
- LPROC_LL_BRW_READ, pg.count);
- rc = obd_brw(cmd, ll_i2obdexp(inode), oa, lsm, 1, &pg, NULL);
- if (rc == 0)
- obdo_to_inode(inode, oa, OBD_MD_FLBLOCKS);
- else if (rc != -EIO)
- CERROR("error from obd_brw: rc = %d\n", rc);
- RETURN(rc);
-}
+ EXIT;
+ return;
+} /* ll_truncate */
-/* this isn't where truncate starts. roughly:
- * sys_truncate->ll_setattr_raw->vmtruncate->ll_truncate
- * we grab the lock back in setattr_raw to avoid races. */
-void ll_truncate(struct inode *inode)
+/**
+ * Finalizes cl-data before exiting typical address_space operation. Dual to
+ * ll_cl_init().
+ */
+static void ll_cl_fini(struct ll_cl_context *lcc)
{
- struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
- struct obdo oa;
- int rc;
- ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
- inode->i_generation, inode);
+ struct lu_env *env = lcc->lcc_env;
+ struct cl_io *io = lcc->lcc_io;
+ struct cl_page *page = lcc->lcc_page;
- if (!lsm) {
- CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
- inode->i_ino);
- EXIT;
- return;
+ LASSERT(lcc->lcc_cookie == current);
+ LASSERT(env != NULL);
+
+ if (page != NULL) {
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
}
- oa.o_id = lsm->lsm_object_id;
- oa.o_valid = OBD_MD_FLID;
- obdo_from_inode(&oa, inode, OBD_MD_FLTYPE|OBD_MD_FLMODE|OBD_MD_FLATIME|
- OBD_MD_FLMTIME | OBD_MD_FLCTIME);
+ if (io && lcc->lcc_created) {
+ cl_io_end(env, io);
+ cl_io_unlock(env, io);
+ cl_io_iter_fini(env, io);
+ cl_io_fini(env, io);
+ }
+ cl_env_put(env, &lcc->lcc_refcheck);
+}
- CDEBUG(D_INFO, "calling punch for "LPX64" (all bytes after %Lu)\n",
- oa.o_id, inode->i_size);
+/**
+ * Initializes common cl-data at the typical address_space operation entry
+ * point.
+ */
+static struct ll_cl_context *ll_cl_init(struct file *file,
+ struct page *vmpage, int create)
+{
+ struct ll_cl_context *lcc;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_object *clob;
+ struct ccc_io *cio;
+
+ int refcheck;
+ int result = 0;
+
+ clob = ll_i2info(vmpage->mapping->host)->lli_clob;
+ LASSERT(clob != NULL);
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return ERR_PTR(PTR_ERR(env));
+
+ lcc = &vvp_env_info(env)->vti_io_ctx;
+ memset(lcc, 0, sizeof(*lcc));
+ lcc->lcc_env = env;
+ lcc->lcc_refcheck = refcheck;
+ lcc->lcc_cookie = current;
+
+ cio = ccc_env_io(env);
+ io = cio->cui_cl.cis_io;
+ if (io == NULL && create) {
+ struct vvp_io *vio;
+ loff_t pos;
+
+ /*
+ * The loop-back driver calls ->prepare_write() and ->sendfile()
+ * methods directly, bypassing the file system's ->write() operation,
+ * so the cl_io has to be created here.
+ */
+
+ io = &ccc_env_info(env)->cti_io;
+ vio = vvp_env_io(env);
+ ll_io_init(io, file, 1);
+
+ /* Take no DLM lock at all for this kind of IO: we already hold
+ * the page lock, so locking here could deadlock.
+ * XXX: This gives the loop device poor performance - one page
+ * per RPC. For better performance, use the lloop driver
+ * instead.
+ */
+ io->ci_lockreq = CILR_NEVER;
+
+ pos = (vmpage->index << CFS_PAGE_SHIFT);
+
+ /* Create a temp IO to serve write. */
+ result = cl_io_rw_init(env, io, CIT_WRITE, pos, CFS_PAGE_SIZE);
+ if (result == 0) {
+ cio->cui_fd = LUSTRE_FPRIVATE(file);
+ cio->cui_iov = NULL;
+ cio->cui_nrsegs = 0;
+ result = cl_io_iter_init(env, io);
+ if (result == 0) {
+ result = cl_io_lock(env, io);
+ if (result == 0)
+ result = cl_io_start(env, io);
+ }
+ } else
+ result = io->ci_result;
+ lcc->lcc_created = 1;
+ }
- /* truncate == punch from new size to absolute end of file */
- rc = obd_punch(ll_i2obdexp(inode), &oa, lsm, inode->i_size,
- OBD_OBJECT_EOF, NULL);
- if (rc)
- CERROR("obd_truncate fails (%d) ino %lu\n", rc, inode->i_ino);
- else
- obdo_to_inode(inode, &oa, OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
- OBD_MD_FLATIME | OBD_MD_FLMTIME |
- OBD_MD_FLCTIME);
+ lcc->lcc_io = io;
+ if (io == NULL)
+ result = -EIO;
+ if (result == 0) {
+ struct cl_page *page;
+
+ LASSERT(io != NULL);
+ LASSERT(io->ci_state == CIS_IO_GOING);
+ LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
+ page = cl_page_find(env, clob, vmpage->index, vmpage,
+ CPT_CACHEABLE);
+ if (!IS_ERR(page)) {
+ lcc->lcc_page = page;
+ lu_ref_add(&page->cp_reference, "cl_io", io);
+ result = 0;
+ } else
+ result = PTR_ERR(page);
+ }
+ if (result) {
+ ll_cl_fini(lcc);
+ lcc = ERR_PTR(result);
+ }
- EXIT;
- return;
-} /* ll_truncate */
+ CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
+ vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
+ env, io);
+ return lcc;
+}
+
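+/* Return the per-thread ll_cl_context set up by ll_cl_init(); only valid
+ * between ll_cl_init() and ll_cl_fini(). */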
+static struct ll_cl_context *ll_cl_get(void)
+{
+ struct ll_cl_context *lcc;
+ struct lu_env *env;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ LASSERT(!IS_ERR(env));
+ lcc = &vvp_env_info(env)->vti_io_ctx;
+ LASSERT(env == lcc->lcc_env);
+ LASSERT(current == lcc->lcc_cookie);
+ cl_env_put(env, &refcheck);
+
+ /* env was acquired in ll_cl_init(), so it is still usable. */
+ return lcc;
+}
-int ll_prepare_write(struct file *file, struct page *page, unsigned from,
+/**
+ * ->prepare_write() address space operation called by generic_file_write()
+ * for every page during write.
+ */
+int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
unsigned to)
{
- struct inode *inode = page->mapping->host;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
- struct brw_page pga;
- struct obdo oa;
- int rc = 0;
+ struct ll_cl_context *lcc;
+ int result;
ENTRY;
- if (!PageLocked(page))
- LBUG();
+ lcc = ll_cl_init(file, vmpage, 1);
+ if (!IS_ERR(lcc)) {
+ struct lu_env *env = lcc->lcc_env;
+ struct cl_io *io = lcc->lcc_io;
+ struct cl_page *page = lcc->lcc_page;
+
+ cl_page_assume(env, io, page);
+ if (cl_io_is_append(io)) {
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = ccc_object_inode(obj);
+ /**
+ * In the VFS file->page write loop, for appends, the
+ * write offset may have been set from a stale file
+ * size before i_mutex was held, so crw_pos has to be
+ * reset here. BUG:17711.
+ */
+ io->u.ci_wr.wr.crw_pos = i_size_read(inode);
+ }
+ result = cl_io_prepare_write(env, io, page, from, to);
+ if (result == 0) {
+ /*
+ * Add a reference, so that page is not evicted from
+ * the cache until ->commit_write() is called.
+ */
+ cl_page_get(page);
+ lu_ref_add(&page->cp_reference, "prepare_write",
+ cfs_current());
+ } else {
+ cl_page_unassume(env, io, page);
+ ll_cl_fini(lcc);
+ }
+ /* returning 0 in prepare assumes commit must be called
+ * afterwards */
+ } else {
+ result = PTR_ERR(lcc);
+ }
+ RETURN(result);
+}
- /* Check to see if we should return -EIO right away */
- pga.pg = page;
- pga.off = offset;
- pga.count = PAGE_SIZE;
- pga.flag = 0;
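+/**
+ * ->commit_write() address space operation: completes the write started
+ * by ll_prepare_write() and drops the page reference taken there.
+ */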
+int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
+ unsigned to)
+{
+ struct ll_cl_context *lcc;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_page *page;
+ int result;
+ ENTRY;
- oa.o_id = lsm->lsm_object_id;
- oa.o_mode = inode->i_mode;
- oa.o_valid = OBD_MD_FLID | OBD_MD_FLMODE | OBD_MD_FLTYPE;
+ lcc = ll_cl_get();
+ env = lcc->lcc_env;
+ page = lcc->lcc_page;
+ io = lcc->lcc_io;
+
+ LASSERT(cl_page_is_owned(page, io));
+ result = cl_io_commit_write(env, io, page, from, to);
+ if (cl_page_is_owned(page, io))
+ cl_page_unassume(env, io, page);
+ /*
+ * Release reference acquired by cl_io_prepare_write().
+ */
+ lu_ref_del(&page->cp_reference, "prepare_write", cfs_current());
+ cl_page_put(env, page);
+ ll_cl_fini(lcc);
+ RETURN(result);
+}
- rc = obd_brw(OBD_BRW_CHECK, ll_i2obdexp(inode), &oa, lsm, 1, &pga,
- NULL);
- if (rc)
- RETURN(rc);
+struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt)
+{
+ __u64 opc;
- if (PageUptodate(page))
- RETURN(0);
+ opc = crt == CRT_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
+ return ll_osscapa_get(inode, opc);
+}
- /* We're completely overwriting an existing page, so _don't_ set it up
- * to date until commit_write */
- if (from == 0 && to == PAGE_SIZE) {
- POISON_PAGE(page, 0x11);
- RETURN(0);
- }
+static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
- /* If are writing to a new page, no need to read old data.
- * the extent locking and getattr procedures in ll_file_write have
- * guaranteed that i_size is stable enough for our zeroing needs */
- if (inode->i_size <= offset) {
- memset(kmap(page), 0, PAGE_SIZE);
- kunmap(page);
- GOTO(prepare_done, rc = 0);
- }
+/* WARNING: This algorithm is used to reduce contention on
+ * sbi->ll_lock. It works well when ra_max_pages is much greater than any
+ * single file's read-ahead window.
+ *
+ * TODO: There may be a `global sync problem' in this implementation.
+ * Suppose the global RA window is 100M and each file's RA window is 10M:
+ * if more than 10 files try to reserve their RA budget and reach
+ * ll_ra_count_get() at exactly the same time, all of them get a zero RA
+ * window, even though the global window is 100M. -jay
+ */
+static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
+{
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ unsigned long ret;
+ ENTRY;
- /* XXX could be an async ocp read.. read-ahead? */
- rc = ll_brw(OBD_BRW_READ, inode, &oa, page, 0);
- if (rc == 0) {
- /* bug 1598: don't clobber blksize */
- oa.o_valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLKSZ);
- obdo_refresh_inode(inode, &oa, oa.o_valid);
+ /**
+ * If fewer than 1M of read-ahead pages are left, do not read
+ * ahead at all: that would produce small read RPCs (< 1M),
+ * which hurt server performance badly.
+ */
+ ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), len);
+ if ((int)ret < 0 || ret < min((unsigned long)PTLRPC_MAX_BRW_PAGES, len))
+ GOTO(out, ret = 0);
+
+ if (cfs_atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
+ cfs_atomic_sub(ret, &ra->ra_cur_pages);
+ ret = 0;
}
+out:
+ RETURN(ret);
+}
- EXIT;
- prepare_done:
- if (rc == 0)
- SetPageUptodate(page);
+void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
+{
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ cfs_atomic_sub(len, &ra->ra_cur_pages);
+}
- return rc;
+static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
+{
+ LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
+ lprocfs_counter_incr(sbi->ll_ra_stats, which);
}
-int ll_write_count(struct page *page)
+void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
- struct inode *inode = page->mapping->host;
+ struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
+ ll_ra_stats_inc_sbi(sbi, which);
+}
- /* catch race with truncate */
- if (((loff_t)page->index << PAGE_SHIFT) >= inode->i_size)
- return 0;
+#define RAS_CDEBUG(ras) \
+ CDEBUG(D_READA, \
+ "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu" \
+ "csr %lu sf %lu sp %lu sl %lu \n", \
+ ras->ras_last_readpage, ras->ras_consecutive_requests, \
+ ras->ras_consecutive_pages, ras->ras_window_start, \
+ ras->ras_window_len, ras->ras_next_readahead, \
+ ras->ras_requests, ras->ras_request_index, \
+ ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
+ ras->ras_stride_pages, ras->ras_stride_length)
+
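+/* Return whether @index lies in the window [point - before, point + after],
+ * clamping the window bounds at 0 and ~0UL on unsigned wrap-around. */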
+static int index_in_window(unsigned long index, unsigned long point,
+ unsigned long before, unsigned long after)
+{
+ unsigned long start = point - before, end = point + after;
- /* catch sub-page write at end of file */
- if (((loff_t)page->index << PAGE_SHIFT) + PAGE_SIZE > inode->i_size)
- return inode->i_size % PAGE_SIZE;
+ if (start > point)
+ start = 0;
+ if (end < point)
+ end = ~0;
- return PAGE_SIZE;
+ return start <= index && index <= end;
}
-struct ll_async_page *llap_from_cookie(void *cookie)
+static struct ll_readahead_state *ll_ras_get(struct file *f)
{
- struct ll_async_page *llap = cookie;
- if (llap->llap_magic != LLAP_MAGIC)
- return ERR_PTR(-EINVAL);
- return llap;
-};
+ struct ll_file_data *fd;
+
+ fd = LUSTRE_FPRIVATE(f);
+ return &fd->fd_ras;
+}
-static int ll_ap_make_ready(void *data, int cmd)
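+/* An ll_ra_read "bead" records a read currently in flight so that
+ * ll_readahead() can enlarge the read-ahead window to cover the full
+ * extent of the read. */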
+void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
- struct ll_async_page *llap;
- struct page *page;
- ENTRY;
+ struct ll_readahead_state *ras;
- llap = llap_from_cookie(data);
- if (IS_ERR(llap))
- RETURN(-EINVAL);
+ ras = ll_ras_get(f);
- page = llap->llap_page;
+ cfs_spin_lock(&ras->ras_lock);
+ ras->ras_requests++;
+ ras->ras_request_index = 0;
+ ras->ras_consecutive_requests++;
+ rar->lrr_reader = current;
- if (cmd == OBD_BRW_READ) {
- /* _sync_page beat us to it and is about to call
- * _set_async_flags which will fire off rpcs again */
- if (!test_and_clear_bit(LL_PRIVBITS_READ, &page->private))
- RETURN(-EAGAIN);
- RETURN(0);
- }
+ cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+ cfs_spin_unlock(&ras->ras_lock);
+}
- /* we're trying to write, but the page is locked.. come back later */
- if (TryLockPage(page))
- RETURN(-EAGAIN);
+void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
+{
+ struct ll_readahead_state *ras;
- LL_CDEBUG_PAGE(page, "made ready\n");
- page_cache_get(page);
+ ras = ll_ras_get(f);
- /* if we left PageDirty we might get another writepage call
- * in the future. list walkers are bright enough
- * to check page dirty so we can leave it on whatever list
- * its on. XXX also, we're called with the cli list so if
- * we got the page cache list we'd create a lock inversion
- * with the removepage path which gets the page lock then the
- * cli lock */
- clear_page_dirty(page);
- RETURN(0);
+ cfs_spin_lock(&ras->ras_lock);
+ cfs_list_del_init(&rar->lrr_linkage);
+ cfs_spin_unlock(&ras->ras_lock);
}
-static int ll_ap_refresh_count(void *data, int cmd)
+static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
- struct ll_async_page *llap;
- ENTRY;
+ struct ll_ra_read *scan;
- /* readpage queues with _COUNT_STABLE, shouldn't get here. */
- LASSERT(cmd != OBD_BRW_READ);
-
- llap = llap_from_cookie(data);
- if (IS_ERR(llap))
- RETURN(PTR_ERR(llap));
-
- return ll_write_count(llap->llap_page);
+ cfs_list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
+ if (scan->lrr_reader == current)
+ return scan;
+ }
+ return NULL;
}
-void ll_inode_fill_obdo(struct inode *inode, int cmd, struct obdo *oa)
+struct ll_ra_read *ll_ra_read_get(struct file *f)
{
- struct lov_stripe_md *lsm;
- obd_flag valid_flags;
+ struct ll_readahead_state *ras;
+ struct ll_ra_read *bead;
- lsm = ll_i2info(inode)->lli_smd;
+ ras = ll_ras_get(f);
- oa->o_id = lsm->lsm_object_id;
- oa->o_valid = OBD_MD_FLID;
- valid_flags = OBD_MD_FLTYPE | OBD_MD_FLATIME;
- if (cmd == OBD_BRW_WRITE) {
- oa->o_valid |= OBD_MD_FLIFID | OBD_MD_FLEPOCH;
- mdc_pack_fid(obdo_fid(oa), inode->i_ino, 0, inode->i_mode);
- oa->o_easize = ll_i2info(inode)->lli_io_epoch;
+ cfs_spin_lock(&ras->ras_lock);
+ bead = ll_ra_read_get_locked(ras);
+ cfs_spin_unlock(&ras->ras_lock);
+ return bead;
+}
- valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME;
- }
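+/* Submit a single page for read-ahead: queue it (with uptodate deferred)
+ * when it is covered by an extent lock, delete it and return -ENOLCK
+ * otherwise; pages that are already uptodate are skipped. */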
+static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, struct cl_page *page,
+ struct page *vmpage)
+{
+ struct ccc_page *cp;
+ int rc;
- obdo_from_inode(oa, inode, valid_flags);
+ ENTRY;
+
+ rc = 0;
+ cl_page_assume(env, io, page);
+ lu_ref_add(&page->cp_reference, "ra", cfs_current());
+ cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
+ if (!cp->cpg_defer_uptodate && !Page_Uptodate(vmpage)) {
+ rc = cl_page_is_under_lock(env, io, page);
+ if (rc == -EBUSY) {
+ cp->cpg_defer_uptodate = 1;
+ cp->cpg_ra_used = 0;
+ cl_page_list_add(queue, page);
+ rc = 1;
+ } else {
+ cl_page_delete(env, page);
+ rc = -ENOLCK;
+ }
+ } else
+ /* skip completed pages */
+ cl_page_unassume(env, io, page);
+ lu_ref_del(&page->cp_reference, "ra", cfs_current());
+ cl_page_put(env, page);
+ RETURN(rc);
}
-static void ll_ap_fill_obdo(void *data, int cmd, struct obdo *oa)
+/**
+ * Initiates read-ahead of a page with given index.
+ *
+ * \retval +ve: page was added to \a queue.
+ *
+ * \retval -ENOLCK: there is no extent lock for this part of a file, stop
+ * read-ahead.
+ *
+ * \retval -ve, 0: page wasn't added to \a queue for other reason.
+ */
+static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue,
+ pgoff_t index, struct address_space *mapping)
{
- struct ll_async_page *llap;
+ struct page *vmpage;
+ struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
+ struct cl_page *page;
+ enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
+ unsigned int gfp_mask;
+ int rc = 0;
+ const char *msg = NULL;
+
ENTRY;
- llap = llap_from_cookie(data);
- if (IS_ERR(llap)) {
- EXIT;
- return;
+ gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
+#ifdef __GFP_NOWARN
+ gfp_mask |= __GFP_NOWARN;
+#endif
+ vmpage = grab_cache_page_nowait_gfp(mapping, index, gfp_mask);
+ if (vmpage != NULL) {
+ /* Check if vmpage was truncated or reclaimed */
+ if (vmpage->mapping == mapping) {
+ page = cl_page_find(env, clob, vmpage->index,
+ vmpage, CPT_CACHEABLE);
+ if (!IS_ERR(page)) {
+ rc = cl_read_ahead_page(env, io, queue,
+ page, vmpage);
+ if (rc == -ENOLCK) {
+ which = RA_STAT_FAILED_MATCH;
+ msg = "lock match failed";
+ }
+ } else {
+ which = RA_STAT_FAILED_GRAB_PAGE;
+ msg = "cl_page_find failed";
+ }
+ } else {
+ which = RA_STAT_WRONG_GRAB_PAGE;
+ msg = "g_c_p_n returned invalid page";
+ }
+ if (rc != 1)
+ unlock_page(vmpage);
+ page_cache_release(vmpage);
+ } else {
+ which = RA_STAT_FAILED_GRAB_PAGE;
+ msg = "g_c_p_n failed";
}
-
- ll_inode_fill_obdo(llap->llap_page->mapping->host, cmd, oa);
- EXIT;
+ if (msg != NULL) {
+ ll_ra_stats_inc(mapping, which);
+ CDEBUG(D_READA, "%s\n", msg);
+ }
+ RETURN(rc);
}
-static struct obd_async_page_ops ll_async_page_ops = {
- .ap_make_ready = ll_ap_make_ready,
- .ap_refresh_count = ll_ap_refresh_count,
- .ap_fill_obdo = ll_ap_fill_obdo,
- .ap_completion = ll_ap_completion,
-};
+#define RIA_DEBUG(ria) \
+ CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n", \
+ ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\
+ ria->ria_pages)
-#define page_llap(page) \
- ((struct ll_async_page *)((page)->private & ~LL_PRIVBITS_MASK))
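+/* Read-ahead windows are grown and aligned in units of one full-sized
+ * RPC. */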
+#define RAS_INCREASE_STEP PTLRPC_MAX_BRW_PAGES
-/* XXX have the exp be an argument? */
-struct ll_async_page *llap_from_page(struct page *page)
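+/* The read pattern is considered to be in stride I/O mode once two or
+ * more consecutive stride requests have been observed. */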
+static inline int stride_io_mode(struct ll_readahead_state *ras)
{
- struct ll_async_page *llap;
- struct obd_export *exp;
- struct inode *inode = page->mapping->host;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- int rc;
- ENTRY;
+ return ras->ras_consecutive_stride_requests > 1;
+}
+/* Calculate how many pages will be read in [off, off + length] within a
+ * stride IO area where stride_offset = st_off, stride_length = st_len,
+ * stride_pages = st_pgs
+ *
+ * |------------------|*****|------------------|*****|------------|*****|....
+ * st_off
+ * |--- st_pgs ---|
+ * |----- st_len -----|
+ *
+ * How many pages it should read in such pattern
+ * |-------------------------------------------------------------|
+ * off
+ * |<------ length ------->|
+ *
+ * = |<----->| + |-------------------------------------| + |---|
+ * start_left st_pgs * i end_left
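+ *
+ * e.g. (illustrative numbers): st_off = 0, st_len = 8, st_pgs = 2,
+ * off = 1, length = 17: the data pages hit in [1, 18] are 1, 8, 9, 16
+ * and 17, so the result is 5.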
+ */
+static unsigned long
+stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
+ unsigned long off, unsigned long length)
+{
+ __u64 start = off > st_off ? off - st_off : 0;
+ __u64 end = off + length > st_off ? off + length - st_off : 0;
+ unsigned long start_left = 0;
+ unsigned long end_left = 0;
+ unsigned long pg_count;
+
+ if (st_len == 0 || length == 0 || end == 0)
+ return length;
+
+ start_left = do_div(start, st_len);
+ if (start_left < st_pgs)
+ start_left = st_pgs - start_left;
+ else
+ start_left = 0;
- llap = page_llap(page);
- if (llap != NULL) {
- if (llap->llap_magic != LLAP_MAGIC)
- RETURN(ERR_PTR(-EINVAL));
- RETURN(llap);
- }
-
- exp = ll_i2obdexp(page->mapping->host);
- if (exp == NULL)
- RETURN(ERR_PTR(-EINVAL));
-
- OBD_ALLOC(llap, sizeof(*llap));
- if (llap == NULL)
- RETURN(ERR_PTR(-ENOMEM));
- llap->llap_magic = LLAP_MAGIC;
- rc = obd_prep_async_page(exp, ll_i2info(inode)->lli_smd,
- NULL, page,
- (obd_off)page->index << PAGE_SHIFT,
- &ll_async_page_ops, llap, &llap->llap_cookie);
- if (rc) {
- OBD_FREE(llap, sizeof(*llap));
- RETURN(ERR_PTR(rc));
- }
+ end_left = do_div(end, st_len);
+ if (end_left > st_pgs)
+ end_left = st_pgs;
+
+ CDEBUG(D_READA, "start "LPU64", end "LPU64" start_left %lu end_left %lu \n",
+ start, end, start_left, end_left);
- CDEBUG(D_CACHE, "llap %p page %p cookie %p obj off "LPU64"\n", llap,
- page, llap->llap_cookie, (obd_off)page->index << PAGE_SHIFT);
- /* also zeroing the PRIVBITS low order bitflags */
- page->private = (unsigned long)llap;
- llap->llap_page = page;
+ if (start == end)
+ pg_count = end_left - (st_pgs - start_left);
+ else
+ pg_count = start_left + st_pgs * (end - start - 1) + end_left;
- spin_lock(&sbi->ll_pglist_lock);
- sbi->ll_pglist_gen++;
- list_add_tail(&llap->llap_proc_item, &sbi->ll_pglist);
- spin_unlock(&sbi->ll_pglist_lock);
+ CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu"
+ "pgcount %lu\n", st_off, st_len, st_pgs, off, length, pg_count);
- RETURN(llap);
+ return pg_count;
}
-/* update our write count to account for i_size increases that may have
- * happened since we've queued the page for io. */
-
-/* be careful not to return success without setting the page Uptodate or
- * the next pass through prepare_write will read in stale data from disk. */
-int ll_commit_write(struct file *file, struct page *page, unsigned from,
- unsigned to)
+static int ria_page_count(struct ra_io_arg *ria)
{
- struct inode *inode = page->mapping->host;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- struct obd_export *exp = NULL;
- struct ll_async_page *llap;
- loff_t size;
- int rc = 0;
- ENTRY;
+ __u64 length = ria->ria_end >= ria->ria_start ?
+ ria->ria_end - ria->ria_start + 1 : 0;
- SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
- LASSERT(inode == file->f_dentry->d_inode);
- LASSERT(PageLocked(page));
-
- CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
- inode, page, from, to, page->index);
-
- llap = llap_from_page(page);
- if (IS_ERR(llap))
- RETURN(PTR_ERR(llap));
-
- /* queue a write for some time in the future the first time we
- * dirty the page */
- if (!PageDirty(page)) {
- lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
- LPROC_LL_DIRTY_MISSES);
-
- exp = ll_i2obdexp(inode);
- if (exp == NULL)
- RETURN(-EINVAL);
-
- /* _make_ready only sees llap once we've unlocked the page */
- llap->llap_write_queued = 1;
- rc = obd_queue_async_io(exp, lsm, NULL, llap->llap_cookie,
- OBD_BRW_WRITE, 0, 0, 0, 0);
- if (rc != 0) { /* async failed, try sync.. */
- struct obd_sync_io_container *osic;
- osic_init(&osic);
-
- llap->llap_write_queued = 0;
- rc = obd_queue_sync_io(exp, lsm, NULL, osic,
- llap->llap_cookie,
- OBD_BRW_WRITE, 0, to, 0);
- if (rc)
- GOTO(free_osic, rc);
-
- rc = obd_trigger_sync_io(exp, lsm, NULL, osic);
- if (rc)
- GOTO(free_osic, rc);
-
- rc = osic_wait(osic);
-free_osic:
- osic_release(osic);
- GOTO(out, rc);
- }
- LL_CDEBUG_PAGE(page, "write queued\n");
- //llap_write_pending(inode, llap);
- } else {
- lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
- LPROC_LL_DIRTY_HITS);
- }
+ return stride_pg_count(ria->ria_stoff, ria->ria_length,
+ ria->ria_pages, ria->ria_start,
+ length);
+}
- /* put the page in the page cache, from now on ll_removepage is
- * responsible for cleaning up the llap */
- set_page_dirty(page);
+/* Check whether the index is inside the defined read-ahead window */
+static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
+{
+ /* If ria_length == ria_pages, this is non-stride I/O mode and
+ * idx is always inside the read-ahead window. For stride I/O
+ * mode, check whether idx falls inside the ria_pages part of
+ * the stride. */
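+ /* e.g. (illustrative): ria_stoff = 0, ria_length = 8, ria_pages = 2
+ * accepts idx 0, 1, 8, 9, 16, 17, ... */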
+ return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
+ (idx >= ria->ria_stoff && (idx - ria->ria_stoff) %
+ ria->ria_length < ria->ria_pages);
+}
-out:
- if (rc == 0) {
- /* XXX needs to be pushed down to the OSC as EOC */
- size = (((obd_off)page->index) << PAGE_SHIFT) + to;
- if (size > inode->i_size) {
- inode->i_size = size;
- /* see commentary in file.c:ll_inode_getattr() */
- set_bit(LLI_F_PREFER_EXTENDED_SIZE, &lli->lli_flags);
+static int ll_read_ahead_pages(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *queue,
+ struct ra_io_arg *ria,
+ unsigned long *reserved_pages,
+ struct address_space *mapping,
+ unsigned long *ra_end)
+{
+ int rc, count = 0, stride_ria;
+ unsigned long page_idx;
+
+ LASSERT(ria != NULL);
+ RIA_DEBUG(ria);
+
+ stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
+ for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
+ *reserved_pages > 0; page_idx++) {
+ if (ras_inside_ra_window(page_idx, ria)) {
+ /* If the page is inside the read-ahead window*/
+ rc = ll_read_ahead_page(env, io, queue,
+ page_idx, mapping);
+ if (rc == 1) {
+ (*reserved_pages)--;
+ count++;
+ } else if (rc == -ENOLCK)
+ break;
+ } else if (stride_ria) {
+ /* If the page is not inside the read-ahead window but
+ * this is stride read-ahead mode, check whether the
+ * stride gap should be skipped */
+ pgoff_t offset;
+ /* FIXME: This assertion is only valid for forward
+ * read-ahead; it will need fixing when backward
+ * read-ahead is implemented */
+ LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu "
+ "rs %lu re %lu ro %lu rl %lu rp %lu\n", page_idx,
+ ria->ria_start, ria->ria_end, ria->ria_stoff,
+ ria->ria_length, ria->ria_pages);
+ offset = page_idx - ria->ria_stoff;
+ offset = offset % (ria->ria_length);
+ if (offset > ria->ria_pages) {
+ page_idx += ria->ria_length - offset;
+ CDEBUG(D_READA, "i %lu skip %lu \n", page_idx,
+ ria->ria_length - offset);
+ continue;
+ }
}
- SetPageUptodate(page);
}
- RETURN(rc);
+ *ra_end = page_idx;
+ return count;
}
-/* the kernel calls us here when a page is unhashed from the page cache.
- * the page will be locked and the kernel is holding a spinlock, so
- * we need to be careful. we're just tearing down our book-keeping
- * here. */
-void ll_removepage(struct page *page)
+int ll_readahead(const struct lu_env *env, struct cl_io *io,
+ struct ll_readahead_state *ras, struct address_space *mapping,
+ struct cl_page_list *queue, int flags)
{
- struct inode *inode = page->mapping->host;
- struct obd_export *exp;
- struct ll_async_page *llap;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- int rc;
+ struct vvp_io *vio = vvp_env_io(env);
+ struct vvp_thread_info *vti = vvp_env_info(env);
+ struct cl_attr *attr = ccc_env_thread_attr(env);
+ unsigned long start = 0, end = 0, reserved;
+ unsigned long ra_end, len;
+ struct inode *inode;
+ struct ll_ra_read *bead;
+ struct ra_io_arg *ria = &vti->vti_ria;
+ struct ll_inode_info *lli;
+ struct cl_object *clob;
+ int ret = 0;
+ __u64 kms;
ENTRY;
- LASSERT(!in_interrupt());
+ inode = mapping->host;
+ lli = ll_i2info(inode);
+ clob = lli->lli_clob;
- /* sync pages or failed read pages can leave pages in the page
- * cache that don't have our data associated with them anymore */
- if (page->private == 0) {
- EXIT;
- return;
+ memset(ria, 0, sizeof *ria);
+
+ cl_object_attr_lock(clob);
+ ret = cl_object_attr_get(env, clob, attr);
+ cl_object_attr_unlock(clob);
+
+ if (ret != 0)
+ RETURN(ret);
+ kms = attr->cat_kms;
+ if (kms == 0) {
+ ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
+ RETURN(0);
}
- LL_CDEBUG_PAGE(page, "being evicted\n");
+ cfs_spin_lock(&ras->ras_lock);
+ if (vio->cui_ra_window_set)
+ bead = &vio->cui_bead;
+ else
+ bead = NULL;
- exp = ll_i2obdexp(inode);
- if (exp == NULL) {
- CERROR("page %p ind %lu gave null export\n", page,
- page->index);
- EXIT;
- return;
+ /* Enlarge the RA window to encompass the full read */
+ if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
+ bead->lrr_start + bead->lrr_count) {
+ ras->ras_window_len = bead->lrr_start + bead->lrr_count -
+ ras->ras_window_start;
+ }
+ /* Reserve a part of the read-ahead window that we'll be issuing */
+ if (ras->ras_window_len) {
+ start = ras->ras_next_readahead;
+ end = ras->ras_window_start + ras->ras_window_len - 1;
+ }
+ if (end != 0) {
+ unsigned long tmp_end;
+ /*
+ * Align RA window to an optimal boundary.
+ *
+ * XXX This would be better to align to cl_max_pages_per_rpc
+ * instead of PTLRPC_MAX_BRW_PAGES, because the RPC size may
+ * be aligned to the RAID stripe size in the future and that
+ * is more important than the RPC size.
+ */
+ tmp_end = ((end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1))) - 1;
+ if (tmp_end > start)
+ end = tmp_end;
+
+ /* Truncate RA window to end of file */
+ end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
+
+ ras->ras_next_readahead = max(end, end + 1);
+ RAS_CDEBUG(ras);
+ }
+ ria->ria_start = start;
+ ria->ria_end = end;
+ /* If stride I/O mode is detected, get the stride window */
+ if (stride_io_mode(ras)) {
+ ria->ria_stoff = ras->ras_stride_offset;
+ ria->ria_length = ras->ras_stride_length;
+ ria->ria_pages = ras->ras_stride_pages;
}
+ cfs_spin_unlock(&ras->ras_lock);
- llap = llap_from_page(page);
- if (IS_ERR(llap)) {
- CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
- page->index, PTR_ERR(llap));
- EXIT;
- return;
+ if (end == 0) {
+ ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
+ RETURN(0);
}
+ len = ria_page_count(ria);
+ if (len == 0)
+ RETURN(0);
- //llap_write_complete(inode, llap);
- rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
- llap->llap_cookie);
- if (rc != 0)
- CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);
-
- /* this unconditional free is only safe because the page lock
- * is providing exclusivity to memory pressure/truncate/writeback..*/
- page->private = 0;
-
- spin_lock(&sbi->ll_pglist_lock);
- if (!list_empty(&llap->llap_proc_item))
- list_del_init(&llap->llap_proc_item);
- sbi->ll_pglist_gen++;
- spin_unlock(&sbi->ll_pglist_lock);
- OBD_FREE(llap, sizeof(*llap));
- EXIT;
-}
+ reserved = ll_ra_count_get(ll_i2sbi(inode), len);
-static int ll_page_matches(struct page *page)
-{
- struct lustre_handle match_lockh = {0};
- struct inode *inode = page->mapping->host;
- struct ldlm_extent page_extent;
- int flags, matches;
- ENTRY;
+ if (reserved < len)
+ ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
- page_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
- page_extent.end = page_extent.start + PAGE_CACHE_SIZE - 1;
- flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED;
- matches = obd_match(ll_i2sbi(inode)->ll_osc_exp,
- ll_i2info(inode)->lli_smd, LDLM_EXTENT,
- &page_extent, sizeof(page_extent),
- LCK_PR, &flags, inode, &match_lockh);
- if (matches < 0) {
- LL_CDEBUG_PAGE(page, "lock match failed\n");
- RETURN(matches);
- }
- if (matches) {
- obd_cancel(ll_i2sbi(inode)->ll_osc_exp,
- ll_i2info(inode)->lli_smd, LCK_PR, &match_lockh);
- }
- RETURN(matches);
-}
-
-static int ll_issue_page_read(struct obd_export *exp,
- struct ll_async_page *llap, int defer_uptodate)
-{
- struct page *page = llap->llap_page;
- int rc;
-
- /* we don't issue this page as URGENT so that it can be batched
- * with other pages by the kernel's read-ahead. We have a strong
- * requirement that readpage() callers must call wait_on_page()
- * or lock_page() to get into ->sync_page() to trigger the IO */
- llap->llap_defer_uptodate = defer_uptodate;
- page_cache_get(page);
- set_bit(LL_PRIVBITS_READ, &page->private); /* see ll_sync_page() */
- rc = obd_queue_async_io(exp, ll_i2info(page->mapping->host)->lli_smd,
- NULL, llap->llap_cookie, OBD_BRW_READ, 0,
- PAGE_SIZE, 0, ASYNC_COUNT_STABLE);
- if (rc) {
- LL_CDEBUG_PAGE(page, "read queueing failed\n");
- clear_bit(LL_PRIVBITS_READ, &page->private);
- page_cache_release(page);
- }
- RETURN(rc);
-}
+ CDEBUG(D_READA, "reserved page %lu \n", reserved);
-static void ll_readahead(struct ll_readahead_state *ras,
- struct obd_export *exp, struct address_space *mapping)
-{
- unsigned long i, start, end;
- struct ll_async_page *llap;
- struct page *page;
- int rc;
+ ret = ll_read_ahead_pages(env, io, queue,
+ ria, &reserved, mapping, &ra_end);
- if (mapping->host->i_size == 0)
- return;
+ LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
+ if (reserved != 0)
+ ll_ra_count_put(ll_i2sbi(inode), reserved);
+
+ if (ra_end == end + 1 && ra_end == (kms >> CFS_PAGE_SHIFT))
+ ll_ra_stats_inc(mapping, RA_STAT_EOF);
- spin_lock(&ras->ras_lock);
-
- /* make sure to issue a window's worth of read-ahead pages */
- end = ras->ras_last;
- start = end - ras->ras_window;
- if (start > end)
- start = 0;
-
- /* but don't iterate over pages that we've already issued. this
- * will set start to end + 1 if we've already read-ahead up to
- * ras_last sothe for() won't be entered */
- if (ras->ras_next_index > start)
- start = ras->ras_next_index;
- if (end != ~0UL)
- ras->ras_next_index = end + 1;
-
- CDEBUG(D_READA, "ni %lu last %lu win %lu: reading from %lu to %lu\n",
- ras->ras_next_index, ras->ras_last, ras->ras_window,
- start, end);
-
- spin_unlock(&ras->ras_lock);
-
- /* clamp to filesize */
- i = (mapping->host->i_size - 1) >> PAGE_CACHE_SHIFT;
- end = min(end, i);
-
- for (i = start; i <= end; i++) {
- /* grab_cache_page_nowait returns null if this races with
- * truncating the page (page->mapping == NULL) */
- page = grab_cache_page_nowait(mapping, i);
- if (page == NULL)
- continue;
-
- /* the book-keeping above promises that we've tried
- * all the indices from start to end, so we don't
- * stop if anyone returns an error. This may not be good. */
- if (Page_Uptodate(page) || ll_page_matches(page) <= 0)
- goto next_page;
-
- llap = llap_from_page(page);
- if (IS_ERR(llap) || llap->llap_defer_uptodate)
- goto next_page;
-
- rc = ll_issue_page_read(exp, llap, 1);
- if (rc == 0)
- LL_CDEBUG_PAGE(page, "started read-ahead\n");
- if (rc) {
- next_page:
- LL_CDEBUG_PAGE(page, "skipping read-ahead\n");
-
- unlock_page(page);
+ /* If we did not reach the end of the region we reserved from
+ * the RAS, go back and update the RAS so that the next
+ * read-ahead resumes where we left off. We only do so if the
+ * region we failed to issue read-ahead for is still ahead of
+ * the app and behind the next index to start read-ahead from */
+ CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n",
+ ra_end, end, ria->ria_end);
+
+ if (ra_end != end + 1) {
+ cfs_spin_lock(&ras->ras_lock);
+ if (ra_end < ras->ras_next_readahead &&
+ index_in_window(ra_end, ras->ras_window_start, 0,
+ ras->ras_window_len)) {
+ ras->ras_next_readahead = ra_end;
+ RAS_CDEBUG(ras);
}
- page_cache_release(page);
+ cfs_spin_unlock(&ras->ras_lock);
}
+
+ RETURN(ret);
+}
+
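+/* Align the window start down to a RAS_INCREASE_STEP (one full RPC worth
+ * of pages) boundary; assumes RAS_INCREASE_STEP is a power of two. */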
+static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
+{
+ ras->ras_window_start = index & (~(RAS_INCREASE_STEP - 1));
}
-/* XXX this should really bubble up somehow. */
-#define LL_RA_MIN ((unsigned long)PTL_MD_MAX_PAGES / 2)
-#define LL_RA_MAX ((unsigned long)(32 * PTL_MD_MAX_PAGES))
+/* called with the ras_lock held or from places where it doesn't matter */
+static void ras_reset(struct ll_readahead_state *ras, unsigned long index)
+{
+ ras->ras_last_readpage = index;
+ ras->ras_consecutive_requests = 0;
+ ras->ras_consecutive_pages = 0;
+ ras->ras_window_len = 0;
+ ras_set_start(ras, index);
+ ras->ras_next_readahead = max(ras->ras_window_start, index);
+
+ RAS_CDEBUG(ras);
+}
/* called with the ras_lock held or from places where it doesn't matter */
-static void ll_readahead_set(struct ll_readahead_state *ras,
- unsigned long index)
-{
- ras->ras_next_index = index;
- if (ras->ras_next_index != ~0UL)
- ras->ras_next_index++;
- ras->ras_window = LL_RA_MIN;
- ras->ras_last = ras->ras_next_index + ras->ras_window;
- if (ras->ras_last < ras->ras_next_index)
- ras->ras_last = ~0UL;
- CDEBUG(D_READA, "ni %lu last %lu win %lu: set %lu\n",
- ras->ras_next_index, ras->ras_last, ras->ras_window,
- index);
-}
-
-void ll_readahead_init(struct ll_readahead_state *ras)
-{
- spin_lock_init(&ras->ras_lock);
- ll_readahead_set(ras, 0);
-}
-
-static void ll_readahead_update(struct ll_readahead_state *ras,
- unsigned long index, int hit)
-{
- unsigned long issued_start, new_last;
-
- spin_lock(&ras->ras_lock);
-
- /* we're interested in noticing the index's relation to the
- * previously issued read-ahead pages */
- issued_start = ras->ras_next_index - ras->ras_window - 1;
- if (issued_start > ras->ras_next_index)
- issued_start = 0;
-
- CDEBUG(D_READA, "ni %lu last %lu win %lu: %s ind %lu start %lu\n",
- ras->ras_next_index, ras->ras_last, ras->ras_window,
- hit ? "hit" : "miss", index, issued_start);
- if (!hit &&
- index == ras->ras_next_index && index == ras->ras_last + 1) {
- /* special case the kernel's read-ahead running into the
- * page just beyond our read-ahead window as an extension
- * of our read-ahead. sigh. wishing it was easier to
- * turn off 2.4's read-ahead. */
- ras->ras_window = min(LL_RA_MAX, ras->ras_window + 1);
- if (index != ~0UL)
- ras->ras_next_index = index + 1;
- ras->ras_last = index;
- } else if (!hit &&
- (index > issued_start || ras->ras_next_index >= index)) {
- /* deal with a miss way out of the window. we interpret
- * this as a seek and restart the window */
- ll_readahead_set(ras, index);
-
- } else if (!hit &&
- issued_start <= index && index < ras->ras_next_index) {
- /* a miss inside the window? surely its memory pressure
- * evicting our read pages before the app can see them.
- * we shrink the window aggressively */
- unsigned long old_window = ras->ras_window;
-
- ras->ras_window = max(ras->ras_window / 2, LL_RA_MIN);
- ras->ras_last -= old_window - ras->ras_window;
- if (ras->ras_next_index > ras->ras_last)
- ras->ras_next_index = ras->ras_last + 1;
- CDEBUG(D_READA, "ni %lu last %lu win %lu: miss inside\n",
- ras->ras_next_index, ras->ras_last, ras->ras_window);
-
- } else if (hit &&
- issued_start <= index && index < ras->ras_next_index) {
- /* a hit inside the window. grow the window by twice the
- * number of pages that are satisified within the window. */
- ras->ras_window = min(LL_RA_MAX, ras->ras_window + 2);
-
- /* we want the next readahead pass to issue a windows worth
- * beyond where the app currently is */
- new_last = index + ras->ras_window;
- if (new_last > ras->ras_last)
- ras->ras_last = new_last;
-
- CDEBUG(D_READA, "ni %lu last %lu win %lu: extended window/last\n",
- ras->ras_next_index, ras->ras_last, ras->ras_window);
- }
+static void ras_stride_reset(struct ll_readahead_state *ras)
+{
+ ras->ras_consecutive_stride_requests = 0;
+ ras->ras_stride_length = 0;
+ ras->ras_stride_pages = 0;
+ RAS_CDEBUG(ras);
+}
- spin_unlock(&ras->ras_lock);
+void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
+{
+ cfs_spin_lock_init(&ras->ras_lock);
+ ras_reset(ras, 0);
+ ras->ras_requests = 0;
+ CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
}
/*
- * for now we do our readpage the same on both 2.4 and 2.5. The kernel's
- * read-ahead assumes it is valid to issue readpage all the way up to
- * i_size, but our dlm locks make that not the case. We disable the
- * kernel's read-ahead and do our own by walking ahead in the page cache
- * checking for dlm lock coverage. the main difference between 2.4 and
- * 2.6 is how read-ahead gets batched and issued, but we're using our own,
- * so they look the same.
+ * Check whether the read request is in the stride window.
+ * If it is in the stride window, return 1, otherwise return 0.
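+ *
+ * e.g. (illustrative): with ras_stride_length = 8 and ras_stride_pages = 2,
+ * after pages 8 and 9 have been read, a read of page 16 leaves a gap of
+ * 6 = 8 - 2 pages and is therefore still inside the stride window.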
*/
-int ll_readpage(struct file *filp, struct page *page)
+static int index_in_stride_window(unsigned long index,
+ struct ll_readahead_state *ras,
+ struct inode *inode)
{
- struct ll_file_data *fd = filp->private_data;
- struct inode *inode = page->mapping->host;
- struct obd_export *exp;
- int rc;
- struct ll_async_page *llap;
- ENTRY;
+ unsigned long stride_gap = index - ras->ras_last_readpage - 1;
- LASSERT(PageLocked(page));
- LASSERT(!PageUptodate(page));
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),offset="LPX64"\n",
- inode->i_ino, inode->i_generation, inode,
- (((obd_off)page->index) << PAGE_SHIFT));
- LASSERT(atomic_read(&filp->f_dentry->d_inode->i_count) > 0);
-
- exp = ll_i2obdexp(inode);
- if (exp == NULL)
- GOTO(out, rc = -EINVAL);
-
- llap = llap_from_page(page);
- if (IS_ERR(llap))
- GOTO(out, rc = PTR_ERR(llap));
-
- if (llap->llap_defer_uptodate) {
- ll_readahead_update(&fd->fd_ras, page->index, 1);
- LL_CDEBUG_PAGE(page, "marking uptodate from defer\n");
- SetPageUptodate(page);
- ll_readahead(&fd->fd_ras, exp, page->mapping);
- unlock_page(page);
- RETURN(0);
+ if (ras->ras_stride_length == 0 || ras->ras_stride_pages == 0 ||
+ ras->ras_stride_pages == ras->ras_stride_length)
+ return 0;
+
+ /* If it is a contiguous read */
+ if (stride_gap == 0)
+ return ras->ras_consecutive_pages + 1 <= ras->ras_stride_pages;
+
+ /* Otherwise check the stride by itself */
+ return (ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
+ ras->ras_consecutive_pages == ras->ras_stride_pages;
+}
+
+static void ras_update_stride_detector(struct ll_readahead_state *ras,
+ unsigned long index)
+{
+ unsigned long stride_gap = index - ras->ras_last_readpage - 1;
+
+ if (!stride_io_mode(ras) && (stride_gap != 0 ||
+ ras->ras_consecutive_stride_requests == 0)) {
+ ras->ras_stride_pages = ras->ras_consecutive_pages;
+ ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
+ }
+ LASSERT(ras->ras_request_index == 0);
+ LASSERT(ras->ras_consecutive_stride_requests == 0);
+
+ if (index <= ras->ras_last_readpage) {
+ /* Stride detection only handles forward reads; reset on a backward read */
+ ras_stride_reset(ras);
+ return;
}
- ll_readahead_update(&fd->fd_ras, page->index, 0);
+ ras->ras_stride_pages = ras->ras_consecutive_pages;
+ ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
+
+ RAS_CDEBUG(ras);
+ return;
+}
+
+static unsigned long
+stride_page_count(struct ll_readahead_state *ras, unsigned long len)
+{
+ return stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
+ ras->ras_stride_pages, ras->ras_stride_offset,
+ len);
+}
+
+/* Increase the stride read-ahead window so that it covers inc_len additional
+ * data pages, laid out according to the stride I/O pattern */
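+/* e.g. (illustrative numbers): with stride_length = 8, stride_pages = 2,
+ * stride_offset = 0 and a current window covering pages 0..7, inc_len = 8
+ * adds four whole strides, growing ras_window_len from 8 to 40. */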
+static void ras_stride_increase_window(struct ll_readahead_state *ras,
+ struct ll_ra_info *ra,
+ unsigned long inc_len)
+{
+ unsigned long left, step, window_len;
+ unsigned long stride_len;
+
+ LASSERT(ras->ras_stride_length > 0);
+ LASSERTF(ras->ras_window_start + ras->ras_window_len
+ >= ras->ras_stride_offset, "window_start %lu, window_len %lu"
+ " stride_offset %lu\n", ras->ras_window_start,
+ ras->ras_window_len, ras->ras_stride_offset);
+
+ stride_len = ras->ras_window_start + ras->ras_window_len -
+ ras->ras_stride_offset;
+
+ left = stride_len % ras->ras_stride_length;
+ window_len = ras->ras_window_len - left;
+
+ if (left < ras->ras_stride_pages)
+ left += inc_len;
+ else
+ left = ras->ras_stride_pages + inc_len;
+
+ LASSERT(ras->ras_stride_pages != 0);
+
+ step = left / ras->ras_stride_pages;
+ left %= ras->ras_stride_pages;
+
+ window_len += step * ras->ras_stride_length + left;
+
+ if (stride_page_count(ras, window_len) <= ra->ra_max_pages_per_file)
+ ras->ras_window_len = window_len;
+
+ RAS_CDEBUG(ras);
+}
+
+static void ras_increase_window(struct ll_readahead_state *ras,
+ struct ll_ra_info *ra, struct inode *inode)
+{
+ /* The growth of the RA window should be aligned with the max
+ * RPC size, but the current CLIO architecture does not support
+ * retrieving that information from the lower layers. FIXME later
+ */
+ if (stride_io_mode(ras))
+ ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP);
+ else
+ ras->ras_window_len = min(ras->ras_window_len +
+ RAS_INCREASE_STEP,
+ ra->ra_max_pages_per_file);
+}
+
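+/* Called for every page read to update the read-ahead state machine:
+ * classify the access as sequential, stride or random, and grow, shift
+ * or reset the read-ahead window accordingly. */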
+void ras_update(struct ll_sb_info *sbi, struct inode *inode,
+ struct ll_readahead_state *ras, unsigned long index,
+ unsigned hit)
+{
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ int zero = 0, stride_detect = 0, ra_miss = 0;
+ ENTRY;
- rc = ll_page_matches(page);
- if (rc < 0)
- GOTO(out, rc);
+ cfs_spin_lock(&ras->ras_lock);
+
+ ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
+
+ /* Reset the read-ahead window in two cases. First, when the app
+ * seeks or reads to some other part of the file. Second, when we
+ * get a read-ahead miss that we think we've previously issued.
+ * This can be a symptom of there being so many read-ahead pages
+ * that the VM reclaims them before we get to them. */
+ if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
+ zero = 1;
+ ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
+ } else if (!hit && ras->ras_window_len &&
+ index < ras->ras_next_readahead &&
+ index_in_window(index, ras->ras_window_start, 0,
+ ras->ras_window_len)) {
+ ra_miss = 1;
+ ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
+ }
- if (rc == 0) {
- static unsigned long next_print;
- CDEBUG(D_INODE, "didn't match a lock\n");
- if (time_after(jiffies, next_print)) {
- next_print = jiffies + 30 * HZ;
- CERROR("not covered by a lock (mmap?). check debug "
- "logs.\n");
+ /* On the second access to a file smaller than the tunable
+ * ra_max_read_ahead_whole_pages, trigger RA on all pages in the
+ * file, up to ra_max_pages_per_file. This is best effort and
+ * only occurs once per open file. Normal RA behavior resumes
+ * for subsequent IO. The mmap case does not increment
+ * ras_requests and thus can never trigger this behavior. */
+ if (ras->ras_requests == 2 && !ras->ras_request_index) {
+ __u64 kms_pages;
+
+ kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
+ CFS_PAGE_SHIFT;
+
+ CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
+ ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
+
+ if (kms_pages &&
+ kms_pages <= ra->ra_max_read_ahead_whole_pages) {
+ ras->ras_window_start = 0;
+ ras->ras_last_readpage = 0;
+ ras->ras_next_readahead = 0;
+ ras->ras_window_len = min(ra->ra_max_pages_per_file,
+ ra->ra_max_read_ahead_whole_pages);
+ GOTO(out_unlock, 0);
+ }
+ }
+ if (zero) {
+ /* Check whether it is in stride I/O mode */
+ if (!index_in_stride_window(index, ras, inode)) {
+ if (ras->ras_consecutive_stride_requests == 0 &&
+ ras->ras_request_index == 0) {
+ ras_update_stride_detector(ras, index);
+ ras->ras_consecutive_stride_requests++;
+ } else {
+ ras_stride_reset(ras);
+ }
+ ras_reset(ras, index);
+ ras->ras_consecutive_pages++;
+ GOTO(out_unlock, 0);
+ } else {
+ ras->ras_consecutive_pages = 0;
+ ras->ras_consecutive_requests = 0;
+ if (++ras->ras_consecutive_stride_requests > 1)
+ stride_detect = 1;
+ RAS_CDEBUG(ras);
+ }
+ } else {
+ if (ra_miss) {
+ if (index_in_stride_window(index, ras, inode) &&
+ stride_io_mode(ras)) {
+ /* If stride-RA hit a cache miss, the stride
+ * detector is not reset, to avoid the overhead
+ * of re-detecting the read-ahead mode */
+ if (index != ras->ras_last_readpage + 1)
+ ras->ras_consecutive_pages = 0;
+ ras_reset(ras, index);
+ RAS_CDEBUG(ras);
+ } else {
+ /* Reset both stride window and normal RA
+ * window */
+ ras_reset(ras, index);
+ ras->ras_consecutive_pages++;
+ ras_stride_reset(ras);
+ GOTO(out_unlock, 0);
+ }
+ } else if (stride_io_mode(ras)) {
+ /* If this is a contiguous read but we are currently
+ * in stride I/O mode, check whether the stride step
+ * is still valid; if not, reset the stride RA window */
+ if (!index_in_stride_window(index, ras, inode)) {
+ /* Shrink stride read-ahead window to be zero */
+ ras_stride_reset(ras);
+ ras->ras_window_len = 0;
+ ras->ras_next_readahead = index;
+ }
}
}
+ ras->ras_consecutive_pages++;
+ ras->ras_last_readpage = index;
+ ras_set_start(ras, index);
+
+ if (stride_io_mode(ras))
+ /* Stride read-ahead is sensitive to the read-ahead
+ * offset, so use the original offset here instead of
+ * ras_window_start, which is 1M aligned */
+ ras->ras_next_readahead = max(index,
+ ras->ras_next_readahead);
+ else
+ ras->ras_next_readahead = max(ras->ras_window_start,
+ ras->ras_next_readahead);
+ RAS_CDEBUG(ras);
+
+ /* Trigger RA in the mmap case where ras_consecutive_requests
+ * is not incremented and thus can't be used to trigger RA */
+ if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
+ ras->ras_window_len = RAS_INCREASE_STEP;
+ GOTO(out_unlock, 0);
+ }
- rc = ll_issue_page_read(exp, llap, 0);
- if (rc == 0) {
- LL_CDEBUG_PAGE(page, "queued readpage\n");
- if ((ll_i2sbi(inode)->ll_flags & LL_SBI_READAHEAD))
- ll_readahead(&fd->fd_ras, exp, page->mapping);
+ /* Initially reset the stride window offset to next_readahead */
+ if (ras->ras_consecutive_stride_requests == 2 && stride_detect) {
+ /**
+ * Once stride IO mode is detected, next_readahead should be
+ * reset to make sure next_readahead > stride offset
+ */
+ ras->ras_next_readahead = max(index, ras->ras_next_readahead);
+ ras->ras_stride_offset = index;
+ ras->ras_window_len = RAS_INCREASE_STEP;
}
-out:
- if (rc)
- unlock_page(page);
- RETURN(rc);
+
+ /* The initial ras_window_len is set to the request size. To avoid
+ * uselessly reading and discarding pages for random IO, the window is
+ * only increased once per consecutive request received. */
+ if ((ras->ras_consecutive_requests > 1 || stride_detect) &&
+ !ras->ras_request_index)
+ ras_increase_window(ras, ra, inode);
+ EXIT;
+out_unlock:
+ RAS_CDEBUG(ras);
+ ras->ras_request_index++;
+ cfs_spin_unlock(&ras->ras_lock);
+ return;
}
-/* this is for read pages. we issue them as ready but not urgent. when
- * someone waits on them we fire them off, hopefully merged with adjacent
- * reads that were queued by read-ahead. */
-int ll_sync_page(struct page *page)
+int ll_writepage(struct page *vmpage, struct writeback_control *unused)
{
- struct obd_export *exp;
- struct ll_async_page *llap;
- int rc;
+ struct inode *inode = vmpage->mapping->host;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_page *page;
+ struct cl_object *clob;
+ struct cl_2queue *queue;
+ struct cl_env_nest nest;
+ int result;
ENTRY;
- /* we're using a low bit flag to signify that a queued read should
- * be issued once someone goes to lock it. it is also cleared
- * as the page is built into an RPC */
- if (!test_and_clear_bit(LL_PRIVBITS_READ, &page->private))
- RETURN(0);
+ LASSERT(PageLocked(vmpage));
+ LASSERT(!PageWriteback(vmpage));
- /* careful to only deref page->mapping after checking the bit */
- exp = ll_i2obdexp(page->mapping->host);
- if (exp == NULL)
+ if (ll_i2dtexp(inode) == NULL)
RETURN(-EINVAL);
-
- llap = llap_from_page(page);
- if (IS_ERR(llap))
- RETURN(PTR_ERR(llap));
- LL_CDEBUG_PAGE(page, "setting ready|urgent\n");
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ io = &ccc_env_info(env)->cti_io;
+ queue = &vvp_env_info(env)->vti_queue;
+ clob = ll_i2info(inode)->lli_clob;
+ LASSERT(clob != NULL);
+
+ io->ci_obj = clob;
+ result = cl_io_init(env, io, CIT_MISC, clob);
+ if (result == 0) {
+ page = cl_page_find(env, clob, vmpage->index,
+ vmpage, CPT_CACHEABLE);
+ if (!IS_ERR(page)) {
+ lu_ref_add(&page->cp_reference, "writepage",
+ cfs_current());
+ cl_page_assume(env, io, page);
+ /*
+ * Mark page dirty, because this is what
+ * ->vio_submit()->cpo_prep_write() assumes.
+ *
+ * XXX better solution is to detect this from within
+ * cl_io_submit_rw() somehow.
+ */
+ set_page_dirty(vmpage);
+ cl_2queue_init_page(queue, page);
+ result = cl_io_submit_rw(env, io, CRT_WRITE,
+ queue, CRP_NORMAL);
+ cl_page_list_disown(env, io, &queue->c2_qin);
+ if (result != 0) {
+ /*
+ * There is no need to clear PG_writeback, as
+ * cl_io_submit_rw() calls completion callback
+ * on failure.
+ */
+ /*
+ * Re-dirty page on error so it retries write,
+ * but not in case when IO has actually
+ * occurred and completed with an error.
+ */
+ if (!PageError(vmpage))
+ set_page_dirty(vmpage);
+ }
+ LASSERT(!cl_page_is_owned(page, io));
+ lu_ref_del(&page->cp_reference,
+ "writepage", cfs_current());
+ cl_page_put(env, page);
+ cl_2queue_fini(env, queue);
+ }
+ }
+ cl_io_fini(env, io);
+ cl_env_nested_put(&nest, env);
+ RETURN(result);
+}
+
+int ll_readpage(struct file *file, struct page *vmpage)
+{
+ struct ll_cl_context *lcc;
+ int result;
+ ENTRY;
- rc = obd_set_async_flags(exp, ll_i2info(page->mapping->host)->lli_smd,
- NULL, llap->llap_cookie,
- ASYNC_READY|ASYNC_URGENT);
- return rc;
+ lcc = ll_cl_init(file, vmpage, 0);
+ if (!IS_ERR(lcc)) {
+ struct lu_env *env = lcc->lcc_env;
+ struct cl_io *io = lcc->lcc_io;
+ struct cl_page *page = lcc->lcc_page;
+
+ LASSERT(page->cp_type == CPT_CACHEABLE);
+ if (likely(!PageUptodate(vmpage))) {
+ cl_page_assume(env, io, page);
+ result = cl_io_read_page(env, io, page);
+ } else {
+ /* Page from a non-object file. */
+ LASSERT(!ll_i2info(vmpage->mapping->host)->lli_smd);
+ unlock_page(vmpage);
+ result = 0;
+ }
+ ll_cl_fini(lcc);
+ } else {
+ unlock_page(vmpage);
+ result = PTR_ERR(lcc);
+ }
+ RETURN(result);
}
+